/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The 'mask' field that was present in the now-deleted qib_qpn_table
 * does not exist in rvt_qpn_table.  Rather than adding a mask field
 * to rvt_qpn_table, define the same field here as qpt_mask.
 */
u16 qpt_mask;

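/* Compute the QPN encoded by a bitmap page and a bit offset within it. */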
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

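/*
 * Find the next candidate bit offset in @map.  When qpt_mask is set,
 * offsets are skipped so that the masked QPN bits stay below the
 * number of kernel receive contexts (@n); otherwise simply find the
 * next zero bit in the page.
 */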
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 * The index is the 5-bit credit code carried in the AETH; values grow
 * roughly geometrically.  The one remaining code, QIB_AETH_CREDIT_INVAL,
 * is not in the table and is handled separately (see qib_compute_aeth()
 * and qib_get_credit()).
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};

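/*
 * Per-opcode parameters used by rdmavt to validate posted send work
 * requests: the expected WR structure size, the QP types that may use
 * the opcode, and special-handling flags.
 */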
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

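/*
 * Allocate a zeroed bitmap page for the QPN table.  If another thread
 * raced with us and already installed a page, free ours and keep the
 * existing one.
 */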
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of QP0/QP1 (SMI/GSI) QPs still in use across all
 * ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

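/* rdmavt hook: a QP was reset, so clear the count of in-flight send DMAs. */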
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

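/*
 * rdmavt hook: a QP moved to the error state.  Take it off the I/O
 * wait list and, if the send engine is idle, drop the send-side
 * resources it still holds.
 */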
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

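/*
 * Map an MTU in bytes to the corresponding IB enum value; unrecognized
 * sizes fall back to IB_MTU_2048.
 */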
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

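/*
 * rdmavt hook: clamp the path MTU requested in @attr to what the
 * physical port supports.  Returns an IB MTU enum, or -EINVAL for an
 * unrecognized value.
 */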
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

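/* rdmavt hook: convert an MTU in bytes to an IB path MTU enum. */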
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

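/* rdmavt hook: convert a path MTU enum back to a byte count. */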
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

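/*
 * rdmavt hook: allocate the qib-private portion of a QP (send header
 * buffer, DMA wait queue and send work item).
 */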
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

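/* rdmavt hook: free what qib_qp_priv_alloc() allocated. */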
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

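/*
 * rdmavt hook: cancel any queued send work and stop the QP's send-side
 * timer.
 */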
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
	del_timer_sync(&qp->s_timer);
}

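/*
 * rdmavt hook: wait for any in-flight send DMA to complete, then
 * release the pending tx request, if any.
 */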
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

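/* rdmavt hook: take the QP off the I/O wait list, if it is queued. */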
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle the AETH credit field of an ACK
 * @qp: the qp the credit update is for
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Update the QP's send credit limit from the AETH credit code and
 * restart the send engine if it was waiting for credits.
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

/**
 * qib_check_send_wqe - validate a wr/wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wr/wqe.  This is called after the wqe has been set up
 * but before it is inserted into the ring.
 *
 * Return: 1 to force direct progress, 0 otherwise, -EINVAL on failure.
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

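/*
 * Start a debugfs iteration over all QPs in the device's QP table.
 * Returns NULL on allocation failure or if the table holds no QPs.
 */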
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

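/*
 * Advance the iterator to the next QP: follow the current hash chain,
 * then move on through the remaining table buckets.  The table is
 * walked with rcu_dereference(), so the caller must be in an RCU
 * read-side critical section.  Returns 0 if a QP was found, 1 when the
 * iteration is exhausted.
 */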
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

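/*
 * Print a one-line debugfs summary of the QP: state, current opcode,
 * send flags, PSN bookkeeping and send-queue indices.
 */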
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif