xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_qp.c (revision 8730046c)
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

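/*
 * Convert a position in the QPN bitmap (map page plus bit offset
 * within that page) back into the QP number it represents.
 */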
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

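/*
 * Find the next candidate QPN offset.  When the chip spreads QPNs
 * across receive contexts (qpt_mask != 0), step to the next QPN whose
 * masked bits select one of the n kernel receive queues; otherwise
 * simply scan the bitmap for the next zero bit.
 */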
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};

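/*
 * Example: with 300 free RWQEs, qib_compute_aeth() below advertises
 * credit code 0x10 (256 credits); its binary search selects the
 * largest credit_table[] entry that does not exceed the actual count.
 */

/*
 * Per-opcode parameters used by rdmavt to validate posted send work
 * requests: the expected WR structure size and the QP types that
 * support each opcode.
 */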
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

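/*
 * Allocate one bitmap page for the QPN table.  The page is allocated
 * without qpt->lock held, so re-check under the lock and free the page
 * if another thread raced with us and installed one first.
 */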
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: the rvt device info structure
 *
 * Returns the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

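/*
 * rdmavt hook: the QP has been moved back to the reset state, so clear
 * the count of outstanding send DMA descriptors.
 */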
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

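/*
 * rdmavt hook: the QP is transitioning to the error state.  Take the
 * QP off the iowait list and, if no send is in flight, drop any cached
 * send-side resources (RDMA MR reference and SDMA tx request).
 */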
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

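/*
 * Map an MTU in bytes to the IB enum value, defaulting to IB_MTU_2048
 * for values that are not a valid IB MTU.
 */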
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

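/*
 * Clamp the path MTU requested in the QP attributes to what the
 * physical port supports.  Returns the usable path MTU enum, or
 * -EINVAL for an invalid attribute value.
 */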
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

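/*
 * Thin rdmavt hooks translating between byte MTUs and IB path MTU
 * enum values on behalf of the core rvt code.
 */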
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

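/*
 * Allocate the qib-private portion of a QP (send header buffer, send
 * work item, and DMA wait state).  Returns an ERR_PTR on allocation
 * failure.
 */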
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

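/*
 * Stop further send-side processing for this QP: cancel any queued
 * send work and stop the send state machine's timer.
 */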
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
	del_timer_sync(&qp->s_timer);
}

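/*
 * Wait for any in-flight send DMA to complete, then release the cached
 * tx request, leaving the QP quiesced for a state change.
 */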
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

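/*
 * Take the QP off the iowait list so nothing will try to schedule it
 * once it is reset or destroyed.
 */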
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle the credit field in an incoming AETH
 * @qp: the qp whose send state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the work request.  This is called prior to inserting the
 * wqe into the ring, but after the wqe has been set up.
 *
 * Returns 1 to force direct progress, 0 otherwise, and -EINVAL on failure.
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

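/*
 * The iterator below walks the RCU-protected rdmavt QP hash table, so
 * qib_qp_iter_next() must be called under rcu_read_lock().  A minimal
 * usage sketch (the real seq_file wiring lives in qib_debugfs.c):
 *
 *	struct qib_qp_iter *iter = qib_qp_iter_init(dev);
 *
 *	rcu_read_lock();
 *	while (iter && !qib_qp_iter_next(iter))
 *		qib_qp_iter_print(s, iter);
 *	rcu_read_unlock();
 *	kfree(iter);
 */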
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif