xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_qp.c (revision 1c2dd16a)
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

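/*
 * mk_qpn - convert a QPN bitmap (map, offset) pair back into a QPN.
 * Each rvt_qpn_map covers RVT_BITS_PER_PAGE QPNs, so e.g. map index 1
 * with bit offset 5 yields QPN RVT_BITS_PER_PAGE + 5.
 */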
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

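/*
 * find_next_offset - find the next usable bit offset in a QPN bitmap
 * page.  When the device requires QPN bits to encode the receive
 * context (qpt_mask is non-zero), step the offset past values that
 * would select a context >= n (the number of kernel receive queues);
 * otherwise simply find the next zero bit in the page.
 */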
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

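/*
 * Per-opcode posting parameters: for each supported work request
 * opcode, the size of the corresponding ib_*_wr structure, the QP
 * types allowed to post it, and any rdmavt operation flags.  rdmavt
 * checks posted work requests against this table before queuing them.
 */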
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

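/*
 * get_map_page - allocate and install one QPN bitmap page.  The page
 * is allocated without holding the table lock; the lock is taken only
 * to publish it, and the page is freed if another CPU installed one
 * in the meantime.
 */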
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of special QPs (QP0/QP1) still in use.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

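/*
 * qib_notify_qp_reset - rdmavt callback invoked when a QP is reset;
 * clear the driver-private send DMA busy count.
 */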
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

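/*
 * qib_notify_error_qp - rdmavt callback invoked when a QP is moved to
 * the error state.  Take the QP off the I/O wait list and, if the send
 * engine is not busy, release the header words, RDMA MR reference, and
 * pending tx request still held by the send side.
 */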
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

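/*
 * mtu_to_enum - map an MTU in bytes to the matching IB MTU enum value,
 * defaulting to IB_MTU_2048 for unrecognized sizes.
 */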
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

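/*
 * qib_get_pmtu_from_attr - validate the path MTU requested in the QP
 * attributes and clamp it to the port's actual IB MTU.  Returns an IB
 * MTU enum value, or -EINVAL if the requested MTU is not a valid enum.
 */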
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

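/*
 * qib_mtu_to_path_mtu - rdmavt callback converting an MTU in bytes to
 * the IB path MTU enum stored in the QP.
 */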
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

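/*
 * qib_mtu_from_qp - rdmavt callback converting a QP's path MTU enum
 * back into a byte count.
 */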
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

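/*
 * qib_qp_priv_alloc - allocate the qib-private portion of a QP.
 * rdmavt allocates the common rvt_qp; this supplies the driver-private
 * structure, the send header buffer, and the send work and DMA wait
 * machinery.
 */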
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

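/*
 * qib_qp_priv_free - free what qib_qp_priv_alloc() allocated.
 */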
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

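/*
 * qib_stop_send_queue - stop the send side of a QP by cancelling any
 * queued send work and waiting for it to finish.
 */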
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

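/*
 * qib_quiesce_qp - wait for a QP to go idle: block until outstanding
 * send DMA has completed, then release any tx request still held.
 */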
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

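/*
 * qib_flush_qp_waiters - take the QP off the I/O wait list so it no
 * longer waits for send resources.
 */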
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: The qp
 * @wqe: The built wqe
 *
 * Validate the wr/wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Return: 1 to force direct progress, 0 otherwise, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS

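/*
 * Iterator state used by the debugfs code to walk every QP in the
 * device's QP hash table.
 */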
struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

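/*
 * qib_qp_iter_init - allocate an iterator positioned before the first
 * QP in the table.
 */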
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;

	return iter;
}

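/*
 * qib_qp_iter_next - advance the iterator to the next QP, walking the
 * hash chains via rcu_dereference(); the caller is expected to hold
 * rcu_read_lock().  Returns 0 if a QP was found, 1 at the end of the
 * table.
 */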
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

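/*
 * qib_qp_iter_print - print one line summarizing the current QP's
 * state (flags, PSNs, queue indices, and peer) to the seq_file.
 */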
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif