xref: /openbmc/linux/drivers/infiniband/hw/mlx5/gsi.c (revision dc6a81c3)
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx5_ib.h"

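/*
 * GSI (QP1) emulation.
 *
 * The exposed GSI QP (gsi->ibqp) is a software construct.  Receives, and
 * on devices without the set_deth_sqpn capability also sends, go through
 * a single hardware GSI QP (gsi->rx_qp).  On devices with the capability,
 * sends are demultiplexed across one UD QP per P_Key table index
 * (gsi->tx_qps), so that each send goes out with the P_Key the caller
 * selected via wr->pkey_index.  Because sends can then complete out of
 * order across the TX QPs, completions are staged in the outstanding_wrs
 * ring and reported to the consumer in posting order.
 */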
struct mlx5_ib_gsi_wr {
	struct ib_cqe cqe;
	struct ib_wc wc;
	int send_flags;
	bool completed:1;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp ibqp;
	struct ib_qp *rx_qp;
	u8 port_num;
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	/* Serialize qp state modifications */
	struct mutex mutex;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
{
	return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
}

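/*
 * set_deth_sqpn lets the driver override the source QP number that the
 * hardware writes into the DETH header of an outgoing UD packet.  With
 * it, an ordinary UD QP can send packets that appear to come from QP1,
 * which is what makes the per-P_Key TX QPs below possible.
 */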
static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}

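/*
 * Sweep the outstanding_wrs ring from the consumer index and report a
 * software-generated completion on the GSI CQ for each work request the
 * hardware has finished, stopping at the first one still in flight.
 * This preserves posting order even when the per-P_Key TX QPs complete
 * out of order.  Unsignaled WRs are consumed silently.
 */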
/* Call with gsi->lock locked */
static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
{
	struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
	struct mlx5_ib_gsi_wr *wr;
	u32 index;

	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
	     index++) {
		wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];

		if (!wr->completed)
			break;

		if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
		    wr->send_flags & IB_SEND_SIGNALED)
			WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));

		wr->completed = false;
	}

	gsi->outstanding_ci = index;
}

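/*
 * Completion handler for the private send CQ shared by the TX QPs; runs
 * in softirq context (IB_POLL_SOFTIRQ).  It records the hardware
 * completion in the matching ring slot, restoring the wr_id and QP the
 * consumer originally posted against, then drains whatever prefix of
 * the ring is now complete.
 */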
static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
	struct mlx5_ib_gsi_wr *wr =
		container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
	u64 wr_id;
	unsigned long flags;

	spin_lock_irqsave(&gsi->lock, flags);
	wr->completed = true;
	wr_id = wr->wc.wr_id;
	wr->wc = *wc;
	wr->wc.wr_id = wr_id;
	wr->wc.qp = &gsi->ibqp;

	generate_completions(gsi);
	spin_unlock_irqrestore(&gsi->lock, flags);
}

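/*
 * Create the software GSI QP.  The heavy lifting is done by a hardware
 * QP of type MLX5_IB_QPT_HW_GSI (gsi->rx_qp); when the device can stamp
 * QP1 into the DETH header, that QP keeps only its receive side and the
 * send-side resources are deferred to the per-P_Key TX QPs, which are
 * created lazily once the QP is moved to RTS.
 */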
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_gsi_qp *gsi;
	struct ib_qp_init_attr hw_init_attr = *init_attr;
	const u8 port_num = init_attr->port_num;
	const int num_pkeys = pd->device->attrs.max_pkeys;
	const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
	int ret;

	mlx5_ib_dbg(dev, "creating GSI QP\n");

	if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
		mlx5_ib_warn(dev,
			     "invalid port number %d during GSI QP creation\n",
			     port_num);
		return ERR_PTR(-EINVAL);
	}

	gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
	if (!gsi)
		return ERR_PTR(-ENOMEM);

	gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
	if (!gsi->tx_qps) {
		ret = -ENOMEM;
		goto err_free;
	}

	gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
				       sizeof(*gsi->outstanding_wrs),
				       GFP_KERNEL);
	if (!gsi->outstanding_wrs) {
		ret = -ENOMEM;
		goto err_free_tx;
	}

	mutex_init(&gsi->mutex);

	mutex_lock(&dev->devr.mutex);

	if (dev->devr.ports[port_num - 1].gsi) {
		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
			     port_num);
		ret = -EBUSY;
		goto err_free_wrs;
	}
	gsi->num_qps = num_qps;
	spin_lock_init(&gsi->lock);

	gsi->cap = init_attr->cap;
	gsi->sq_sig_type = init_attr->sq_sig_type;
	gsi->ibqp.qp_num = 1;
	gsi->port_num = port_num;

	gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
			      IB_POLL_SOFTIRQ);
	if (IS_ERR(gsi->cq)) {
		mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
			     PTR_ERR(gsi->cq));
		ret = PTR_ERR(gsi->cq);
		goto err_free_wrs;
	}

	hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
	hw_init_attr.send_cq = gsi->cq;
	if (num_qps) {
		hw_init_attr.cap.max_send_wr = 0;
		hw_init_attr.cap.max_send_sge = 0;
		hw_init_attr.cap.max_inline_data = 0;
	}
	gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
	if (IS_ERR(gsi->rx_qp)) {
		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
			     PTR_ERR(gsi->rx_qp));
		ret = PTR_ERR(gsi->rx_qp);
		goto err_destroy_cq;
	}

	dev->devr.ports[init_attr->port_num - 1].gsi = gsi;

	mutex_unlock(&dev->devr.mutex);

	return &gsi->ibqp;

err_destroy_cq:
	ib_free_cq(gsi->cq);
err_free_wrs:
	mutex_unlock(&dev->devr.mutex);
	kfree(gsi->outstanding_wrs);
err_free_tx:
	kfree(gsi->tx_qps);
err_free:
	kfree(gsi);
	return ERR_PTR(ret);
}

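/*
 * Tear down in reverse order of creation.  The hardware GSI QP is
 * destroyed under devr.mutex so that a concurrent create on the same
 * port sees either the old QP or a free slot, never a half-destroyed
 * one.
 */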
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	const int port_num = gsi->port_num;
	int qp_index;
	int ret;

	mlx5_ib_dbg(dev, "destroying GSI QP\n");

	mutex_lock(&dev->devr.mutex);
	ret = ib_destroy_qp(gsi->rx_qp);
	if (ret) {
		mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
			     ret);
		mutex_unlock(&dev->devr.mutex);
		return ret;
	}
	dev->devr.ports[port_num - 1].gsi = NULL;
	mutex_unlock(&dev->devr.mutex);
	gsi->rx_qp = NULL;

	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
		if (!gsi->tx_qps[qp_index])
			continue;
		WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
		gsi->tx_qps[qp_index] = NULL;
	}

	ib_free_cq(gsi->cq);

	kfree(gsi->outstanding_wrs);
	kfree(gsi->tx_qps);
	kfree(gsi);

	return 0;
}

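/*
 * Each TX QP is an ordinary UD QP that inherits the consumer's
 * capabilities and CQs, except that its send completions funnel into
 * the private gsi->cq and MLX5_IB_QP_CREATE_SQPN_QP1 makes its packets
 * carry QP1 as the source QP number.
 */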
static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
{
	struct ib_pd *pd = gsi->rx_qp->pd;
	struct ib_qp_init_attr init_attr = {
		.event_handler = gsi->rx_qp->event_handler,
		.qp_context = gsi->rx_qp->qp_context,
		.send_cq = gsi->cq,
		.recv_cq = gsi->rx_qp->recv_cq,
		.cap = {
			.max_send_wr = gsi->cap.max_send_wr,
			.max_send_sge = gsi->cap.max_send_sge,
			.max_inline_data = gsi->cap.max_inline_data,
		},
		.sq_sig_type = gsi->sq_sig_type,
		.qp_type = IB_QPT_UD,
		.create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
	};

	return ib_create_qp(pd, &init_attr);
}

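/*
 * Walk a freshly created TX QP through INIT -> RTR -> RTS.  The P_Key
 * index is fixed at INIT time to the QP's own index in the table, which
 * is what binds each TX QP to a single P_Key.
 */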
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
			 u16 qp_index)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct ib_qp_attr attr;
	int mask;
	int ret;

	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = qp_index;
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = gsi->port_num;
	ret = ib_modify_qp(qp, &attr, mask);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	return 0;
}

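/*
 * Create and publish the TX QP for one P_Key index.  The function is
 * idempotent and best-effort: invalid (zero) P_Keys and already-created
 * QPs are skipped, and failures only log a warning, since a later P_Key
 * change event retries via mlx5_ib_gsi_pkey_change().  The QP is
 * created outside gsi->lock and only published under it.
 */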
static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
	struct ib_device *device = gsi->rx_qp->device;
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct ib_qp *qp;
	unsigned long flags;
	u16 pkey;
	int ret;

	ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
	if (ret) {
		mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
			     gsi->port_num, qp_index);
		return;
	}

	if (!pkey) {
		mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d.  Skipping.\n",
			    gsi->port_num, qp_index);
		return;
	}

	spin_lock_irqsave(&gsi->lock, flags);
	qp = gsi->tx_qps[qp_index];
	spin_unlock_irqrestore(&gsi->lock, flags);
	if (qp) {
		mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
			    gsi->port_num, qp_index);
		return;
	}

	qp = create_gsi_ud_qp(gsi);
	if (IS_ERR(qp)) {
		mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
			     PTR_ERR(qp));
		return;
	}

	ret = modify_to_rts(gsi, qp, qp_index);
	if (ret)
		goto err_destroy_qp;

	spin_lock_irqsave(&gsi->lock, flags);
	WARN_ON_ONCE(gsi->tx_qps[qp_index]);
	gsi->tx_qps[qp_index] = qp;
	spin_unlock_irqrestore(&gsi->lock, flags);

	return;

err_destroy_qp:
	/* Destroy the half-initialized QP rather than leaking it. */
	WARN_ON_ONCE(ib_destroy_qp(qp));
}

static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
{
	u16 qp_index;

	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
		setup_qp(gsi, qp_index);
}

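/*
 * State changes are applied to the hardware GSI QP; once it reaches
 * RTS, bring up the per-P_Key TX QPs so sends can be demultiplexed.
 */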
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	int ret;

	mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);

	mutex_lock(&gsi->mutex);
	ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
	if (ret) {
		mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
		goto unlock;
	}

	if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
		setup_qps(gsi);

unlock:
	mutex_unlock(&gsi->mutex);

	return ret;
}

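/*
 * Query goes to the hardware GSI QP, but the reported capabilities are
 * the ones the consumer asked for: when TX QPs are in use, the hardware
 * QP was created with zeroed send capabilities, so its own values would
 * be misleading.
 */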
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	int ret;

	mutex_lock(&gsi->mutex);
	ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
	qp_init_attr->cap = gsi->cap;
	mutex_unlock(&gsi->mutex);

	return ret;
}

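/*
 * Reserve the next slot in the outstanding_wrs ring and point the WR's
 * CQE at it.  outstanding_pi and outstanding_ci are free-running
 * counters and the slot is pi % max_send_wr, so the ring is full
 * exactly when pi == ci + max_send_wr.  For example, with
 * max_send_wr = 4, pi = 6 and ci = 2 means all four slots are in use.
 * A silently dropped WR is stored pre-completed with a synthetic WC; a
 * real send stores just enough (wr_id, pkey_index) for the completion
 * handler to reconstruct the consumer-visible WC later.
 */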
/* Call with gsi->lock locked */
static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
				      struct ib_ud_wr *wr, struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
	struct mlx5_ib_gsi_wr *gsi_wr;

	if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
		mlx5_ib_warn(dev, "no available GSI work request.\n");
		return -ENOMEM;
	}

	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
				       gsi->cap.max_send_wr];
	gsi->outstanding_pi++;
	/*
	 * Record the signaling mode; generate_completions() consults it
	 * for IB_SIGNAL_REQ_WR queues.  Without this the field stays zero
	 * and signaled WRs on such queues would never be reported.
	 */
	gsi_wr->send_flags = wr->wr.send_flags;

	if (!wc) {
		memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
		gsi_wr->wc.pkey_index = wr->pkey_index;
		gsi_wr->wc.wr_id = wr->wr.wr_id;
	} else {
		gsi_wr->wc = *wc;
		gsi_wr->completed = true;
	}

	gsi_wr->cqe.done = &handle_single_completion;
	wr->wr.wr_cqe = &gsi_wr->cqe;

	return 0;
}

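/*
 * No TX QP exists (yet) for the requested P_Key index, so rather than
 * failing the post, complete the WR immediately with a synthetic
 * successful send completion, as if the packet had been sent and lost.
 */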
/* Call with gsi->lock locked */
static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
				   struct ib_ud_wr *wr)
{
	struct ib_wc wc = {
		{ .wr_id = wr->wr.wr_id },
		.status = IB_WC_SUCCESS,
		.opcode = IB_WC_SEND,
		.qp = &gsi->ibqp,
	};
	int ret;

	ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
	if (ret)
		return ret;

	generate_completions(gsi);

	return 0;
}

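/*
 * Route a send WR to the QP that carries its P_Key.  Without the
 * set_deth_sqpn capability everything goes through the hardware GSI QP;
 * otherwise a missing TX QP (NULL) tells the caller to drop silently.
 */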
/* Call with gsi->lock locked */
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
	int qp_index = wr->pkey_index;

	if (!mlx5_ib_deth_sqpn_cap(dev))
		return gsi->rx_qp;

	if (qp_index >= gsi->num_qps)
		return NULL;

	return gsi->tx_qps[qp_index];
}

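/*
 * Post each WR individually, with wr.next cleared, since consecutive
 * WRs on the same list may target different TX QPs.  gsi->lock is held
 * across slot reservation and the actual post so that ring order
 * matches hardware posting order.
 */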
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
	struct ib_qp *tx_qp;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ib_ud_wr cur_wr = *ud_wr(wr);

		cur_wr.wr.next = NULL;

		spin_lock_irqsave(&gsi->lock, flags);
		tx_qp = get_tx_qp(gsi, &cur_wr);
		if (!tx_qp) {
			ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
			if (ret)
				goto err;
			spin_unlock_irqrestore(&gsi->lock, flags);
			continue;
		}

		ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
		if (ret)
			goto err;

		ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
		if (ret) {
			/* Undo the effect of adding the outstanding wr */
			gsi->outstanding_pi--;
			goto err;
		}
		spin_unlock_irqrestore(&gsi->lock, flags);
	}

	return 0;

err:
	spin_unlock_irqrestore(&gsi->lock, flags);
	*bad_wr = wr;
	return ret;
}

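/* Receives need no demultiplexing and go straight to the hardware QP. */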
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);

	return ib_post_recv(gsi->rx_qp, wr, bad_wr);
}

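/*
 * Called on a P_Key table change event to create TX QPs for indices
 * that have become valid since the last attempt.
 */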
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
{
	if (!gsi)
		return;

	mutex_lock(&gsi->mutex);
	setup_qps(gsi);
	mutex_unlock(&gsi->mutex);
}