/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* A QP that supports BlueFlame (BF) must have bits 6 and 7 of its
 * QP number cleared.
 */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

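/* Dispatch an asynchronous event to the QP's event handler.  The QP is
 * looked up under the table lock and its refcount is taken so it cannot
 * be freed while the handler runs; mlx4_qp_free() waits on qp->free for
 * the count to drop to zero.
 */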
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

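/* Post a state-transition command for a QP.  The op[][] table below maps
 * (current state, new state) pairs to the corresponding firmware command;
 * a zero entry means the transition is invalid.  Transitions to RESET are
 * special-cased since they take no context mailbox.
 */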
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		     struct mlx4_qp_context *context,
		     enum mlx4_qp_optpar optpar,
		     int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);

		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof(*context));

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

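/* Wrapper for the common case: issue the transition as a wrapped
 * (non-native) command so it is routed correctly under SR-IOV.
 */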
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

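/* Reserve a range of QP numbers from the zone allocator.  BlueFlame QPs
 * must avoid QP numbers with bits 6 or 7 set (MLX4_BF_QP_SKIP_MASK), so
 * such requests are limited in size and allocated with a skip mask.
 */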
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

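/* Reserve a range of QP numbers.  Under SR-IOV the request goes through
 * the wrapped ALLOC_RES command so the PF's resource tracker accounts
 * for it; otherwise the zone allocator is used directly.
 */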
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

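/* Return a QP number range to the zone allocator.  Ranges starting
 * inside the firmware-reserved region are never freed.
 */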
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

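/* Free a reserved QP range, going through the wrapped FREE_RES command
 * under SR-IOV so the PF's resource tracker stays in sync.
 */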
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else {
		__mlx4_qp_release_range(dev, base_qpn, cnt);
	}
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

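/* Take a reference on every ICM table entry this QP needs: the QP
 * context itself, the auxiliary and alternate-path contexts, the RDMA
 * read/atomic responder resources and the cMPT entry.  On failure, drop
 * the references taken so far in reverse order.
 */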
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

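/* Map the ICM pages backing a QP; under SR-IOV this is done via the
 * wrapped RES_OP_MAP_ICM command so the PF tracks the mapping.
 */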
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

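/* Drop the ICM table references taken in __mlx4_qp_alloc_icm(), in
 * reverse order of acquisition.
 */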
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else {
		__mlx4_qp_free_icm(dev, qpn);
	}
}

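/* Allocate the ICM backing for a previously reserved QP number and make
 * the QP visible to the event dispatcher by inserting it into the radix
 * tree, keyed by the QPN masked to the table size.
 */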
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

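/* Modify selected fields of an existing QP context via the UPDATE_QP
 * command, without a full state transition.  Only the attributes in
 * MLX4_UPDATE_QP_SUPPORTED_ATTRS may be changed; the two mask words in
 * the mailbox tell the firmware which fields to apply.
 */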
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params =
			cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

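/* Unhook a QP from the radix tree so no new async events can find it.
 * References already taken by mlx4_qp_event() are still honored.
 */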
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

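/* Release a QP: wait for all event handlers holding a reference to
 * complete, then free its ICM backing.  The caller is expected to have
 * already removed the QP from the radix tree with mlx4_qp_remove().
 */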
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

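/* Set up the QP-number zone allocator.  Three classes of zones are
 * created: a general zone for ordinary QPs, an RSS zone covering the
 * firmware's A0 steering area, and one or more RAW_ETH zones carved out
 * of the RSS zone for BlueFlame-capable QPs (QPNs with bits 6 and 7
 * clear).
 */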
static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (!qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (!bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/*  We have a single zone for the A0 steering QPs area of the FW. This area
	 *  needs to be split into subareas. One set of subareas is for RSS QPs
	 *  (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 *  is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 *  Currently, the values returned by the FW (A0 steering area starting qp number
	 *  and A0 steering area size) are such that there are only two subareas -- one
	 *  for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < ARRAY_SIZE(*bitmap); k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK))) {
			size = requested_size;
		} else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/*  We will not take this path if last_offset was
					 *  already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of
			 * "size" QPs in which both bits 6 and 7 are zero, because
			 * we pass it MLX4_BF_QP_SKIP_MASK as the skip mask.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qp_table->zones_uids); i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (!bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

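/* Initialize the QP table: lay out the reserved QP regions, create the
 * zone allocator, compute the special (SQP) and proxy/tunnel QP number
 * bases, and tell the firmware where the special QP block starts.
 */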
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel QP offsets for the PF here,
		 * since the PF does not call mlx4_slave_cap */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

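/* Read a QP's context back from the firmware with QUERY_QP.  The
 * context starts 8 bytes into the mailbox, matching the layout used by
 * the modify commands.
 */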
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof(*context));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

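/* Walk a QP through RST -> INIT -> RTR -> RTS using the same context,
 * updating *qp_state after each successful transition so the caller
 * knows how far the QP got on failure.
 */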
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= cpu_to_be32(~MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);