/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* QPs that support BlueFlame (BF) must have bits 6 and 7 of the QP number cleared */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40
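
/*
 * Illustrative example (not used by the driver): with a skip mask of
 * 0xc0, a QPN is BF-eligible iff (qpn & MLX4_BF_QP_SKIP_MASK) == 0,
 * so eligible QPNs come in runs of 64 in every block of 256
 * (0x000-0x03f, 0x100-0x13f, ...), which is why a contiguous BF range
 * can never span more than MLX4_MAX_BF_QP_RANGE (0x40) QPs.
 */
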
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}
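
/*
 * Worked example (hypothetical numbers): if base_sqpn were 64 and the
 * master were function 0, the real QP0s would be QPNs 64 (port 1) and
 * 65 (port 2), and the master's proxy QP0s would be base_proxy_sqpn
 * and base_proxy_sqpn + 1.  is_master_qp0() returns true for exactly
 * those four QPNs, and (qpn & 1) + 1 then recovers the port number.
 */
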
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);

		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
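
/*
 * The op[][] table above encodes the legal transitions of the QP state
 * machine; for example, op[MLX4_QP_STATE_RST][MLX4_QP_STATE_INIT]
 * resolves to MLX4_CMD_RST2INIT_QP.  Any pair not listed leaves its
 * entry zero, so __mlx4_qp_modify() rejects the transition with
 * -EINVAL before issuing any firmware command.
 */
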
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
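
/*
 * Typical caller pattern (sketch only, error handling elided):
 *
 *	int base;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base, 0);
 *	...
 *	mlx4_qp_release_range(dev, base, 8);
 *
 * On a multi-function device both calls are forwarded to the PF via
 * MLX4_CMD_ALLOC_RES/MLX4_CMD_FREE_RES; otherwise the zone allocator
 * is used directly.
 */
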
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
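
/*
 * Note on ordering: __mlx4_qp_alloc_icm() takes references on five ICM
 * tables (qp, auxc, altc, rdmarc, cmpt) and unwinds them in reverse on
 * failure; __mlx4_qp_free_icm() releases them in the same reverse
 * order.  mlx4_qp_alloc() then publishes the QP in the radix tree
 * keyed by qpn & (num_qps - 1), which is the lookup mlx4_qp_event()
 * relies on when dispatching async events.
 */
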
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params =
			cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
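
/*
 * Teardown protocol: mlx4_qp_free() drops the reference taken in
 * mlx4_qp_alloc() and then blocks on qp->free, which the last
 * in-flight event handler completes from mlx4_qp_event(); only then is
 * the ICM backing released.  Callers are expected to have called
 * mlx4_qp_remove() first so that no new lookups can find the QP.
 */
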
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/* We have a single zone for the A0 steering QPs area of the FW. This area
	 * needs to be split into subareas. One set of subareas is for RSS QPs
	 * (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 * Currently, the values returned by the FW (A0 steering area starting qp
	 * number and A0 steering area size) are such that there are only two
	 * subareas -- one for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < ARRAY_SIZE(*bitmap); k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* We will not take this path if last_offset was
					 * already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of
			 * "size" QPs in which both bits 6 and 7 are zero, because we
			 * pass it MLX4_BF_QP_SKIP_MASK as the skip mask.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap; we'll allocate from different zones
			 * (since at least one is reserved).
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			if (!err)
				mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}
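
/*
 * Resulting layout, for the common two-subarea case described above
 * (illustrative): zone GENERAL covers the ordinary QPNs, zone RSS
 * covers the A0 offsets that may have BF bits set, and zone RAW_ETH
 * holds up to MLX4_QP_TABLE_RAW_ETH_SIZE (256) QPNs carved out so that
 * bits 6 and 7 are zero -- the pool __mlx4_qp_reserve_range() draws
 * from when both MLX4_RESERVE_A0_QP and MLX4_RESERVE_ETH_BF_QP are
 * requested.
 */
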
static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qp_table->zones_uids); i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* Reserve at least one extra entry for bitmaps we don't have enough
	 * space for.
	 */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel QP offsets for the PF here,
		 * since the PF does not call mlx4_slave_cap.
		 */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}
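
/*
 * Worked example of the special-QP layout (with base_sqpn denoted S):
 * QPNs S..S+7 are the real special QPs; S+8 onward holds 8 proxy SQPs
 * per function (base_proxy_sqpn = S + 8); and the tunnel SQPs start at
 * S + 8 + 8 * MLX4_MFUNC_MAX, mirroring the proxies one-to-one.
 */
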
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof *context);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}
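
/*
 * Entropy example (illustrative): for a connected QP with folded
 * source QPN 0x0123 and folded destination QPN 0x0456, the function
 * returns (0x0123 ^ 0x0456) | 0xc000 == 0xc575.  ORing in 0xc000
 * presumably keeps the derived RoCE v2 UDP source port in the dynamic
 * port range (>= 49152).
 */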