/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* A QP that supports BlueFlame (BF) must have bits 6 and 7 of its QP number cleared */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
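
/*
 * Note on the pattern above: the lookup and the refcount bump happen under
 * qp_table->lock, but qp->event() itself runs outside the lock.  The
 * refcount pairs with mlx4_qp_free(), which waits on qp->free before
 * tearing the QP down.  An illustrative handler (a sketch, not in-tree
 * code; "my_wrapper"/"mqp" are hypothetical names) would look like:
 *
 *	static void my_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 *	{
 *		struct my_wrapper *w = container_of(qp, struct my_wrapper, mqp);
 *
 *		pr_debug("QP 0x%06x got async event %d\n", qp->qpn, type);
 *	}
 */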

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
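
/*
 * Illustrative use of mlx4_qp_modify() (a sketch, not in-tree code):
 * callers drive one transition at a time, e.g. RST -> INIT, with a
 * context already filled in for the target state and an MTT describing
 * the QP's buffer:
 *
 *	err = mlx4_qp_modify(dev, &mtt, MLX4_QP_STATE_RST,
 *			     MLX4_QP_STATE_INIT, &ctx, 0, 0, &qp);
 *
 * Transitions absent from the op[][] table above are rejected with
 * -EINVAL; mlx4_qp_to_ready() at the bottom of this file chains the
 * transitions needed to reach RTS.
 */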

int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
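
/*
 * Sketch of the reserve/release pairing (illustrative only): a caller
 * reserves an aligned block of QP numbers and later returns the whole
 * block.  Passing MLX4_RESERVE_ETH_BF_QP makes the allocator skip QP
 * numbers with bits 6 or 7 set (see MLX4_BF_QP_SKIP_MASK above), and
 * such a request may not exceed MLX4_MAX_BF_QP_RANGE entries:
 *
 *	int base_qpn;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn,
 *				    MLX4_RESERVE_ETH_BF_QP);
 *	...
 *	mlx4_qp_release_range(dev, base_qpn, 8);
 */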

int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}
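
/*
 * The alloc/free pair above follows the usual LIFO unwind idiom: every
 * mlx4_table_get() that succeeded in __mlx4_qp_alloc_icm() is undone by
 * a mlx4_table_put() in reverse order, both on its error path (the goto
 * chain) and in __mlx4_qp_free_icm().  Keeping the two orders mirror
 * images of each other is what keeps the error path correct when a new
 * per-QP ICM table is added.
 */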

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	/* Validate attr before allocating the mailbox so the error
	 * path cannot leak it.
	 */
	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
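
/*
 * Putting the pieces together, a typical QP lifecycle (illustrative
 * sketch; the real consumers live in mlx4_en and mlx4_ib) is:
 *
 *	mlx4_qp_reserve_range(dev, 1, 1, &qpn, 0);
 *	mlx4_qp_alloc(dev, qpn, &qp, GFP_KERNEL);
 *	mlx4_qp_to_ready(dev, &mtt, &ctx, &qp, &qp_state);
 *	...
 *	mlx4_qp_remove(dev, &qp);	(drop it from the radix tree)
 *	mlx4_qp_free(dev, &qp);		(waits out event handlers, frees ICM)
 *	mlx4_qp_release_range(dev, qpn, 1);
 */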

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY	2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY	1
#define MLX4_QP_TABLE_RAW_ETH_SIZE	256

static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/*  We have a single zone for the A0 steering QPs area of the FW. This area
	 *  needs to be split into subareas. One set of subareas is for RSS QPs
	 *  (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 *  is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 *  Currently, the values returned by the FW (A0 steering area starting qp
	 *  number and A0 steering area size) are such that there are only two
	 *  subareas -- one for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/*  We will not take this path if last_offset was
					 *  already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of
			 * "size" QPs in which both bits 6 and 7 are zero, because we
			 * pass it MLX4_BF_QP_SKIP_MASK as the skip mask.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}
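
/*
 * Worked example for the splitting logic above, using the constants from
 * the top of this file (MLX4_BF_QP_SKIP_MASK = 0xc0, so bf_mask = 0x3f
 * and requested_size = min(256, 0x40) = 64): if last_offset is, say,
 * 0x50, then 0x50 & 0xc0 != 0, and when the range cannot be placed as-is
 * the code advances to candidate_offset = (0x50 | 0xc0 | 0x3f) + 1 =
 * 0x100, the next QP number with bits 6 and 7 clear.  RAW_ETH subareas
 * are therefore always carved out of stretches where QP numbers have
 * bits 6,7 == 0, as BlueFlame requires.
 */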

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j, tmp;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
					tmp = sort[j];
					sort[j] = sort[j - 1];
					sort[j - 1] = tmp;
				}
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel QP offsets for the PF here,
		 * since the PF does not call mlx4_slave_cap */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}
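
/*
 * Example of the special-QP numbering set up above (a sketch; actual
 * values depend on the FW-reported region sizes): with base_sqpn = B,
 * the 8 real SQPs occupy [B, B+7]; the proxy SQPs of function f start
 * at B + 8 + 8*f; and each proxy's tunnel QP sits at the proxy's number
 * plus 8 * MLX4_MFUNC_MAX.  That is exactly the layout the qp0_proxy/
 * qp0_tunnel/qp1_proxy/qp1_tunnel arrays encode for the PF.
 */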

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof *context);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
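
/*
 * Illustrative mlx4_qp_query() call (a sketch, not in-tree code): read
 * back the current hardware context of a QP, e.g. to inspect the state
 * field kept in bits 31:28 of the flags word:
 *
 *	struct mlx4_qp_context ctx;
 *	int state;
 *
 *	err = mlx4_qp_query(dev, &qp, &ctx);
 *	if (!err)
 *		state = be32_to_cpu(ctx.flags) >> 28;
 */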

int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);