/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID	(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf ==
mlx4_master_func_num(dev)) { 388 res_alloc->res_free = num_instances; 389 if (res_type == RES_MTT) { 390 /* reserved mtts will be taken out of the PF allocation */ 391 res_alloc->res_free += dev->caps.reserved_mtts; 392 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; 393 res_alloc->quota[vf] += dev->caps.reserved_mtts; 394 } 395 } 396 } 397 398 void mlx4_init_quotas(struct mlx4_dev *dev) 399 { 400 struct mlx4_priv *priv = mlx4_priv(dev); 401 int pf; 402 403 /* quotas for VFs are initialized in mlx4_slave_cap */ 404 if (mlx4_is_slave(dev)) 405 return; 406 407 if (!mlx4_is_mfunc(dev)) { 408 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - 409 mlx4_num_reserved_sqps(dev); 410 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; 411 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; 412 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; 413 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; 414 return; 415 } 416 417 pf = mlx4_master_func_num(dev); 418 dev->quotas.qp = 419 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; 420 dev->quotas.cq = 421 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; 422 dev->quotas.srq = 423 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; 424 dev->quotas.mtt = 425 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; 426 dev->quotas.mpt = 427 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; 428 } 429 int mlx4_init_resource_tracker(struct mlx4_dev *dev) 430 { 431 struct mlx4_priv *priv = mlx4_priv(dev); 432 int i, j; 433 int t; 434 435 priv->mfunc.master.res_tracker.slave_list = 436 kzalloc(dev->num_slaves * sizeof(struct slave_list), 437 GFP_KERNEL); 438 if (!priv->mfunc.master.res_tracker.slave_list) 439 return -ENOMEM; 440 441 for (i = 0 ; i < dev->num_slaves; i++) { 442 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) 443 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. 
444 slave_list[i].res_list[t]); 445 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 446 } 447 448 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", 449 dev->num_slaves); 450 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 451 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; 452 453 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 454 struct resource_allocator *res_alloc = 455 &priv->mfunc.master.res_tracker.res_alloc[i]; 456 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 457 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 458 if (i == RES_MAC || i == RES_VLAN) 459 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * 460 (dev->num_vfs + 1) * sizeof(int), 461 GFP_KERNEL); 462 else 463 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 464 465 if (!res_alloc->quota || !res_alloc->guaranteed || 466 !res_alloc->allocated) 467 goto no_mem_err; 468 469 spin_lock_init(&res_alloc->alloc_lock); 470 for (t = 0; t < dev->num_vfs + 1; t++) { 471 struct mlx4_active_ports actv_ports = 472 mlx4_get_active_ports(dev, t); 473 switch (i) { 474 case RES_QP: 475 initialize_res_quotas(dev, res_alloc, RES_QP, 476 t, dev->caps.num_qps - 477 dev->caps.reserved_qps - 478 mlx4_num_reserved_sqps(dev)); 479 break; 480 case RES_CQ: 481 initialize_res_quotas(dev, res_alloc, RES_CQ, 482 t, dev->caps.num_cqs - 483 dev->caps.reserved_cqs); 484 break; 485 case RES_SRQ: 486 initialize_res_quotas(dev, res_alloc, RES_SRQ, 487 t, dev->caps.num_srqs - 488 dev->caps.reserved_srqs); 489 break; 490 case RES_MPT: 491 initialize_res_quotas(dev, res_alloc, RES_MPT, 492 t, dev->caps.num_mpts - 493 dev->caps.reserved_mrws); 494 break; 495 case RES_MTT: 496 initialize_res_quotas(dev, res_alloc, RES_MTT, 497 t, dev->caps.num_mtts - 498 dev->caps.reserved_mtts); 499 break; 500 case RES_MAC: 501 if (t == mlx4_master_func_num(dev)) { 502 int max_vfs_pport = 0; 503 /* Calculate the max vfs per port for */ 504 /* both ports. 
*/ 505 for (j = 0; j < dev->caps.num_ports; 506 j++) { 507 struct mlx4_slaves_pport slaves_pport = 508 mlx4_phys_to_slaves_pport(dev, j + 1); 509 unsigned current_slaves = 510 bitmap_weight(slaves_pport.slaves, 511 dev->caps.num_ports) - 1; 512 if (max_vfs_pport < current_slaves) 513 max_vfs_pport = 514 current_slaves; 515 } 516 res_alloc->quota[t] = 517 MLX4_MAX_MAC_NUM - 518 2 * max_vfs_pport; 519 res_alloc->guaranteed[t] = 2; 520 for (j = 0; j < MLX4_MAX_PORTS; j++) 521 res_alloc->res_port_free[j] = 522 MLX4_MAX_MAC_NUM; 523 } else { 524 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 525 res_alloc->guaranteed[t] = 2; 526 } 527 break; 528 case RES_VLAN: 529 if (t == mlx4_master_func_num(dev)) { 530 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; 531 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; 532 for (j = 0; j < MLX4_MAX_PORTS; j++) 533 res_alloc->res_port_free[j] = 534 res_alloc->quota[t]; 535 } else { 536 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; 537 res_alloc->guaranteed[t] = 0; 538 } 539 break; 540 case RES_COUNTER: 541 res_alloc->quota[t] = dev->caps.max_counters; 542 res_alloc->guaranteed[t] = 0; 543 if (t == mlx4_master_func_num(dev)) 544 res_alloc->res_free = res_alloc->quota[t]; 545 break; 546 default: 547 break; 548 } 549 if (i == RES_MAC || i == RES_VLAN) { 550 for (j = 0; j < dev->caps.num_ports; j++) 551 if (test_bit(j, actv_ports.ports)) 552 res_alloc->res_port_rsvd[j] += 553 res_alloc->guaranteed[t]; 554 } else { 555 res_alloc->res_reserved += res_alloc->guaranteed[t]; 556 } 557 } 558 } 559 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 560 return 0; 561 562 no_mem_err: 563 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 564 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 565 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 566 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 567 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 568 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 569 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 570 } 571 return -ENOMEM; 572 } 573 574 void mlx4_free_resource_tracker(struct mlx4_dev *dev, 575 enum mlx4_res_tracker_free_type type) 576 { 577 struct mlx4_priv *priv = mlx4_priv(dev); 578 int i; 579 580 if (priv->mfunc.master.res_tracker.slave_list) { 581 if (type != RES_TR_FREE_STRUCTS_ONLY) { 582 for (i = 0; i < dev->num_slaves; i++) { 583 if (type == RES_TR_FREE_ALL || 584 dev->caps.function != i) 585 mlx4_delete_all_resources_for_slave(dev, i); 586 } 587 /* free master's vlans */ 588 i = dev->caps.function; 589 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 590 rem_slave_vlans(dev, i); 591 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 592 } 593 594 if (type != RES_TR_FREE_SLAVES_ONLY) { 595 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 596 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 597 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 598 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 599 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 600 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 601 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 602 } 603 kfree(priv->mfunc.master.res_tracker.slave_list); 604 priv->mfunc.master.res_tracker.slave_list = NULL; 605 } 606 } 607 } 608 609 static void update_pkey_index(struct mlx4_dev *dev, int slave, 610 struct mlx4_cmd_mailbox *inbox) 611 { 612 u8 sched = *(u8 *)(inbox->buf + 64); 613 u8 orig_index = 
*(u8 *)(inbox->buf + 35); 614 u8 new_index; 615 struct mlx4_priv *priv = mlx4_priv(dev); 616 int port; 617 618 port = (sched >> 6 & 1) + 1; 619 620 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; 621 *(u8 *)(inbox->buf + 35) = new_index; 622 } 623 624 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, 625 u8 slave) 626 { 627 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; 628 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); 629 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 630 int port; 631 632 if (MLX4_QP_ST_UD == ts) { 633 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 634 if (mlx4_is_eth(dev, port)) 635 qp_ctx->pri_path.mgid_index = 636 mlx4_get_base_gid_ix(dev, slave, port) | 0x80; 637 else 638 qp_ctx->pri_path.mgid_index = slave | 0x80; 639 640 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { 641 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 642 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 643 if (mlx4_is_eth(dev, port)) { 644 qp_ctx->pri_path.mgid_index += 645 mlx4_get_base_gid_ix(dev, slave, port); 646 qp_ctx->pri_path.mgid_index &= 0x7f; 647 } else { 648 qp_ctx->pri_path.mgid_index = slave & 0x7F; 649 } 650 } 651 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 652 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 653 if (mlx4_is_eth(dev, port)) { 654 qp_ctx->alt_path.mgid_index += 655 mlx4_get_base_gid_ix(dev, slave, port); 656 qp_ctx->alt_path.mgid_index &= 0x7f; 657 } else { 658 qp_ctx->alt_path.mgid_index = slave & 0x7F; 659 } 660 } 661 } 662 } 663 664 static int update_vport_qp_param(struct mlx4_dev *dev, 665 struct mlx4_cmd_mailbox *inbox, 666 u8 slave, u32 qpn) 667 { 668 struct mlx4_qp_context *qpc = inbox->buf + 8; 669 struct mlx4_vport_oper_state *vp_oper; 670 struct mlx4_priv *priv; 671 int port; 672 673 port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1; 674 priv = mlx4_priv(dev); 675 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 676 677 if (MLX4_VGT != vp_oper->state.default_vlan) { 678 /* the reserved QPs (special, proxy, tunnel) 679 * do not operate over vlans 680 */ 681 if (mlx4_is_qp_reserved(dev, qpn)) 682 return 0; 683 684 /* force strip vlan by clear vsd */ 685 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); 686 687 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && 688 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { 689 qpc->pri_path.vlan_control = 690 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 691 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | 692 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | 693 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 694 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | 695 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 696 } else if (0 != vp_oper->state.default_vlan) { 697 qpc->pri_path.vlan_control = 698 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 699 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 700 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 701 } else { /* priority tagged */ 702 qpc->pri_path.vlan_control = 703 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 704 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 705 } 706 707 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; 708 qpc->pri_path.vlan_index = vp_oper->vlan_idx; 709 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; 710 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; 711 qpc->pri_path.sched_queue &= 0xC7; 712 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; 713 } 714 if (vp_oper->state.spoofchk) { 715 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 716 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 717 } 718 return 0; 719 } 720 721 static int mpt_mask(struct mlx4_dev *dev) 722 { 723 return dev->caps.num_mpts - 1; 724 } 725 726 static void *find_res(struct mlx4_dev *dev, u64 res_id, 727 enum mlx4_resource type) 728 { 729 struct mlx4_priv *priv = mlx4_priv(dev); 730 731 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type], 732 res_id); 733 } 734 735 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, 736 enum mlx4_resource type, 737 void *res) 738 { 739 struct res_common *r; 740 int err = 0; 741 742 spin_lock_irq(mlx4_tlock(dev)); 743 r = find_res(dev, res_id, type); 744 if (!r) { 745 err = -ENONET; 746 goto exit; 747 } 748 749 if (r->state == RES_ANY_BUSY) { 750 err = -EBUSY; 751 goto exit; 752 } 753 754 if (r->owner != slave) { 755 err = -EPERM; 756 goto exit; 757 } 758 759 r->from_state = r->state; 760 r->state = RES_ANY_BUSY; 761 762 if (res) 763 *((struct res_common **)res) = r; 764 765 exit: 766 spin_unlock_irq(mlx4_tlock(dev)); 767 return err; 768 } 769 770 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, 771 enum mlx4_resource type, 772 u64 res_id, int *slave) 773 { 774 775 struct res_common *r; 776 int err = -ENOENT; 777 int id = res_id; 778 779 if (type == RES_QP) 780 id &= 0x7fffff; 781 spin_lock(mlx4_tlock(dev)); 782 783 r = find_res(dev, id, type); 784 if (r) { 785 *slave = r->owner; 786 err = 0; 787 } 788 spin_unlock(mlx4_tlock(dev)); 789 790 return err; 791 } 792 793 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, 794 enum mlx4_resource type) 795 { 796 struct res_common *r; 797 798 spin_lock_irq(mlx4_tlock(dev)); 799 r = find_res(dev, res_id, type); 800 if (r) 801 r->state = r->from_state; 802 spin_unlock_irq(mlx4_tlock(dev)); 803 } 804 805 static struct res_common *alloc_qp_tr(int id) 806 { 807 struct res_qp *ret; 808 809 ret = 
kzalloc(sizeof *ret, GFP_KERNEL); 810 if (!ret) 811 return NULL; 812 813 ret->com.res_id = id; 814 ret->com.state = RES_QP_RESERVED; 815 ret->local_qpn = id; 816 INIT_LIST_HEAD(&ret->mcg_list); 817 spin_lock_init(&ret->mcg_spl); 818 atomic_set(&ret->ref_count, 0); 819 820 return &ret->com; 821 } 822 823 static struct res_common *alloc_mtt_tr(int id, int order) 824 { 825 struct res_mtt *ret; 826 827 ret = kzalloc(sizeof *ret, GFP_KERNEL); 828 if (!ret) 829 return NULL; 830 831 ret->com.res_id = id; 832 ret->order = order; 833 ret->com.state = RES_MTT_ALLOCATED; 834 atomic_set(&ret->ref_count, 0); 835 836 return &ret->com; 837 } 838 839 static struct res_common *alloc_mpt_tr(int id, int key) 840 { 841 struct res_mpt *ret; 842 843 ret = kzalloc(sizeof *ret, GFP_KERNEL); 844 if (!ret) 845 return NULL; 846 847 ret->com.res_id = id; 848 ret->com.state = RES_MPT_RESERVED; 849 ret->key = key; 850 851 return &ret->com; 852 } 853 854 static struct res_common *alloc_eq_tr(int id) 855 { 856 struct res_eq *ret; 857 858 ret = kzalloc(sizeof *ret, GFP_KERNEL); 859 if (!ret) 860 return NULL; 861 862 ret->com.res_id = id; 863 ret->com.state = RES_EQ_RESERVED; 864 865 return &ret->com; 866 } 867 868 static struct res_common *alloc_cq_tr(int id) 869 { 870 struct res_cq *ret; 871 872 ret = kzalloc(sizeof *ret, GFP_KERNEL); 873 if (!ret) 874 return NULL; 875 876 ret->com.res_id = id; 877 ret->com.state = RES_CQ_ALLOCATED; 878 atomic_set(&ret->ref_count, 0); 879 880 return &ret->com; 881 } 882 883 static struct res_common *alloc_srq_tr(int id) 884 { 885 struct res_srq *ret; 886 887 ret = kzalloc(sizeof *ret, GFP_KERNEL); 888 if (!ret) 889 return NULL; 890 891 ret->com.res_id = id; 892 ret->com.state = RES_SRQ_ALLOCATED; 893 atomic_set(&ret->ref_count, 0); 894 895 return &ret->com; 896 } 897 898 static struct res_common *alloc_counter_tr(int id) 899 { 900 struct res_counter *ret; 901 902 ret = kzalloc(sizeof *ret, GFP_KERNEL); 903 if (!ret) 904 return NULL; 905 906 ret->com.res_id = id; 907 ret->com.state = RES_COUNTER_ALLOCATED; 908 909 return &ret->com; 910 } 911 912 static struct res_common *alloc_xrcdn_tr(int id) 913 { 914 struct res_xrcdn *ret; 915 916 ret = kzalloc(sizeof *ret, GFP_KERNEL); 917 if (!ret) 918 return NULL; 919 920 ret->com.res_id = id; 921 ret->com.state = RES_XRCD_ALLOCATED; 922 923 return &ret->com; 924 } 925 926 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) 927 { 928 struct res_fs_rule *ret; 929 930 ret = kzalloc(sizeof *ret, GFP_KERNEL); 931 if (!ret) 932 return NULL; 933 934 ret->com.res_id = id; 935 ret->com.state = RES_FS_RULE_ALLOCATED; 936 ret->qpn = qpn; 937 return &ret->com; 938 } 939 940 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, 941 int extra) 942 { 943 struct res_common *ret; 944 945 switch (type) { 946 case RES_QP: 947 ret = alloc_qp_tr(id); 948 break; 949 case RES_MPT: 950 ret = alloc_mpt_tr(id, extra); 951 break; 952 case RES_MTT: 953 ret = alloc_mtt_tr(id, extra); 954 break; 955 case RES_EQ: 956 ret = alloc_eq_tr(id); 957 break; 958 case RES_CQ: 959 ret = alloc_cq_tr(id); 960 break; 961 case RES_SRQ: 962 ret = alloc_srq_tr(id); 963 break; 964 case RES_MAC: 965 printk(KERN_ERR "implementation missing\n"); 966 return NULL; 967 case RES_COUNTER: 968 ret = alloc_counter_tr(id); 969 break; 970 case RES_XRCD: 971 ret = alloc_xrcdn_tr(id); 972 break; 973 case RES_FS_RULE: 974 ret = alloc_fs_rule_tr(id, extra); 975 break; 976 default: 977 return NULL; 978 } 979 if (ret) 980 ret->owner = slave; 981 982 return ret; 983 } 984 985 
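/*
 * Note (illustrative sketch, not part of the original driver): alloc_tr()
 * above is the per-type constructor used by add_res_range() below.  Callers
 * obtain the HW resource first and only then register it with the tracker,
 * unwinding the HW allocation if tracking fails.  For example, the QP
 * reserve path (see qp_alloc_res() further down) is essentially:
 *
 *	err = __mlx4_qp_reserve_range(dev, count, align, &base);
 *	if (!err) {
 *		err = add_res_range(dev, slave, base, count, RES_QP, 0);
 *		if (err)
 *			__mlx4_qp_release_range(dev, base, count);
 *	}
 */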
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so unwind to index 0, not to the resource id base */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if
(res->com.state != RES_CQ_ALLOCATED) 1127 return -EPERM; 1128 1129 return 0; 1130 } 1131 1132 static int remove_srq_ok(struct res_srq *res) 1133 { 1134 if (res->com.state == RES_SRQ_BUSY) 1135 return -EBUSY; 1136 else if (res->com.state != RES_SRQ_ALLOCATED) 1137 return -EPERM; 1138 1139 return 0; 1140 } 1141 1142 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) 1143 { 1144 switch (type) { 1145 case RES_QP: 1146 return remove_qp_ok((struct res_qp *)res); 1147 case RES_CQ: 1148 return remove_cq_ok((struct res_cq *)res); 1149 case RES_SRQ: 1150 return remove_srq_ok((struct res_srq *)res); 1151 case RES_MPT: 1152 return remove_mpt_ok((struct res_mpt *)res); 1153 case RES_MTT: 1154 return remove_mtt_ok((struct res_mtt *)res, extra); 1155 case RES_MAC: 1156 return -ENOSYS; 1157 case RES_EQ: 1158 return remove_eq_ok((struct res_eq *)res); 1159 case RES_COUNTER: 1160 return remove_counter_ok((struct res_counter *)res); 1161 case RES_XRCD: 1162 return remove_xrcdn_ok((struct res_xrcdn *)res); 1163 case RES_FS_RULE: 1164 return remove_fs_rule_ok((struct res_fs_rule *)res); 1165 default: 1166 return -EINVAL; 1167 } 1168 } 1169 1170 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, 1171 enum mlx4_resource type, int extra) 1172 { 1173 u64 i; 1174 int err; 1175 struct mlx4_priv *priv = mlx4_priv(dev); 1176 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1177 struct res_common *r; 1178 1179 spin_lock_irq(mlx4_tlock(dev)); 1180 for (i = base; i < base + count; ++i) { 1181 r = res_tracker_lookup(&tracker->res_tree[type], i); 1182 if (!r) { 1183 err = -ENOENT; 1184 goto out; 1185 } 1186 if (r->owner != slave) { 1187 err = -EPERM; 1188 goto out; 1189 } 1190 err = remove_ok(r, type, extra); 1191 if (err) 1192 goto out; 1193 } 1194 1195 for (i = base; i < base + count; ++i) { 1196 r = res_tracker_lookup(&tracker->res_tree[type], i); 1197 rb_erase(&r->node, &tracker->res_tree[type]); 1198 list_del(&r->list); 1199 kfree(r); 1200 } 1201 err = 0; 1202 1203 out: 1204 spin_unlock_irq(mlx4_tlock(dev)); 1205 1206 return err; 1207 } 1208 1209 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, 1210 enum res_qp_states state, struct res_qp **qp, 1211 int alloc) 1212 { 1213 struct mlx4_priv *priv = mlx4_priv(dev); 1214 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1215 struct res_qp *r; 1216 int err = 0; 1217 1218 spin_lock_irq(mlx4_tlock(dev)); 1219 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); 1220 if (!r) 1221 err = -ENOENT; 1222 else if (r->com.owner != slave) 1223 err = -EPERM; 1224 else { 1225 switch (state) { 1226 case RES_QP_BUSY: 1227 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n", 1228 __func__, r->com.res_id); 1229 err = -EBUSY; 1230 break; 1231 1232 case RES_QP_RESERVED: 1233 if (r->com.state == RES_QP_MAPPED && !alloc) 1234 break; 1235 1236 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); 1237 err = -EINVAL; 1238 break; 1239 1240 case RES_QP_MAPPED: 1241 if ((r->com.state == RES_QP_RESERVED && alloc) || 1242 r->com.state == RES_QP_HW) 1243 break; 1244 else { 1245 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", 1246 r->com.res_id); 1247 err = -EINVAL; 1248 } 1249 1250 break; 1251 1252 case RES_QP_HW: 1253 if (r->com.state != RES_QP_MAPPED) 1254 err = -EINVAL; 1255 break; 1256 default: 1257 err = -EINVAL; 1258 } 1259 1260 if (!err) { 1261 r->com.from_state = r->com.state; 1262 r->com.to_state = state; 1263 r->com.state = RES_QP_BUSY; 1264 if (qp) 1265 *qp = r; 1266 } 
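		/* on success the QP is left in RES_QP_BUSY; the caller must
		 * finish the transition with res_end_move() or roll it back
		 * with res_abort_move()
		 */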
1267 } 1268 1269 spin_unlock_irq(mlx4_tlock(dev)); 1270 1271 return err; 1272 } 1273 1274 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1275 enum res_mpt_states state, struct res_mpt **mpt) 1276 { 1277 struct mlx4_priv *priv = mlx4_priv(dev); 1278 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1279 struct res_mpt *r; 1280 int err = 0; 1281 1282 spin_lock_irq(mlx4_tlock(dev)); 1283 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index); 1284 if (!r) 1285 err = -ENOENT; 1286 else if (r->com.owner != slave) 1287 err = -EPERM; 1288 else { 1289 switch (state) { 1290 case RES_MPT_BUSY: 1291 err = -EINVAL; 1292 break; 1293 1294 case RES_MPT_RESERVED: 1295 if (r->com.state != RES_MPT_MAPPED) 1296 err = -EINVAL; 1297 break; 1298 1299 case RES_MPT_MAPPED: 1300 if (r->com.state != RES_MPT_RESERVED && 1301 r->com.state != RES_MPT_HW) 1302 err = -EINVAL; 1303 break; 1304 1305 case RES_MPT_HW: 1306 if (r->com.state != RES_MPT_MAPPED) 1307 err = -EINVAL; 1308 break; 1309 default: 1310 err = -EINVAL; 1311 } 1312 1313 if (!err) { 1314 r->com.from_state = r->com.state; 1315 r->com.to_state = state; 1316 r->com.state = RES_MPT_BUSY; 1317 if (mpt) 1318 *mpt = r; 1319 } 1320 } 1321 1322 spin_unlock_irq(mlx4_tlock(dev)); 1323 1324 return err; 1325 } 1326 1327 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1328 enum res_eq_states state, struct res_eq **eq) 1329 { 1330 struct mlx4_priv *priv = mlx4_priv(dev); 1331 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1332 struct res_eq *r; 1333 int err = 0; 1334 1335 spin_lock_irq(mlx4_tlock(dev)); 1336 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index); 1337 if (!r) 1338 err = -ENOENT; 1339 else if (r->com.owner != slave) 1340 err = -EPERM; 1341 else { 1342 switch (state) { 1343 case RES_EQ_BUSY: 1344 err = -EINVAL; 1345 break; 1346 1347 case RES_EQ_RESERVED: 1348 if (r->com.state != RES_EQ_HW) 1349 err = -EINVAL; 1350 break; 1351 1352 case RES_EQ_HW: 1353 if (r->com.state != RES_EQ_RESERVED) 1354 err = -EINVAL; 1355 break; 1356 1357 default: 1358 err = -EINVAL; 1359 } 1360 1361 if (!err) { 1362 r->com.from_state = r->com.state; 1363 r->com.to_state = state; 1364 r->com.state = RES_EQ_BUSY; 1365 if (eq) 1366 *eq = r; 1367 } 1368 } 1369 1370 spin_unlock_irq(mlx4_tlock(dev)); 1371 1372 return err; 1373 } 1374 1375 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, 1376 enum res_cq_states state, struct res_cq **cq) 1377 { 1378 struct mlx4_priv *priv = mlx4_priv(dev); 1379 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1380 struct res_cq *r; 1381 int err; 1382 1383 spin_lock_irq(mlx4_tlock(dev)); 1384 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); 1385 if (!r) { 1386 err = -ENOENT; 1387 } else if (r->com.owner != slave) { 1388 err = -EPERM; 1389 } else if (state == RES_CQ_ALLOCATED) { 1390 if (r->com.state != RES_CQ_HW) 1391 err = -EINVAL; 1392 else if (atomic_read(&r->ref_count)) 1393 err = -EBUSY; 1394 else 1395 err = 0; 1396 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { 1397 err = -EINVAL; 1398 } else { 1399 err = 0; 1400 } 1401 1402 if (!err) { 1403 r->com.from_state = r->com.state; 1404 r->com.to_state = state; 1405 r->com.state = RES_CQ_BUSY; 1406 if (cq) 1407 *cq = r; 1408 } 1409 1410 spin_unlock_irq(mlx4_tlock(dev)); 1411 1412 return err; 1413 } 1414 1415 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1416 enum res_srq_states state, 
struct res_srq **srq) 1417 { 1418 struct mlx4_priv *priv = mlx4_priv(dev); 1419 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1420 struct res_srq *r; 1421 int err = 0; 1422 1423 spin_lock_irq(mlx4_tlock(dev)); 1424 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); 1425 if (!r) { 1426 err = -ENOENT; 1427 } else if (r->com.owner != slave) { 1428 err = -EPERM; 1429 } else if (state == RES_SRQ_ALLOCATED) { 1430 if (r->com.state != RES_SRQ_HW) 1431 err = -EINVAL; 1432 else if (atomic_read(&r->ref_count)) 1433 err = -EBUSY; 1434 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { 1435 err = -EINVAL; 1436 } 1437 1438 if (!err) { 1439 r->com.from_state = r->com.state; 1440 r->com.to_state = state; 1441 r->com.state = RES_SRQ_BUSY; 1442 if (srq) 1443 *srq = r; 1444 } 1445 1446 spin_unlock_irq(mlx4_tlock(dev)); 1447 1448 return err; 1449 } 1450 1451 static void res_abort_move(struct mlx4_dev *dev, int slave, 1452 enum mlx4_resource type, int id) 1453 { 1454 struct mlx4_priv *priv = mlx4_priv(dev); 1455 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1456 struct res_common *r; 1457 1458 spin_lock_irq(mlx4_tlock(dev)); 1459 r = res_tracker_lookup(&tracker->res_tree[type], id); 1460 if (r && (r->owner == slave)) 1461 r->state = r->from_state; 1462 spin_unlock_irq(mlx4_tlock(dev)); 1463 } 1464 1465 static void res_end_move(struct mlx4_dev *dev, int slave, 1466 enum mlx4_resource type, int id) 1467 { 1468 struct mlx4_priv *priv = mlx4_priv(dev); 1469 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1470 struct res_common *r; 1471 1472 spin_lock_irq(mlx4_tlock(dev)); 1473 r = res_tracker_lookup(&tracker->res_tree[type], id); 1474 if (r && (r->owner == slave)) 1475 r->state = r->to_state; 1476 spin_unlock_irq(mlx4_tlock(dev)); 1477 } 1478 1479 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) 1480 { 1481 return mlx4_is_qp_reserved(dev, qpn) && 1482 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); 1483 } 1484 1485 static int fw_reserved(struct mlx4_dev *dev, int qpn) 1486 { 1487 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 1488 } 1489 1490 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1491 u64 in_param, u64 *out_param) 1492 { 1493 int err; 1494 int count; 1495 int align; 1496 int base; 1497 int qpn; 1498 1499 switch (op) { 1500 case RES_OP_RESERVE: 1501 count = get_param_l(&in_param); 1502 align = get_param_h(&in_param); 1503 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1504 if (err) 1505 return err; 1506 1507 err = __mlx4_qp_reserve_range(dev, count, align, &base); 1508 if (err) { 1509 mlx4_release_resource(dev, slave, RES_QP, count, 0); 1510 return err; 1511 } 1512 1513 err = add_res_range(dev, slave, base, count, RES_QP, 0); 1514 if (err) { 1515 mlx4_release_resource(dev, slave, RES_QP, count, 0); 1516 __mlx4_qp_release_range(dev, base, count); 1517 return err; 1518 } 1519 set_param_l(out_param, base); 1520 break; 1521 case RES_OP_MAP_ICM: 1522 qpn = get_param_l(&in_param) & 0x7fffff; 1523 if (valid_reserved(dev, slave, qpn)) { 1524 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); 1525 if (err) 1526 return err; 1527 } 1528 1529 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, 1530 NULL, 1); 1531 if (err) 1532 return err; 1533 1534 if (!fw_reserved(dev, qpn)) { 1535 err = __mlx4_qp_alloc_icm(dev, qpn); 1536 if (err) { 1537 res_abort_move(dev, slave, RES_QP, qpn); 1538 return err; 1539 } 1540 } 1541 1542 
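		/* commit the transition to RES_QP_MAPPED that
		 * qp_res_start_move_to() began above
		 */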
res_end_move(dev, slave, RES_QP, qpn); 1543 break; 1544 1545 default: 1546 err = -EINVAL; 1547 break; 1548 } 1549 return err; 1550 } 1551 1552 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1553 u64 in_param, u64 *out_param) 1554 { 1555 int err = -EINVAL; 1556 int base; 1557 int order; 1558 1559 if (op != RES_OP_RESERVE_AND_MAP) 1560 return err; 1561 1562 order = get_param_l(&in_param); 1563 1564 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); 1565 if (err) 1566 return err; 1567 1568 base = __mlx4_alloc_mtt_range(dev, order); 1569 if (base == -1) { 1570 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1571 return -ENOMEM; 1572 } 1573 1574 err = add_res_range(dev, slave, base, 1, RES_MTT, order); 1575 if (err) { 1576 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1577 __mlx4_free_mtt_range(dev, base, order); 1578 } else { 1579 set_param_l(out_param, base); 1580 } 1581 1582 return err; 1583 } 1584 1585 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1586 u64 in_param, u64 *out_param) 1587 { 1588 int err = -EINVAL; 1589 int index; 1590 int id; 1591 struct res_mpt *mpt; 1592 1593 switch (op) { 1594 case RES_OP_RESERVE: 1595 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); 1596 if (err) 1597 break; 1598 1599 index = __mlx4_mpt_reserve(dev); 1600 if (index == -1) { 1601 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1602 break; 1603 } 1604 id = index & mpt_mask(dev); 1605 1606 err = add_res_range(dev, slave, id, 1, RES_MPT, index); 1607 if (err) { 1608 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1609 __mlx4_mpt_release(dev, index); 1610 break; 1611 } 1612 set_param_l(out_param, index); 1613 break; 1614 case RES_OP_MAP_ICM: 1615 index = get_param_l(&in_param); 1616 id = index & mpt_mask(dev); 1617 err = mr_res_start_move_to(dev, slave, id, 1618 RES_MPT_MAPPED, &mpt); 1619 if (err) 1620 return err; 1621 1622 err = __mlx4_mpt_alloc_icm(dev, mpt->key); 1623 if (err) { 1624 res_abort_move(dev, slave, RES_MPT, id); 1625 return err; 1626 } 1627 1628 res_end_move(dev, slave, RES_MPT, id); 1629 break; 1630 } 1631 return err; 1632 } 1633 1634 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1635 u64 in_param, u64 *out_param) 1636 { 1637 int cqn; 1638 int err; 1639 1640 switch (op) { 1641 case RES_OP_RESERVE_AND_MAP: 1642 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); 1643 if (err) 1644 break; 1645 1646 err = __mlx4_cq_alloc_icm(dev, &cqn); 1647 if (err) { 1648 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1649 break; 1650 } 1651 1652 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); 1653 if (err) { 1654 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1655 __mlx4_cq_free_icm(dev, cqn); 1656 break; 1657 } 1658 1659 set_param_l(out_param, cqn); 1660 break; 1661 1662 default: 1663 err = -EINVAL; 1664 } 1665 1666 return err; 1667 } 1668 1669 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1670 u64 in_param, u64 *out_param) 1671 { 1672 int srqn; 1673 int err; 1674 1675 switch (op) { 1676 case RES_OP_RESERVE_AND_MAP: 1677 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); 1678 if (err) 1679 break; 1680 1681 err = __mlx4_srq_alloc_icm(dev, &srqn); 1682 if (err) { 1683 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1684 break; 1685 } 1686 1687 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 1688 if (err) { 1689 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1690 __mlx4_srq_free_icm(dev, srqn); 1691 break; 1692 } 1693 1694 
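		/* success: hand the new SRQ number back to the slave via the
		 * VHCR output parameter
		 */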
set_param_l(out_param, srqn); 1695 break; 1696 1697 default: 1698 err = -EINVAL; 1699 } 1700 1701 return err; 1702 } 1703 1704 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, 1705 u8 smac_index, u64 *mac) 1706 { 1707 struct mlx4_priv *priv = mlx4_priv(dev); 1708 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1709 struct list_head *mac_list = 1710 &tracker->slave_list[slave].res_list[RES_MAC]; 1711 struct mac_res *res, *tmp; 1712 1713 list_for_each_entry_safe(res, tmp, mac_list, list) { 1714 if (res->smac_index == smac_index && res->port == (u8) port) { 1715 *mac = res->mac; 1716 return 0; 1717 } 1718 } 1719 return -ENOENT; 1720 } 1721 1722 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) 1723 { 1724 struct mlx4_priv *priv = mlx4_priv(dev); 1725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1726 struct list_head *mac_list = 1727 &tracker->slave_list[slave].res_list[RES_MAC]; 1728 struct mac_res *res, *tmp; 1729 1730 list_for_each_entry_safe(res, tmp, mac_list, list) { 1731 if (res->mac == mac && res->port == (u8) port) { 1732 /* mac found. update ref count */ 1733 ++res->ref_count; 1734 return 0; 1735 } 1736 } 1737 1738 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1739 return -EINVAL; 1740 res = kzalloc(sizeof *res, GFP_KERNEL); 1741 if (!res) { 1742 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1743 return -ENOMEM; 1744 } 1745 res->mac = mac; 1746 res->port = (u8) port; 1747 res->smac_index = smac_index; 1748 res->ref_count = 1; 1749 list_add_tail(&res->list, 1750 &tracker->slave_list[slave].res_list[RES_MAC]); 1751 return 0; 1752 } 1753 1754 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, 1755 int port) 1756 { 1757 struct mlx4_priv *priv = mlx4_priv(dev); 1758 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1759 struct list_head *mac_list = 1760 &tracker->slave_list[slave].res_list[RES_MAC]; 1761 struct mac_res *res, *tmp; 1762 1763 list_for_each_entry_safe(res, tmp, mac_list, list) { 1764 if (res->mac == mac && res->port == (u8) port) { 1765 if (!--res->ref_count) { 1766 list_del(&res->list); 1767 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1768 kfree(res); 1769 } 1770 break; 1771 } 1772 } 1773 } 1774 1775 static void rem_slave_macs(struct mlx4_dev *dev, int slave) 1776 { 1777 struct mlx4_priv *priv = mlx4_priv(dev); 1778 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1779 struct list_head *mac_list = 1780 &tracker->slave_list[slave].res_list[RES_MAC]; 1781 struct mac_res *res, *tmp; 1782 int i; 1783 1784 list_for_each_entry_safe(res, tmp, mac_list, list) { 1785 list_del(&res->list); 1786 /* dereference the mac the num times the slave referenced it */ 1787 for (i = 0; i < res->ref_count; i++) 1788 __mlx4_unregister_mac(dev, res->port, res->mac); 1789 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 1790 kfree(res); 1791 } 1792 } 1793 1794 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1795 u64 in_param, u64 *out_param, int in_port) 1796 { 1797 int err = -EINVAL; 1798 int port; 1799 u64 mac; 1800 u8 smac_index; 1801 1802 if (op != RES_OP_RESERVE_AND_MAP) 1803 return err; 1804 1805 port = !in_port ? 
get_param_l(out_param) : in_port; 1806 port = mlx4_slave_convert_port( 1807 dev, slave, port); 1808 1809 if (port < 0) 1810 return -EINVAL; 1811 mac = in_param; 1812 1813 err = __mlx4_register_mac(dev, port, mac); 1814 if (err >= 0) { 1815 smac_index = err; 1816 set_param_l(out_param, err); 1817 err = 0; 1818 } 1819 1820 if (!err) { 1821 err = mac_add_to_slave(dev, slave, mac, port, smac_index); 1822 if (err) 1823 __mlx4_unregister_mac(dev, port, mac); 1824 } 1825 return err; 1826 } 1827 1828 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, 1829 int port, int vlan_index) 1830 { 1831 struct mlx4_priv *priv = mlx4_priv(dev); 1832 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1833 struct list_head *vlan_list = 1834 &tracker->slave_list[slave].res_list[RES_VLAN]; 1835 struct vlan_res *res, *tmp; 1836 1837 list_for_each_entry_safe(res, tmp, vlan_list, list) { 1838 if (res->vlan == vlan && res->port == (u8) port) { 1839 /* vlan found. update ref count */ 1840 ++res->ref_count; 1841 return 0; 1842 } 1843 } 1844 1845 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) 1846 return -EINVAL; 1847 res = kzalloc(sizeof(*res), GFP_KERNEL); 1848 if (!res) { 1849 mlx4_release_resource(dev, slave, RES_VLAN, 1, port); 1850 return -ENOMEM; 1851 } 1852 res->vlan = vlan; 1853 res->port = (u8) port; 1854 res->vlan_index = vlan_index; 1855 res->ref_count = 1; 1856 list_add_tail(&res->list, 1857 &tracker->slave_list[slave].res_list[RES_VLAN]); 1858 return 0; 1859 } 1860 1861 1862 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, 1863 int port) 1864 { 1865 struct mlx4_priv *priv = mlx4_priv(dev); 1866 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1867 struct list_head *vlan_list = 1868 &tracker->slave_list[slave].res_list[RES_VLAN]; 1869 struct vlan_res *res, *tmp; 1870 1871 list_for_each_entry_safe(res, tmp, vlan_list, list) { 1872 if (res->vlan == vlan && res->port == (u8) port) { 1873 if (!--res->ref_count) { 1874 list_del(&res->list); 1875 mlx4_release_resource(dev, slave, RES_VLAN, 1876 1, port); 1877 kfree(res); 1878 } 1879 break; 1880 } 1881 } 1882 } 1883 1884 static void rem_slave_vlans(struct mlx4_dev *dev, int slave) 1885 { 1886 struct mlx4_priv *priv = mlx4_priv(dev); 1887 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1888 struct list_head *vlan_list = 1889 &tracker->slave_list[slave].res_list[RES_VLAN]; 1890 struct vlan_res *res, *tmp; 1891 int i; 1892 1893 list_for_each_entry_safe(res, tmp, vlan_list, list) { 1894 list_del(&res->list); 1895 /* dereference the vlan the num times the slave referenced it */ 1896 for (i = 0; i < res->ref_count; i++) 1897 __mlx4_unregister_vlan(dev, res->port, res->vlan); 1898 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); 1899 kfree(res); 1900 } 1901 } 1902 1903 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1904 u64 in_param, u64 *out_param, int in_port) 1905 { 1906 struct mlx4_priv *priv = mlx4_priv(dev); 1907 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 1908 int err; 1909 u16 vlan; 1910 int vlan_index; 1911 int port; 1912 1913 port = !in_port ? get_param_l(out_param) : in_port; 1914 1915 if (!port || op != RES_OP_RESERVE_AND_MAP) 1916 return -EINVAL; 1917 1918 port = mlx4_slave_convert_port( 1919 dev, slave, port); 1920 1921 if (port < 0) 1922 return -EINVAL; 1923 /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ 1924 if (!in_port && port > 0 && port <= dev->caps.num_ports) { 1925 slave_state[slave].old_vlan_api = true; 1926 return 0; 1927 } 1928 1929 vlan = (u16) in_param; 1930 1931 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); 1932 if (!err) { 1933 set_param_l(out_param, (u32) vlan_index); 1934 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); 1935 if (err) 1936 __mlx4_unregister_vlan(dev, port, vlan); 1937 } 1938 return err; 1939 } 1940 1941 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1942 u64 in_param, u64 *out_param) 1943 { 1944 u32 index; 1945 int err; 1946 1947 if (op != RES_OP_RESERVE) 1948 return -EINVAL; 1949 1950 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); 1951 if (err) 1952 return err; 1953 1954 err = __mlx4_counter_alloc(dev, &index); 1955 if (err) { 1956 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 1957 return err; 1958 } 1959 1960 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); 1961 if (err) { 1962 __mlx4_counter_free(dev, index); 1963 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 1964 } else { 1965 set_param_l(out_param, index); 1966 } 1967 1968 return err; 1969 } 1970 1971 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1972 u64 in_param, u64 *out_param) 1973 { 1974 u32 xrcdn; 1975 int err; 1976 1977 if (op != RES_OP_RESERVE) 1978 return -EINVAL; 1979 1980 err = __mlx4_xrcd_alloc(dev, &xrcdn); 1981 if (err) 1982 return err; 1983 1984 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); 1985 if (err) 1986 __mlx4_xrcd_free(dev, xrcdn); 1987 else 1988 set_param_l(out_param, xrcdn); 1989 1990 return err; 1991 } 1992 1993 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, 1994 struct mlx4_vhcr *vhcr, 1995 struct mlx4_cmd_mailbox *inbox, 1996 struct mlx4_cmd_mailbox *outbox, 1997 struct mlx4_cmd_info *cmd) 1998 { 1999 int err; 2000 int alop = vhcr->op_modifier; 2001 2002 switch (vhcr->in_modifier & 0xFF) { 2003 case RES_QP: 2004 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, 2005 vhcr->in_param, &vhcr->out_param); 2006 break; 2007 2008 case RES_MTT: 2009 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, 2010 vhcr->in_param, &vhcr->out_param); 2011 break; 2012 2013 case RES_MPT: 2014 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, 2015 vhcr->in_param, &vhcr->out_param); 2016 break; 2017 2018 case RES_CQ: 2019 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, 2020 vhcr->in_param, &vhcr->out_param); 2021 break; 2022 2023 case RES_SRQ: 2024 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, 2025 vhcr->in_param, &vhcr->out_param); 2026 break; 2027 2028 case RES_MAC: 2029 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, 2030 vhcr->in_param, &vhcr->out_param, 2031 (vhcr->in_modifier >> 8) & 0xFF); 2032 break; 2033 2034 case RES_VLAN: 2035 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, 2036 vhcr->in_param, &vhcr->out_param, 2037 (vhcr->in_modifier >> 8) & 0xFF); 2038 break; 2039 2040 case RES_COUNTER: 2041 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, 2042 vhcr->in_param, &vhcr->out_param); 2043 break; 2044 2045 case RES_XRCD: 2046 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, 2047 vhcr->in_param, &vhcr->out_param); 2048 break; 2049 2050 default: 2051 err = -EINVAL; 2052 break; 2053 } 2054 2055 return err; 2056 } 2057 2058 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2059 u64 in_param) 2060 { 2061 int err; 2062 int count; 2063 int base; 2064 
int qpn; 2065 2066 switch (op) { 2067 case RES_OP_RESERVE: 2068 base = get_param_l(&in_param) & 0x7fffff; 2069 count = get_param_h(&in_param); 2070 err = rem_res_range(dev, slave, base, count, RES_QP, 0); 2071 if (err) 2072 break; 2073 mlx4_release_resource(dev, slave, RES_QP, count, 0); 2074 __mlx4_qp_release_range(dev, base, count); 2075 break; 2076 case RES_OP_MAP_ICM: 2077 qpn = get_param_l(&in_param) & 0x7fffff; 2078 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, 2079 NULL, 0); 2080 if (err) 2081 return err; 2082 2083 if (!fw_reserved(dev, qpn)) 2084 __mlx4_qp_free_icm(dev, qpn); 2085 2086 res_end_move(dev, slave, RES_QP, qpn); 2087 2088 if (valid_reserved(dev, slave, qpn)) 2089 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); 2090 break; 2091 default: 2092 err = -EINVAL; 2093 break; 2094 } 2095 return err; 2096 } 2097 2098 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2099 u64 in_param, u64 *out_param) 2100 { 2101 int err = -EINVAL; 2102 int base; 2103 int order; 2104 2105 if (op != RES_OP_RESERVE_AND_MAP) 2106 return err; 2107 2108 base = get_param_l(&in_param); 2109 order = get_param_h(&in_param); 2110 err = rem_res_range(dev, slave, base, 1, RES_MTT, order); 2111 if (!err) { 2112 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 2113 __mlx4_free_mtt_range(dev, base, order); 2114 } 2115 return err; 2116 } 2117 2118 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2119 u64 in_param) 2120 { 2121 int err = -EINVAL; 2122 int index; 2123 int id; 2124 struct res_mpt *mpt; 2125 2126 switch (op) { 2127 case RES_OP_RESERVE: 2128 index = get_param_l(&in_param); 2129 id = index & mpt_mask(dev); 2130 err = get_res(dev, slave, id, RES_MPT, &mpt); 2131 if (err) 2132 break; 2133 index = mpt->key; 2134 put_res(dev, slave, id, RES_MPT); 2135 2136 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); 2137 if (err) 2138 break; 2139 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 2140 __mlx4_mpt_release(dev, index); 2141 break; 2142 case RES_OP_MAP_ICM: 2143 index = get_param_l(&in_param); 2144 id = index & mpt_mask(dev); 2145 err = mr_res_start_move_to(dev, slave, id, 2146 RES_MPT_RESERVED, &mpt); 2147 if (err) 2148 return err; 2149 2150 __mlx4_mpt_free_icm(dev, mpt->key); 2151 res_end_move(dev, slave, RES_MPT, id); 2152 return err; 2153 break; 2154 default: 2155 err = -EINVAL; 2156 break; 2157 } 2158 return err; 2159 } 2160 2161 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2162 u64 in_param, u64 *out_param) 2163 { 2164 int cqn; 2165 int err; 2166 2167 switch (op) { 2168 case RES_OP_RESERVE_AND_MAP: 2169 cqn = get_param_l(&in_param); 2170 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); 2171 if (err) 2172 break; 2173 2174 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 2175 __mlx4_cq_free_icm(dev, cqn); 2176 break; 2177 2178 default: 2179 err = -EINVAL; 2180 break; 2181 } 2182 2183 return err; 2184 } 2185 2186 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2187 u64 in_param, u64 *out_param) 2188 { 2189 int srqn; 2190 int err; 2191 2192 switch (op) { 2193 case RES_OP_RESERVE_AND_MAP: 2194 srqn = get_param_l(&in_param); 2195 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 2196 if (err) 2197 break; 2198 2199 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 2200 __mlx4_srq_free_icm(dev, srqn); 2201 break; 2202 2203 default: 2204 err = -EINVAL; 2205 break; 2206 } 2207 2208 return err; 2209 } 2210 2211 static int mac_free_res(struct mlx4_dev *dev, int slave, int 
op, int cmd, 2212 u64 in_param, u64 *out_param, int in_port) 2213 { 2214 int port; 2215 int err = 0; 2216 2217 switch (op) { 2218 case RES_OP_RESERVE_AND_MAP: 2219 port = !in_port ? get_param_l(out_param) : in_port; 2220 port = mlx4_slave_convert_port( 2221 dev, slave, port); 2222 2223 if (port < 0) 2224 return -EINVAL; 2225 mac_del_from_slave(dev, slave, in_param, port); 2226 __mlx4_unregister_mac(dev, port, in_param); 2227 break; 2228 default: 2229 err = -EINVAL; 2230 break; 2231 } 2232 2233 return err; 2234 2235 } 2236 2237 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2238 u64 in_param, u64 *out_param, int port) 2239 { 2240 struct mlx4_priv *priv = mlx4_priv(dev); 2241 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2242 int err = 0; 2243 2244 port = mlx4_slave_convert_port( 2245 dev, slave, port); 2246 2247 if (port < 0) 2248 return -EINVAL; 2249 switch (op) { 2250 case RES_OP_RESERVE_AND_MAP: 2251 if (slave_state[slave].old_vlan_api) 2252 return 0; 2253 if (!port) 2254 return -EINVAL; 2255 vlan_del_from_slave(dev, slave, in_param, port); 2256 __mlx4_unregister_vlan(dev, port, in_param); 2257 break; 2258 default: 2259 err = -EINVAL; 2260 break; 2261 } 2262 2263 return err; 2264 } 2265 2266 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2267 u64 in_param, u64 *out_param) 2268 { 2269 int index; 2270 int err; 2271 2272 if (op != RES_OP_RESERVE) 2273 return -EINVAL; 2274 2275 index = get_param_l(&in_param); 2276 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); 2277 if (err) 2278 return err; 2279 2280 __mlx4_counter_free(dev, index); 2281 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2282 2283 return err; 2284 } 2285 2286 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2287 u64 in_param, u64 *out_param) 2288 { 2289 int xrcdn; 2290 int err; 2291 2292 if (op != RES_OP_RESERVE) 2293 return -EINVAL; 2294 2295 xrcdn = get_param_l(&in_param); 2296 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); 2297 if (err) 2298 return err; 2299 2300 __mlx4_xrcd_free(dev, xrcdn); 2301 2302 return err; 2303 } 2304 2305 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, 2306 struct mlx4_vhcr *vhcr, 2307 struct mlx4_cmd_mailbox *inbox, 2308 struct mlx4_cmd_mailbox *outbox, 2309 struct mlx4_cmd_info *cmd) 2310 { 2311 int err = -EINVAL; 2312 int alop = vhcr->op_modifier; 2313 2314 switch (vhcr->in_modifier & 0xFF) { 2315 case RES_QP: 2316 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, 2317 vhcr->in_param); 2318 break; 2319 2320 case RES_MTT: 2321 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, 2322 vhcr->in_param, &vhcr->out_param); 2323 break; 2324 2325 case RES_MPT: 2326 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, 2327 vhcr->in_param); 2328 break; 2329 2330 case RES_CQ: 2331 err = cq_free_res(dev, slave, vhcr->op_modifier, alop, 2332 vhcr->in_param, &vhcr->out_param); 2333 break; 2334 2335 case RES_SRQ: 2336 err = srq_free_res(dev, slave, vhcr->op_modifier, alop, 2337 vhcr->in_param, &vhcr->out_param); 2338 break; 2339 2340 case RES_MAC: 2341 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, 2342 vhcr->in_param, &vhcr->out_param, 2343 (vhcr->in_modifier >> 8) & 0xFF); 2344 break; 2345 2346 case RES_VLAN: 2347 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, 2348 vhcr->in_param, &vhcr->out_param, 2349 (vhcr->in_modifier >> 8) & 0xFF); 2350 break; 2351 2352 case RES_COUNTER: 2353 err = counter_free_res(dev, slave, vhcr->op_modifier, 
alop, 2354 vhcr->in_param, &vhcr->out_param); 2355 break; 2356 2357 case RES_XRCD: 2358 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, 2359 vhcr->in_param, &vhcr->out_param); 2360 2361 default: 2362 break; 2363 } 2364 return err; 2365 } 2366 2367 /* ugly but other choices are uglier */ 2368 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) 2369 { 2370 return (be32_to_cpu(mpt->flags) >> 9) & 1; 2371 } 2372 2373 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) 2374 { 2375 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; 2376 } 2377 2378 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) 2379 { 2380 return be32_to_cpu(mpt->mtt_sz); 2381 } 2382 2383 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt) 2384 { 2385 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; 2386 } 2387 2388 static int mr_is_fmr(struct mlx4_mpt_entry *mpt) 2389 { 2390 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; 2391 } 2392 2393 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) 2394 { 2395 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; 2396 } 2397 2398 static int mr_is_region(struct mlx4_mpt_entry *mpt) 2399 { 2400 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; 2401 } 2402 2403 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) 2404 { 2405 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; 2406 } 2407 2408 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) 2409 { 2410 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; 2411 } 2412 2413 static int qp_get_mtt_size(struct mlx4_qp_context *qpc) 2414 { 2415 int page_shift = (qpc->log_page_size & 0x3f) + 12; 2416 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; 2417 int log_sq_sride = qpc->sq_size_stride & 7; 2418 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; 2419 int log_rq_stride = qpc->rq_size_stride & 7; 2420 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; 2421 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; 2422 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 2423 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0; 2424 int sq_size; 2425 int rq_size; 2426 int total_pages; 2427 int total_mem; 2428 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; 2429 2430 sq_size = 1 << (log_sq_size + log_sq_sride + 4); 2431 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); 2432 total_mem = sq_size + rq_size; 2433 total_pages = 2434 roundup_pow_of_two((total_mem + (page_offset << 6)) >> 2435 page_shift); 2436 2437 return total_pages; 2438 } 2439 2440 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, 2441 int size, struct res_mtt *mtt) 2442 { 2443 int res_start = mtt->com.res_id; 2444 int res_size = (1 << mtt->order); 2445 2446 if (start < res_start || start + size > res_start + res_size) 2447 return -EPERM; 2448 return 0; 2449 } 2450 2451 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2452 struct mlx4_vhcr *vhcr, 2453 struct mlx4_cmd_mailbox *inbox, 2454 struct mlx4_cmd_mailbox *outbox, 2455 struct mlx4_cmd_info *cmd) 2456 { 2457 int err; 2458 int index = vhcr->in_modifier; 2459 struct res_mtt *mtt; 2460 struct res_mpt *mpt; 2461 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; 2462 int phys; 2463 int id; 2464 u32 pd; 2465 int pd_slave; 2466 2467 id = index & mpt_mask(dev); 2468 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); 2469 if (err) 2470 return err; 2471 2472 /* Disable memory windows for VFs. 
*/ 2473 if (!mr_is_region(inbox->buf)) { 2474 err = -EPERM; 2475 goto ex_abort; 2476 } 2477 2478 /* Make sure that the PD bits related to the slave id are zeros. */ 2479 pd = mr_get_pd(inbox->buf); 2480 pd_slave = (pd >> 17) & 0x7f; 2481 if (pd_slave != 0 && pd_slave != slave) { 2482 err = -EPERM; 2483 goto ex_abort; 2484 } 2485 2486 if (mr_is_fmr(inbox->buf)) { 2487 /* FMR and Bind Enable are forbidden in slave devices. */ 2488 if (mr_is_bind_enabled(inbox->buf)) { 2489 err = -EPERM; 2490 goto ex_abort; 2491 } 2492 /* FMR and Memory Windows are also forbidden. */ 2493 if (!mr_is_region(inbox->buf)) { 2494 err = -EPERM; 2495 goto ex_abort; 2496 } 2497 } 2498 2499 phys = mr_phys_mpt(inbox->buf); 2500 if (!phys) { 2501 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2502 if (err) 2503 goto ex_abort; 2504 2505 err = check_mtt_range(dev, slave, mtt_base, 2506 mr_get_mtt_size(inbox->buf), mtt); 2507 if (err) 2508 goto ex_put; 2509 2510 mpt->mtt = mtt; 2511 } 2512 2513 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2514 if (err) 2515 goto ex_put; 2516 2517 if (!phys) { 2518 atomic_inc(&mtt->ref_count); 2519 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2520 } 2521 2522 res_end_move(dev, slave, RES_MPT, id); 2523 return 0; 2524 2525 ex_put: 2526 if (!phys) 2527 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2528 ex_abort: 2529 res_abort_move(dev, slave, RES_MPT, id); 2530 2531 return err; 2532 } 2533 2534 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2535 struct mlx4_vhcr *vhcr, 2536 struct mlx4_cmd_mailbox *inbox, 2537 struct mlx4_cmd_mailbox *outbox, 2538 struct mlx4_cmd_info *cmd) 2539 { 2540 int err; 2541 int index = vhcr->in_modifier; 2542 struct res_mpt *mpt; 2543 int id; 2544 2545 id = index & mpt_mask(dev); 2546 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); 2547 if (err) 2548 return err; 2549 2550 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2551 if (err) 2552 goto ex_abort; 2553 2554 if (mpt->mtt) 2555 atomic_dec(&mpt->mtt->ref_count); 2556 2557 res_end_move(dev, slave, RES_MPT, id); 2558 return 0; 2559 2560 ex_abort: 2561 res_abort_move(dev, slave, RES_MPT, id); 2562 2563 return err; 2564 } 2565 2566 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, 2567 struct mlx4_vhcr *vhcr, 2568 struct mlx4_cmd_mailbox *inbox, 2569 struct mlx4_cmd_mailbox *outbox, 2570 struct mlx4_cmd_info *cmd) 2571 { 2572 int err; 2573 int index = vhcr->in_modifier; 2574 struct res_mpt *mpt; 2575 int id; 2576 2577 id = index & mpt_mask(dev); 2578 err = get_res(dev, slave, id, RES_MPT, &mpt); 2579 if (err) 2580 return err; 2581 2582 if (mpt->com.from_state != RES_MPT_HW) { 2583 err = -EBUSY; 2584 goto out; 2585 } 2586 2587 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2588 2589 out: 2590 put_res(dev, slave, id, RES_MPT); 2591 return err; 2592 } 2593 2594 static int qp_get_rcqn(struct mlx4_qp_context *qpc) 2595 { 2596 return be32_to_cpu(qpc->cqn_recv) & 0xffffff; 2597 } 2598 2599 static int qp_get_scqn(struct mlx4_qp_context *qpc) 2600 { 2601 return be32_to_cpu(qpc->cqn_send) & 0xffffff; 2602 } 2603 2604 static u32 qp_get_srqn(struct mlx4_qp_context *qpc) 2605 { 2606 return be32_to_cpu(qpc->srqn) & 0x1ffffff; 2607 } 2608 2609 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, 2610 struct mlx4_qp_context *context) 2611 { 2612 u32 qpn = vhcr->in_modifier & 0xffffff; 2613 u32 qkey = 0; 2614 2615 if (mlx4_get_parav_qkey(dev, qpn, &qkey)) 2616 return; 2617 2618 /* adjust qkey in qp context */ 2619 
context->qkey = cpu_to_be32(qkey); 2620 } 2621 2622 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 2623 struct mlx4_vhcr *vhcr, 2624 struct mlx4_cmd_mailbox *inbox, 2625 struct mlx4_cmd_mailbox *outbox, 2626 struct mlx4_cmd_info *cmd) 2627 { 2628 int err; 2629 int qpn = vhcr->in_modifier & 0x7fffff; 2630 struct res_mtt *mtt; 2631 struct res_qp *qp; 2632 struct mlx4_qp_context *qpc = inbox->buf + 8; 2633 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; 2634 int mtt_size = qp_get_mtt_size(qpc); 2635 struct res_cq *rcq; 2636 struct res_cq *scq; 2637 int rcqn = qp_get_rcqn(qpc); 2638 int scqn = qp_get_scqn(qpc); 2639 u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2640 int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2641 struct res_srq *srq; 2642 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2643 2644 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); 2645 if (err) 2646 return err; 2647 qp->local_qpn = local_qpn; 2648 qp->sched_queue = 0; 2649 qp->param3 = 0; 2650 qp->vlan_control = 0; 2651 qp->fvl_rx = 0; 2652 qp->pri_path_fl = 0; 2653 qp->vlan_index = 0; 2654 qp->feup = 0; 2655 qp->qpc_flags = be32_to_cpu(qpc->flags); 2656 2657 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2658 if (err) 2659 goto ex_abort; 2660 2661 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 2662 if (err) 2663 goto ex_put_mtt; 2664 2665 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); 2666 if (err) 2667 goto ex_put_mtt; 2668 2669 if (scqn != rcqn) { 2670 err = get_res(dev, slave, scqn, RES_CQ, &scq); 2671 if (err) 2672 goto ex_put_rcq; 2673 } else 2674 scq = rcq; 2675 2676 if (use_srq) { 2677 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 2678 if (err) 2679 goto ex_put_scq; 2680 } 2681 2682 adjust_proxy_tun_qkey(dev, vhcr, qpc); 2683 update_pkey_index(dev, slave, inbox); 2684 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2685 if (err) 2686 goto ex_put_srq; 2687 atomic_inc(&mtt->ref_count); 2688 qp->mtt = mtt; 2689 atomic_inc(&rcq->ref_count); 2690 qp->rcq = rcq; 2691 atomic_inc(&scq->ref_count); 2692 qp->scq = scq; 2693 2694 if (scqn != rcqn) 2695 put_res(dev, slave, scqn, RES_CQ); 2696 2697 if (use_srq) { 2698 atomic_inc(&srq->ref_count); 2699 put_res(dev, slave, srqn, RES_SRQ); 2700 qp->srq = srq; 2701 } 2702 put_res(dev, slave, rcqn, RES_CQ); 2703 put_res(dev, slave, mtt_base, RES_MTT); 2704 res_end_move(dev, slave, RES_QP, qpn); 2705 2706 return 0; 2707 2708 ex_put_srq: 2709 if (use_srq) 2710 put_res(dev, slave, srqn, RES_SRQ); 2711 ex_put_scq: 2712 if (scqn != rcqn) 2713 put_res(dev, slave, scqn, RES_CQ); 2714 ex_put_rcq: 2715 put_res(dev, slave, rcqn, RES_CQ); 2716 ex_put_mtt: 2717 put_res(dev, slave, mtt_base, RES_MTT); 2718 ex_abort: 2719 res_abort_move(dev, slave, RES_QP, qpn); 2720 2721 return err; 2722 } 2723 2724 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) 2725 { 2726 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; 2727 } 2728 2729 static int eq_get_mtt_size(struct mlx4_eq_context *eqc) 2730 { 2731 int log_eq_size = eqc->log_eq_size & 0x1f; 2732 int page_shift = (eqc->log_page_size & 0x3f) + 12; 2733 2734 if (log_eq_size + 5 < page_shift) 2735 return 1; 2736 2737 return 1 << (log_eq_size + 5 - page_shift); 2738 } 2739 2740 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) 2741 { 2742 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; 2743 } 2744 2745 static int cq_get_mtt_size(struct mlx4_cq_context *cqc) 2746 { 2747 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; 2748 int page_shift = 
(cqc->log_page_size & 0x3f) + 12; 2749 2750 if (log_cq_size + 5 < page_shift) 2751 return 1; 2752 2753 return 1 << (log_cq_size + 5 - page_shift); 2754 } 2755 2756 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, 2757 struct mlx4_vhcr *vhcr, 2758 struct mlx4_cmd_mailbox *inbox, 2759 struct mlx4_cmd_mailbox *outbox, 2760 struct mlx4_cmd_info *cmd) 2761 { 2762 int err; 2763 int eqn = vhcr->in_modifier; 2764 int res_id = (slave << 8) | eqn; 2765 struct mlx4_eq_context *eqc = inbox->buf; 2766 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; 2767 int mtt_size = eq_get_mtt_size(eqc); 2768 struct res_eq *eq; 2769 struct res_mtt *mtt; 2770 2771 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); 2772 if (err) 2773 return err; 2774 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); 2775 if (err) 2776 goto out_add; 2777 2778 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2779 if (err) 2780 goto out_move; 2781 2782 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 2783 if (err) 2784 goto out_put; 2785 2786 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2787 if (err) 2788 goto out_put; 2789 2790 atomic_inc(&mtt->ref_count); 2791 eq->mtt = mtt; 2792 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2793 res_end_move(dev, slave, RES_EQ, res_id); 2794 return 0; 2795 2796 out_put: 2797 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2798 out_move: 2799 res_abort_move(dev, slave, RES_EQ, res_id); 2800 out_add: 2801 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 2802 return err; 2803 } 2804 2805 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, 2806 int len, struct res_mtt **res) 2807 { 2808 struct mlx4_priv *priv = mlx4_priv(dev); 2809 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 2810 struct res_mtt *mtt; 2811 int err = -EINVAL; 2812 2813 spin_lock_irq(mlx4_tlock(dev)); 2814 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], 2815 com.list) { 2816 if (!check_mtt_range(dev, slave, start, len, mtt)) { 2817 *res = mtt; 2818 mtt->com.from_state = mtt->com.state; 2819 mtt->com.state = RES_MTT_BUSY; 2820 err = 0; 2821 break; 2822 } 2823 } 2824 spin_unlock_irq(mlx4_tlock(dev)); 2825 2826 return err; 2827 } 2828 2829 static int verify_qp_parameters(struct mlx4_dev *dev, 2830 struct mlx4_cmd_mailbox *inbox, 2831 enum qp_transition transition, u8 slave) 2832 { 2833 u32 qp_type; 2834 struct mlx4_qp_context *qp_ctx; 2835 enum mlx4_qp_optpar optpar; 2836 int port; 2837 int num_gids; 2838 2839 qp_ctx = inbox->buf + 8; 2840 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 2841 optpar = be32_to_cpu(*(__be32 *) inbox->buf); 2842 2843 switch (qp_type) { 2844 case MLX4_QP_ST_RC: 2845 case MLX4_QP_ST_XRC: 2846 case MLX4_QP_ST_UC: 2847 switch (transition) { 2848 case QP_TRANS_INIT2RTR: 2849 case QP_TRANS_RTR2RTS: 2850 case QP_TRANS_RTS2RTS: 2851 case QP_TRANS_SQD2SQD: 2852 case QP_TRANS_SQD2RTS: 2853 if (slave != mlx4_master_func_num(dev)) 2854 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 2855 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 2856 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 2857 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 2858 else 2859 num_gids = 1; 2860 if (qp_ctx->pri_path.mgid_index >= num_gids) 2861 return -EINVAL; 2862 } 2863 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 2864 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 2865 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 2866 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 
2867 else 2868 num_gids = 1; 2869 if (qp_ctx->alt_path.mgid_index >= num_gids) 2870 return -EINVAL; 2871 } 2872 break; 2873 default: 2874 break; 2875 } 2876 2877 break; 2878 default: 2879 break; 2880 } 2881 2882 return 0; 2883 } 2884 2885 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, 2886 struct mlx4_vhcr *vhcr, 2887 struct mlx4_cmd_mailbox *inbox, 2888 struct mlx4_cmd_mailbox *outbox, 2889 struct mlx4_cmd_info *cmd) 2890 { 2891 struct mlx4_mtt mtt; 2892 __be64 *page_list = inbox->buf; 2893 u64 *pg_list = (u64 *)page_list; 2894 int i; 2895 struct res_mtt *rmtt = NULL; 2896 int start = be64_to_cpu(page_list[0]); 2897 int npages = vhcr->in_modifier; 2898 int err; 2899 2900 err = get_containing_mtt(dev, slave, start, npages, &rmtt); 2901 if (err) 2902 return err; 2903 2904 /* Call the SW implementation of write_mtt: 2905 * - Prepare a dummy mtt struct 2906 * - Translate inbox contents to simple addresses in host endianness */ 2907 mtt.offset = 0; /* TBD this is broken but I don't handle it since 2908 we don't really use it */ 2909 mtt.order = 0; 2910 mtt.page_shift = 0; 2911 for (i = 0; i < npages; ++i) 2912 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); 2913 2914 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, 2915 ((u64 *)page_list + 2)); 2916 2917 if (rmtt) 2918 put_res(dev, slave, rmtt->com.res_id, RES_MTT); 2919 2920 return err; 2921 } 2922 2923 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, 2924 struct mlx4_vhcr *vhcr, 2925 struct mlx4_cmd_mailbox *inbox, 2926 struct mlx4_cmd_mailbox *outbox, 2927 struct mlx4_cmd_info *cmd) 2928 { 2929 int eqn = vhcr->in_modifier; 2930 int res_id = eqn | (slave << 8); 2931 struct res_eq *eq; 2932 int err; 2933 2934 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); 2935 if (err) 2936 return err; 2937 2938 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); 2939 if (err) 2940 goto ex_abort; 2941 2942 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2943 if (err) 2944 goto ex_put; 2945 2946 atomic_dec(&eq->mtt->ref_count); 2947 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 2948 res_end_move(dev, slave, RES_EQ, res_id); 2949 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 2950 2951 return 0; 2952 2953 ex_put: 2954 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 2955 ex_abort: 2956 res_abort_move(dev, slave, RES_EQ, res_id); 2957 2958 return err; 2959 } 2960 2961 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) 2962 { 2963 struct mlx4_priv *priv = mlx4_priv(dev); 2964 struct mlx4_slave_event_eq_info *event_eq; 2965 struct mlx4_cmd_mailbox *mailbox; 2966 u32 in_modifier = 0; 2967 int err; 2968 int res_id; 2969 struct res_eq *req; 2970 2971 if (!priv->mfunc.master.slave_state) 2972 return -EINVAL; 2973 2974 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 2975 2976 /* Create the event only if the slave is registered */ 2977 if (event_eq->eqn < 0) 2978 return 0; 2979 2980 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); 2981 res_id = (slave << 8) | event_eq->eqn; 2982 err = get_res(dev, slave, res_id, RES_EQ, &req); 2983 if (err) 2984 goto unlock; 2985 2986 if (req->com.from_state != RES_EQ_HW) { 2987 err = -EINVAL; 2988 goto put; 2989 } 2990 2991 mailbox = mlx4_alloc_cmd_mailbox(dev); 2992 if (IS_ERR(mailbox)) { 2993 err = PTR_ERR(mailbox); 2994 goto put; 2995 } 2996 2997 if (eqe->type == MLX4_EVENT_TYPE_CMD) { 2998 ++event_eq->token; 2999 eqe->event.cmd.token = cpu_to_be16(event_eq->token); 3000 } 3001
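/* Deliver the event to the VF: copy the EQE into a command mailbox (only its first 28 bytes are forwarded) and issue GEN_EQE with the slave id in bits 0-7 and the target EQ number in bits 16-23 of the input modifier. */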
3002 memcpy(mailbox->buf, (u8 *) eqe, 28); 3003 3004 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); 3005 3006 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, 3007 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, 3008 MLX4_CMD_NATIVE); 3009 3010 put_res(dev, slave, res_id, RES_EQ); 3011 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3012 mlx4_free_cmd_mailbox(dev, mailbox); 3013 return err; 3014 3015 put: 3016 put_res(dev, slave, res_id, RES_EQ); 3017 3018 unlock: 3019 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3020 return err; 3021 } 3022 3023 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, 3024 struct mlx4_vhcr *vhcr, 3025 struct mlx4_cmd_mailbox *inbox, 3026 struct mlx4_cmd_mailbox *outbox, 3027 struct mlx4_cmd_info *cmd) 3028 { 3029 int eqn = vhcr->in_modifier; 3030 int res_id = eqn | (slave << 8); 3031 struct res_eq *eq; 3032 int err; 3033 3034 err = get_res(dev, slave, res_id, RES_EQ, &eq); 3035 if (err) 3036 return err; 3037 3038 if (eq->com.from_state != RES_EQ_HW) { 3039 err = -EINVAL; 3040 goto ex_put; 3041 } 3042 3043 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3044 3045 ex_put: 3046 put_res(dev, slave, res_id, RES_EQ); 3047 return err; 3048 } 3049 3050 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3051 struct mlx4_vhcr *vhcr, 3052 struct mlx4_cmd_mailbox *inbox, 3053 struct mlx4_cmd_mailbox *outbox, 3054 struct mlx4_cmd_info *cmd) 3055 { 3056 int err; 3057 int cqn = vhcr->in_modifier; 3058 struct mlx4_cq_context *cqc = inbox->buf; 3059 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3060 struct res_cq *cq; 3061 struct res_mtt *mtt; 3062 3063 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); 3064 if (err) 3065 return err; 3066 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3067 if (err) 3068 goto out_move; 3069 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3070 if (err) 3071 goto out_put; 3072 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3073 if (err) 3074 goto out_put; 3075 atomic_inc(&mtt->ref_count); 3076 cq->mtt = mtt; 3077 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3078 res_end_move(dev, slave, RES_CQ, cqn); 3079 return 0; 3080 3081 out_put: 3082 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3083 out_move: 3084 res_abort_move(dev, slave, RES_CQ, cqn); 3085 return err; 3086 } 3087 3088 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3089 struct mlx4_vhcr *vhcr, 3090 struct mlx4_cmd_mailbox *inbox, 3091 struct mlx4_cmd_mailbox *outbox, 3092 struct mlx4_cmd_info *cmd) 3093 { 3094 int err; 3095 int cqn = vhcr->in_modifier; 3096 struct res_cq *cq; 3097 3098 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); 3099 if (err) 3100 return err; 3101 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3102 if (err) 3103 goto out_move; 3104 atomic_dec(&cq->mtt->ref_count); 3105 res_end_move(dev, slave, RES_CQ, cqn); 3106 return 0; 3107 3108 out_move: 3109 res_abort_move(dev, slave, RES_CQ, cqn); 3110 return err; 3111 } 3112 3113 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3114 struct mlx4_vhcr *vhcr, 3115 struct mlx4_cmd_mailbox *inbox, 3116 struct mlx4_cmd_mailbox *outbox, 3117 struct mlx4_cmd_info *cmd) 3118 { 3119 int cqn = vhcr->in_modifier; 3120 struct res_cq *cq; 3121 int err; 3122 3123 err = get_res(dev, slave, cqn, RES_CQ, &cq); 3124 if (err) 3125 return err; 3126 3127 if (cq->com.from_state != RES_CQ_HW) 3128 goto ex_put; 3129 3130 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, 
outbox, cmd); 3131 ex_put: 3132 put_res(dev, slave, cqn, RES_CQ); 3133 3134 return err; 3135 } 3136 3137 static int handle_resize(struct mlx4_dev *dev, int slave, 3138 struct mlx4_vhcr *vhcr, 3139 struct mlx4_cmd_mailbox *inbox, 3140 struct mlx4_cmd_mailbox *outbox, 3141 struct mlx4_cmd_info *cmd, 3142 struct res_cq *cq) 3143 { 3144 int err; 3145 struct res_mtt *orig_mtt; 3146 struct res_mtt *mtt; 3147 struct mlx4_cq_context *cqc = inbox->buf; 3148 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3149 3150 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); 3151 if (err) 3152 return err; 3153 3154 if (orig_mtt != cq->mtt) { 3155 err = -EINVAL; 3156 goto ex_put; 3157 } 3158 3159 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3160 if (err) 3161 goto ex_put; 3162 3163 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3164 if (err) 3165 goto ex_put1; 3166 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3167 if (err) 3168 goto ex_put1; 3169 atomic_dec(&orig_mtt->ref_count); 3170 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3171 atomic_inc(&mtt->ref_count); 3172 cq->mtt = mtt; 3173 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3174 return 0; 3175 3176 ex_put1: 3177 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3178 ex_put: 3179 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3180 3181 return err; 3182 3183 } 3184 3185 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3186 struct mlx4_vhcr *vhcr, 3187 struct mlx4_cmd_mailbox *inbox, 3188 struct mlx4_cmd_mailbox *outbox, 3189 struct mlx4_cmd_info *cmd) 3190 { 3191 int cqn = vhcr->in_modifier; 3192 struct res_cq *cq; 3193 int err; 3194 3195 err = get_res(dev, slave, cqn, RES_CQ, &cq); 3196 if (err) 3197 return err; 3198 3199 if (cq->com.from_state != RES_CQ_HW) 3200 goto ex_put; 3201 3202 if (vhcr->op_modifier == 0) { 3203 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 3204 goto ex_put; 3205 } 3206 3207 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3208 ex_put: 3209 put_res(dev, slave, cqn, RES_CQ); 3210 3211 return err; 3212 } 3213 3214 static int srq_get_mtt_size(struct mlx4_srq_context *srqc) 3215 { 3216 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; 3217 int log_rq_stride = srqc->logstride & 7; 3218 int page_shift = (srqc->log_page_size & 0x3f) + 12; 3219 3220 if (log_srq_size + log_rq_stride + 4 < page_shift) 3221 return 1; 3222 3223 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); 3224 } 3225 3226 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3227 struct mlx4_vhcr *vhcr, 3228 struct mlx4_cmd_mailbox *inbox, 3229 struct mlx4_cmd_mailbox *outbox, 3230 struct mlx4_cmd_info *cmd) 3231 { 3232 int err; 3233 int srqn = vhcr->in_modifier; 3234 struct res_mtt *mtt; 3235 struct res_srq *srq; 3236 struct mlx4_srq_context *srqc = inbox->buf; 3237 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; 3238 3239 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) 3240 return -EINVAL; 3241 3242 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); 3243 if (err) 3244 return err; 3245 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3246 if (err) 3247 goto ex_abort; 3248 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), 3249 mtt); 3250 if (err) 3251 goto ex_put_mtt; 3252 3253 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3254 if (err) 3255 goto ex_put_mtt; 3256 3257 atomic_inc(&mtt->ref_count); 3258 srq->mtt = mtt; 
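/* The firmware now owns the SRQ; its MTT stays pinned via ref_count, so release the busy reference taken by get_res() and complete the move to RES_SRQ_HW. */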
3259 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3260 res_end_move(dev, slave, RES_SRQ, srqn); 3261 return 0; 3262 3263 ex_put_mtt: 3264 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3265 ex_abort: 3266 res_abort_move(dev, slave, RES_SRQ, srqn); 3267 3268 return err; 3269 } 3270 3271 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3272 struct mlx4_vhcr *vhcr, 3273 struct mlx4_cmd_mailbox *inbox, 3274 struct mlx4_cmd_mailbox *outbox, 3275 struct mlx4_cmd_info *cmd) 3276 { 3277 int err; 3278 int srqn = vhcr->in_modifier; 3279 struct res_srq *srq; 3280 3281 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); 3282 if (err) 3283 return err; 3284 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3285 if (err) 3286 goto ex_abort; 3287 atomic_dec(&srq->mtt->ref_count); 3288 if (srq->cq) 3289 atomic_dec(&srq->cq->ref_count); 3290 res_end_move(dev, slave, RES_SRQ, srqn); 3291 3292 return 0; 3293 3294 ex_abort: 3295 res_abort_move(dev, slave, RES_SRQ, srqn); 3296 3297 return err; 3298 } 3299 3300 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3301 struct mlx4_vhcr *vhcr, 3302 struct mlx4_cmd_mailbox *inbox, 3303 struct mlx4_cmd_mailbox *outbox, 3304 struct mlx4_cmd_info *cmd) 3305 { 3306 int err; 3307 int srqn = vhcr->in_modifier; 3308 struct res_srq *srq; 3309 3310 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3311 if (err) 3312 return err; 3313 if (srq->com.from_state != RES_SRQ_HW) { 3314 err = -EBUSY; 3315 goto out; 3316 } 3317 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3318 out: 3319 put_res(dev, slave, srqn, RES_SRQ); 3320 return err; 3321 } 3322 3323 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3324 struct mlx4_vhcr *vhcr, 3325 struct mlx4_cmd_mailbox *inbox, 3326 struct mlx4_cmd_mailbox *outbox, 3327 struct mlx4_cmd_info *cmd) 3328 { 3329 int err; 3330 int srqn = vhcr->in_modifier; 3331 struct res_srq *srq; 3332 3333 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3334 if (err) 3335 return err; 3336 3337 if (srq->com.from_state != RES_SRQ_HW) { 3338 err = -EBUSY; 3339 goto out; 3340 } 3341 3342 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3343 out: 3344 put_res(dev, slave, srqn, RES_SRQ); 3345 return err; 3346 } 3347 3348 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, 3349 struct mlx4_vhcr *vhcr, 3350 struct mlx4_cmd_mailbox *inbox, 3351 struct mlx4_cmd_mailbox *outbox, 3352 struct mlx4_cmd_info *cmd) 3353 { 3354 int err; 3355 int qpn = vhcr->in_modifier & 0x7fffff; 3356 struct res_qp *qp; 3357 3358 err = get_res(dev, slave, qpn, RES_QP, &qp); 3359 if (err) 3360 return err; 3361 if (qp->com.from_state != RES_QP_HW) { 3362 err = -EBUSY; 3363 goto out; 3364 } 3365 3366 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3367 out: 3368 put_res(dev, slave, qpn, RES_QP); 3369 return err; 3370 } 3371 3372 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 3373 struct mlx4_vhcr *vhcr, 3374 struct mlx4_cmd_mailbox *inbox, 3375 struct mlx4_cmd_mailbox *outbox, 3376 struct mlx4_cmd_info *cmd) 3377 { 3378 struct mlx4_qp_context *context = inbox->buf + 8; 3379 adjust_proxy_tun_qkey(dev, vhcr, context); 3380 update_pkey_index(dev, slave, inbox); 3381 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3382 } 3383 3384 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, 3385 struct mlx4_qp_context *qpc, 3386 struct mlx4_cmd_mailbox *inbox) 3387 { 3388 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); 3389 u8 
pri_sched_queue; 3390 int port = mlx4_slave_convert_port( 3391 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; 3392 3393 if (port < 0) 3394 return -EINVAL; 3395 3396 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | 3397 ((port & 1) << 6); 3398 3399 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || 3400 mlx4_is_eth(dev, port + 1)) { 3401 qpc->pri_path.sched_queue = pri_sched_queue; 3402 } 3403 3404 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 3405 port = mlx4_slave_convert_port( 3406 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) 3407 + 1) - 1; 3408 if (port < 0) 3409 return -EINVAL; 3410 qpc->alt_path.sched_queue = 3411 (qpc->alt_path.sched_queue & ~(1 << 6)) | 3412 (port & 1) << 6; 3413 } 3414 return 0; 3415 } 3416 3417 static int roce_verify_mac(struct mlx4_dev *dev, int slave, 3418 struct mlx4_qp_context *qpc, 3419 struct mlx4_cmd_mailbox *inbox) 3420 { 3421 u64 mac; 3422 int port; 3423 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 3424 u8 sched = *(u8 *)(inbox->buf + 64); 3425 u8 smac_ix; 3426 3427 port = (sched >> 6 & 1) + 1; 3428 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { 3429 smac_ix = qpc->pri_path.grh_mylmc & 0x7f; 3430 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) 3431 return -ENOENT; 3432 } 3433 return 0; 3434 } 3435 3436 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3437 struct mlx4_vhcr *vhcr, 3438 struct mlx4_cmd_mailbox *inbox, 3439 struct mlx4_cmd_mailbox *outbox, 3440 struct mlx4_cmd_info *cmd) 3441 { 3442 int err; 3443 struct mlx4_qp_context *qpc = inbox->buf + 8; 3444 int qpn = vhcr->in_modifier & 0x7fffff; 3445 struct res_qp *qp; 3446 u8 orig_sched_queue; 3447 __be32 orig_param3 = qpc->param3; 3448 u8 orig_vlan_control = qpc->pri_path.vlan_control; 3449 u8 orig_fvl_rx = qpc->pri_path.fvl_rx; 3450 u8 orig_pri_path_fl = qpc->pri_path.fl; 3451 u8 orig_vlan_index = qpc->pri_path.vlan_index; 3452 u8 orig_feup = qpc->pri_path.feup; 3453 3454 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 3455 if (err) 3456 return err; 3457 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 3458 if (err) 3459 return err; 3460 3461 if (roce_verify_mac(dev, slave, qpc, inbox)) 3462 return -EINVAL; 3463 3464 update_pkey_index(dev, slave, inbox); 3465 update_gid(dev, inbox, (u8)slave); 3466 adjust_proxy_tun_qkey(dev, vhcr, qpc); 3467 orig_sched_queue = qpc->pri_path.sched_queue; 3468 err = update_vport_qp_param(dev, inbox, slave, qpn); 3469 if (err) 3470 return err; 3471 3472 err = get_res(dev, slave, qpn, RES_QP, &qp); 3473 if (err) 3474 return err; 3475 if (qp->com.from_state != RES_QP_HW) { 3476 err = -EBUSY; 3477 goto out; 3478 } 3479 3480 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3481 out: 3482 /* if no error, save sched queue value passed in by VF. This is 3483 * essentially the QOS value provided by the VF. 
This will be useful 3484 * if we allow dynamic changes from VST back to VGT 3485 */ 3486 if (!err) { 3487 qp->sched_queue = orig_sched_queue; 3488 qp->param3 = orig_param3; 3489 qp->vlan_control = orig_vlan_control; 3490 qp->fvl_rx = orig_fvl_rx; 3491 qp->pri_path_fl = orig_pri_path_fl; 3492 qp->vlan_index = orig_vlan_index; 3493 qp->feup = orig_feup; 3494 } 3495 put_res(dev, slave, qpn, RES_QP); 3496 return err; 3497 } 3498 3499 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3500 struct mlx4_vhcr *vhcr, 3501 struct mlx4_cmd_mailbox *inbox, 3502 struct mlx4_cmd_mailbox *outbox, 3503 struct mlx4_cmd_info *cmd) 3504 { 3505 int err; 3506 struct mlx4_qp_context *context = inbox->buf + 8; 3507 3508 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3509 if (err) 3510 return err; 3511 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); 3512 if (err) 3513 return err; 3514 3515 update_pkey_index(dev, slave, inbox); 3516 update_gid(dev, inbox, (u8)slave); 3517 adjust_proxy_tun_qkey(dev, vhcr, context); 3518 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3519 } 3520 3521 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3522 struct mlx4_vhcr *vhcr, 3523 struct mlx4_cmd_mailbox *inbox, 3524 struct mlx4_cmd_mailbox *outbox, 3525 struct mlx4_cmd_info *cmd) 3526 { 3527 int err; 3528 struct mlx4_qp_context *context = inbox->buf + 8; 3529 3530 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3531 if (err) 3532 return err; 3533 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); 3534 if (err) 3535 return err; 3536 3537 update_pkey_index(dev, slave, inbox); 3538 update_gid(dev, inbox, (u8)slave); 3539 adjust_proxy_tun_qkey(dev, vhcr, context); 3540 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3541 } 3542 3543 3544 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3545 struct mlx4_vhcr *vhcr, 3546 struct mlx4_cmd_mailbox *inbox, 3547 struct mlx4_cmd_mailbox *outbox, 3548 struct mlx4_cmd_info *cmd) 3549 { 3550 struct mlx4_qp_context *context = inbox->buf + 8; 3551 int err = adjust_qp_sched_queue(dev, slave, context, inbox); 3552 if (err) 3553 return err; 3554 adjust_proxy_tun_qkey(dev, vhcr, context); 3555 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3556 } 3557 3558 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, 3559 struct mlx4_vhcr *vhcr, 3560 struct mlx4_cmd_mailbox *inbox, 3561 struct mlx4_cmd_mailbox *outbox, 3562 struct mlx4_cmd_info *cmd) 3563 { 3564 int err; 3565 struct mlx4_qp_context *context = inbox->buf + 8; 3566 3567 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3568 if (err) 3569 return err; 3570 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); 3571 if (err) 3572 return err; 3573 3574 adjust_proxy_tun_qkey(dev, vhcr, context); 3575 update_gid(dev, inbox, (u8)slave); 3576 update_pkey_index(dev, slave, inbox); 3577 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3578 } 3579 3580 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3581 struct mlx4_vhcr *vhcr, 3582 struct mlx4_cmd_mailbox *inbox, 3583 struct mlx4_cmd_mailbox *outbox, 3584 struct mlx4_cmd_info *cmd) 3585 { 3586 int err; 3587 struct mlx4_qp_context *context = inbox->buf + 8; 3588 3589 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3590 if (err) 3591 return err; 3592 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); 3593 if (err) 3594 return err; 3595 3596 adjust_proxy_tun_qkey(dev, vhcr, context); 3597 
update_gid(dev, inbox, (u8)slave); 3598 update_pkey_index(dev, slave, inbox); 3599 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3600 } 3601 3602 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, 3603 struct mlx4_vhcr *vhcr, 3604 struct mlx4_cmd_mailbox *inbox, 3605 struct mlx4_cmd_mailbox *outbox, 3606 struct mlx4_cmd_info *cmd) 3607 { 3608 int err; 3609 int qpn = vhcr->in_modifier & 0x7fffff; 3610 struct res_qp *qp; 3611 3612 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); 3613 if (err) 3614 return err; 3615 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3616 if (err) 3617 goto ex_abort; 3618 3619 atomic_dec(&qp->mtt->ref_count); 3620 atomic_dec(&qp->rcq->ref_count); 3621 atomic_dec(&qp->scq->ref_count); 3622 if (qp->srq) 3623 atomic_dec(&qp->srq->ref_count); 3624 res_end_move(dev, slave, RES_QP, qpn); 3625 return 0; 3626 3627 ex_abort: 3628 res_abort_move(dev, slave, RES_QP, qpn); 3629 3630 return err; 3631 } 3632 3633 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, 3634 struct res_qp *rqp, u8 *gid) 3635 { 3636 struct res_gid *res; 3637 3638 list_for_each_entry(res, &rqp->mcg_list, list) { 3639 if (!memcmp(res->gid, gid, 16)) 3640 return res; 3641 } 3642 return NULL; 3643 } 3644 3645 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3646 u8 *gid, enum mlx4_protocol prot, 3647 enum mlx4_steer_type steer, u64 reg_id) 3648 { 3649 struct res_gid *res; 3650 int err; 3651 3652 res = kzalloc(sizeof *res, GFP_KERNEL); 3653 if (!res) 3654 return -ENOMEM; 3655 3656 spin_lock_irq(&rqp->mcg_spl); 3657 if (find_gid(dev, slave, rqp, gid)) { 3658 kfree(res); 3659 err = -EEXIST; 3660 } else { 3661 memcpy(res->gid, gid, 16); 3662 res->prot = prot; 3663 res->steer = steer; 3664 res->reg_id = reg_id; 3665 list_add_tail(&res->list, &rqp->mcg_list); 3666 err = 0; 3667 } 3668 spin_unlock_irq(&rqp->mcg_spl); 3669 3670 return err; 3671 } 3672 3673 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3674 u8 *gid, enum mlx4_protocol prot, 3675 enum mlx4_steer_type steer, u64 *reg_id) 3676 { 3677 struct res_gid *res; 3678 int err; 3679 3680 spin_lock_irq(&rqp->mcg_spl); 3681 res = find_gid(dev, slave, rqp, gid); 3682 if (!res || res->prot != prot || res->steer != steer) 3683 err = -EINVAL; 3684 else { 3685 *reg_id = res->reg_id; 3686 list_del(&res->list); 3687 kfree(res); 3688 err = 0; 3689 } 3690 spin_unlock_irq(&rqp->mcg_spl); 3691 3692 return err; 3693 } 3694 3695 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, 3696 u8 gid[16], int block_loopback, enum mlx4_protocol prot, 3697 enum mlx4_steer_type type, u64 *reg_id) 3698 { 3699 switch (dev->caps.steering_mode) { 3700 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 3701 int port = mlx4_slave_convert_port(dev, slave, gid[5]); 3702 if (port < 0) 3703 return port; 3704 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 3705 block_loopback, prot, 3706 reg_id); 3707 } 3708 case MLX4_STEERING_MODE_B0: 3709 if (prot == MLX4_PROT_ETH) { 3710 int port = mlx4_slave_convert_port(dev, slave, gid[5]); 3711 if (port < 0) 3712 return port; 3713 gid[5] = port; 3714 } 3715 return mlx4_qp_attach_common(dev, qp, gid, 3716 block_loopback, prot, type); 3717 default: 3718 return -EINVAL; 3719 } 3720 } 3721 3722 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 3723 u8 gid[16], enum mlx4_protocol prot, 3724 enum mlx4_steer_type type, u64 reg_id) 3725 { 3726 switch (dev->caps.steering_mode) { 3727 case 
MLX4_STEERING_MODE_DEVICE_MANAGED: 3728 return mlx4_flow_detach(dev, reg_id); 3729 case MLX4_STEERING_MODE_B0: 3730 return mlx4_qp_detach_common(dev, qp, gid, prot, type); 3731 default: 3732 return -EINVAL; 3733 } 3734 } 3735 3736 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3737 struct mlx4_vhcr *vhcr, 3738 struct mlx4_cmd_mailbox *inbox, 3739 struct mlx4_cmd_mailbox *outbox, 3740 struct mlx4_cmd_info *cmd) 3741 { 3742 struct mlx4_qp qp; /* dummy for calling attach/detach */ 3743 u8 *gid = inbox->buf; 3744 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; 3745 int err; 3746 int qpn; 3747 struct res_qp *rqp; 3748 u64 reg_id = 0; 3749 int attach = vhcr->op_modifier; 3750 int block_loopback = vhcr->in_modifier >> 31; 3751 u8 steer_type_mask = 2; 3752 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 3753 3754 qpn = vhcr->in_modifier & 0xffffff; 3755 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3756 if (err) 3757 return err; 3758 3759 qp.qpn = qpn; 3760 if (attach) { 3761 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, 3762 type, &reg_id); 3763 if (err) { 3764 pr_err("Failed to attach rule to qp 0x%x\n", qpn); 3765 goto ex_put; 3766 } 3767 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); 3768 if (err) 3769 goto ex_detach; 3770 } else { 3771 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); 3772 if (err) 3773 goto ex_put; 3774 3775 err = qp_detach(dev, &qp, gid, prot, type, reg_id); 3776 if (err) 3777 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n", 3778 qpn, reg_id); 3779 } 3780 put_res(dev, slave, qpn, RES_QP); 3781 return err; 3782 3783 ex_detach: 3784 qp_detach(dev, &qp, gid, prot, type, reg_id); 3785 ex_put: 3786 put_res(dev, slave, qpn, RES_QP); 3787 return err; 3788 } 3789 3790 /* 3791 * MAC validation for Flow Steering rules. 3792 * A VF can attach rules only with a MAC address that is assigned to it. 3793 */ 3794 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, 3795 struct list_head *rlist) 3796 { 3797 struct mac_res *res, *tmp; 3798 __be64 be_mac; 3799 3800 /* make sure it isn't a multicast or broadcast MAC */ 3801 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && 3802 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { 3803 list_for_each_entry_safe(res, tmp, rlist, list) { 3804 be_mac = cpu_to_be64(res->mac << 16); 3805 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) 3806 return 0; 3807 } 3808 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", 3809 eth_header->eth.dst_mac, slave); 3810 return -EINVAL; 3811 } 3812 return 0; 3813 } 3814 3815 /* 3816 * In case of missing eth header, append eth header with a MAC address 3817 * assigned to the VF.
3818 */ 3819 static int add_eth_header(struct mlx4_dev *dev, int slave, 3820 struct mlx4_cmd_mailbox *inbox, 3821 struct list_head *rlist, int header_id) 3822 { 3823 struct mac_res *res, *tmp; 3824 u8 port; 3825 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3826 struct mlx4_net_trans_rule_hw_eth *eth_header; 3827 struct mlx4_net_trans_rule_hw_ipv4 *ip_header; 3828 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; 3829 __be64 be_mac = 0; 3830 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); 3831 3832 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3833 port = ctrl->port; 3834 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); 3835 3836 /* Clear a space in the inbox for eth header */ 3837 switch (header_id) { 3838 case MLX4_NET_TRANS_RULE_ID_IPV4: 3839 ip_header = 3840 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); 3841 memmove(ip_header, eth_header, 3842 sizeof(*ip_header) + sizeof(*l4_header)); 3843 break; 3844 case MLX4_NET_TRANS_RULE_ID_TCP: 3845 case MLX4_NET_TRANS_RULE_ID_UDP: 3846 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) 3847 (eth_header + 1); 3848 memmove(l4_header, eth_header, sizeof(*l4_header)); 3849 break; 3850 default: 3851 return -EINVAL; 3852 } 3853 list_for_each_entry_safe(res, tmp, rlist, list) { 3854 if (port == res->port) { 3855 be_mac = cpu_to_be64(res->mac << 16); 3856 break; 3857 } 3858 } 3859 if (!be_mac) { 3860 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", 3861 port); 3862 return -EINVAL; 3863 } 3864 3865 memset(eth_header, 0, sizeof(*eth_header)); 3866 eth_header->size = sizeof(*eth_header) >> 2; 3867 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); 3868 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); 3869 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); 3870 3871 return 0; 3872 3873 } 3874 3875 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3876 struct mlx4_vhcr *vhcr, 3877 struct mlx4_cmd_mailbox *inbox, 3878 struct mlx4_cmd_mailbox *outbox, 3879 struct mlx4_cmd_info *cmd) 3880 { 3881 3882 struct mlx4_priv *priv = mlx4_priv(dev); 3883 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 3884 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; 3885 int err; 3886 int qpn; 3887 struct res_qp *rqp; 3888 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3889 struct _rule_hw *rule_header; 3890 int header_id; 3891 3892 if (dev->caps.steering_mode != 3893 MLX4_STEERING_MODE_DEVICE_MANAGED) 3894 return -EOPNOTSUPP; 3895 3896 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3897 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); 3898 if (ctrl->port <= 0) 3899 return -EINVAL; 3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3901 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3902 if (err) { 3903 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 3904 return err; 3905 } 3906 rule_header = (struct _rule_hw *)(ctrl + 1); 3907 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); 3908 3909 switch (header_id) { 3910 case MLX4_NET_TRANS_RULE_ID_ETH: 3911 if (validate_eth_header_mac(slave, rule_header, rlist)) { 3912 err = -EINVAL; 3913 goto err_put; 3914 } 3915 break; 3916 case MLX4_NET_TRANS_RULE_ID_IB: 3917 break; 3918 case MLX4_NET_TRANS_RULE_ID_IPV4: 3919 case MLX4_NET_TRANS_RULE_ID_TCP: 3920 case MLX4_NET_TRANS_RULE_ID_UDP: 3921 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 3922 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { 3923 err = -EINVAL; 3924 
goto err_put; 3925 } 3926 vhcr->in_modifier += 3927 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 3928 break; 3929 default: 3930 pr_err("Corrupted mailbox.\n"); 3931 err = -EINVAL; 3932 goto err_put; 3933 } 3934 3935 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, 3936 vhcr->in_modifier, 0, 3937 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 3938 MLX4_CMD_NATIVE); 3939 if (err) 3940 goto err_put; 3941 3942 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); 3943 if (err) { 3944 mlx4_err(dev, "Failed to add flow steering resources\n"); 3945 /* detach rule */ 3946 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3947 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3948 MLX4_CMD_NATIVE); 3949 goto err_put; 3950 } 3951 atomic_inc(&rqp->ref_count); 3952 err_put: 3953 put_res(dev, slave, qpn, RES_QP); 3954 return err; 3955 } 3956 3957 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, 3958 struct mlx4_vhcr *vhcr, 3959 struct mlx4_cmd_mailbox *inbox, 3960 struct mlx4_cmd_mailbox *outbox, 3961 struct mlx4_cmd_info *cmd) 3962 { 3963 int err; 3964 struct res_qp *rqp; 3965 struct res_fs_rule *rrule; 3966 3967 if (dev->caps.steering_mode != 3968 MLX4_STEERING_MODE_DEVICE_MANAGED) 3969 return -EOPNOTSUPP; 3970 3971 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); 3972 if (err) 3973 return err; 3974 /* Release the rule from busy state before removal */ 3975 put_res(dev, slave, vhcr->in_param, RES_FS_RULE); 3976 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); 3977 if (err) 3978 return err; 3979 3980 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 3981 if (err) { 3982 mlx4_err(dev, "Failed to remove flow steering resources\n"); 3983 goto out; 3984 } 3985 3986 err = mlx4_cmd(dev, vhcr->in_param, 0, 0, 3987 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3988 MLX4_CMD_NATIVE); 3989 if (!err) 3990 atomic_dec(&rqp->ref_count); 3991 out: 3992 put_res(dev, slave, rrule->qpn, RES_QP); 3993 return err; 3994 } 3995 3996 enum { 3997 BUSY_MAX_RETRIES = 10 3998 }; 3999 4000 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, 4001 struct mlx4_vhcr *vhcr, 4002 struct mlx4_cmd_mailbox *inbox, 4003 struct mlx4_cmd_mailbox *outbox, 4004 struct mlx4_cmd_info *cmd) 4005 { 4006 int err; 4007 int index = vhcr->in_modifier & 0xffff; 4008 4009 err = get_res(dev, slave, index, RES_COUNTER, NULL); 4010 if (err) 4011 return err; 4012 4013 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 4014 put_res(dev, slave, index, RES_COUNTER); 4015 return err; 4016 } 4017 4018 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) 4019 { 4020 struct res_gid *rgid; 4021 struct res_gid *tmp; 4022 struct mlx4_qp qp; /* dummy for calling attach/detach */ 4023 4024 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { 4025 switch (dev->caps.steering_mode) { 4026 case MLX4_STEERING_MODE_DEVICE_MANAGED: 4027 mlx4_flow_detach(dev, rgid->reg_id); 4028 break; 4029 case MLX4_STEERING_MODE_B0: 4030 qp.qpn = rqp->local_qpn; 4031 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, 4032 rgid->prot, rgid->steer); 4033 break; 4034 } 4035 list_del(&rgid->list); 4036 kfree(rgid); 4037 } 4038 } 4039 4040 static int _move_all_busy(struct mlx4_dev *dev, int slave, 4041 enum mlx4_resource type, int print) 4042 { 4043 struct mlx4_priv *priv = mlx4_priv(dev); 4044 struct mlx4_resource_tracker *tracker = 4045 &priv->mfunc.master.res_tracker; 4046 struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; 4047 struct
res_common *r; 4048 struct res_common *tmp; 4049 int busy; 4050 4051 busy = 0; 4052 spin_lock_irq(mlx4_tlock(dev)); 4053 list_for_each_entry_safe(r, tmp, rlist, list) { 4054 if (r->owner == slave) { 4055 if (!r->removing) { 4056 if (r->state == RES_ANY_BUSY) { 4057 if (print) 4058 mlx4_dbg(dev, 4059 "%s id 0x%llx is busy\n", 4060 ResourceType(type), 4061 r->res_id); 4062 ++busy; 4063 } else { 4064 r->from_state = r->state; 4065 r->state = RES_ANY_BUSY; 4066 r->removing = 1; 4067 } 4068 } 4069 } 4070 } 4071 spin_unlock_irq(mlx4_tlock(dev)); 4072 4073 return busy; 4074 } 4075 4076 static int move_all_busy(struct mlx4_dev *dev, int slave, 4077 enum mlx4_resource type) 4078 { 4079 unsigned long begin; 4080 int busy; 4081 4082 begin = jiffies; 4083 do { 4084 busy = _move_all_busy(dev, slave, type, 0); 4085 if (time_after(jiffies, begin + 5 * HZ)) 4086 break; 4087 if (busy) 4088 cond_resched(); 4089 } while (busy); 4090 4091 if (busy) 4092 busy = _move_all_busy(dev, slave, type, 1); 4093 4094 return busy; 4095 } 4096 static void rem_slave_qps(struct mlx4_dev *dev, int slave) 4097 { 4098 struct mlx4_priv *priv = mlx4_priv(dev); 4099 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 4100 struct list_head *qp_list = 4101 &tracker->slave_list[slave].res_list[RES_QP]; 4102 struct res_qp *qp; 4103 struct res_qp *tmp; 4104 int state; 4105 u64 in_param; 4106 int qpn; 4107 int err; 4108 4109 err = move_all_busy(dev, slave, RES_QP); 4110 if (err) 4111 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy " 4112 "for slave %d\n", slave); 4113 4114 spin_lock_irq(mlx4_tlock(dev)); 4115 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 4116 spin_unlock_irq(mlx4_tlock(dev)); 4117 if (qp->com.owner == slave) { 4118 qpn = qp->com.res_id; 4119 detach_qp(dev, slave, qp); 4120 state = qp->com.from_state; 4121 while (state != 0) { 4122 switch (state) { 4123 case RES_QP_RESERVED: 4124 spin_lock_irq(mlx4_tlock(dev)); 4125 rb_erase(&qp->com.node, 4126 &tracker->res_tree[RES_QP]); 4127 list_del(&qp->com.list); 4128 spin_unlock_irq(mlx4_tlock(dev)); 4129 if (!valid_reserved(dev, slave, qpn)) { 4130 __mlx4_qp_release_range(dev, qpn, 1); 4131 mlx4_release_resource(dev, slave, 4132 RES_QP, 1, 0); 4133 } 4134 kfree(qp); 4135 state = 0; 4136 break; 4137 case RES_QP_MAPPED: 4138 if (!valid_reserved(dev, slave, qpn)) 4139 __mlx4_qp_free_icm(dev, qpn); 4140 state = RES_QP_RESERVED; 4141 break; 4142 case RES_QP_HW: 4143 in_param = slave; 4144 err = mlx4_cmd(dev, in_param, 4145 qp->local_qpn, 2, 4146 MLX4_CMD_2RST_QP, 4147 MLX4_CMD_TIME_CLASS_A, 4148 MLX4_CMD_NATIVE); 4149 if (err) 4150 mlx4_dbg(dev, "rem_slave_qps: failed" 4151 " to move slave %d qpn %d to" 4152 " reset\n", slave, 4153 qp->local_qpn); 4154 atomic_dec(&qp->rcq->ref_count); 4155 atomic_dec(&qp->scq->ref_count); 4156 atomic_dec(&qp->mtt->ref_count); 4157 if (qp->srq) 4158 atomic_dec(&qp->srq->ref_count); 4159 state = RES_QP_MAPPED; 4160 break; 4161 default: 4162 state = 0; 4163 } 4164 } 4165 } 4166 spin_lock_irq(mlx4_tlock(dev)); 4167 } 4168 spin_unlock_irq(mlx4_tlock(dev)); 4169 } 4170 4171 static void rem_slave_srqs(struct mlx4_dev *dev, int slave) 4172 { 4173 struct mlx4_priv *priv = mlx4_priv(dev); 4174 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 4175 struct list_head *srq_list = 4176 &tracker->slave_list[slave].res_list[RES_SRQ]; 4177 struct res_srq *srq; 4178 struct res_srq *tmp; 4179 int state; 4180 u64 in_param; 4181 LIST_HEAD(tlist); 4182 int srqn; 4183 int err; 4184 4185 err =
move_all_busy(dev, slave, RES_SRQ); 4186 if (err) 4187 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " 4188 "busy for slave %d\n", slave); 4189 4190 spin_lock_irq(mlx4_tlock(dev)); 4191 list_for_each_entry_safe(srq, tmp, srq_list, com.list) { 4192 spin_unlock_irq(mlx4_tlock(dev)); 4193 if (srq->com.owner == slave) { 4194 srqn = srq->com.res_id; 4195 state = srq->com.from_state; 4196 while (state != 0) { 4197 switch (state) { 4198 case RES_SRQ_ALLOCATED: 4199 __mlx4_srq_free_icm(dev, srqn); 4200 spin_lock_irq(mlx4_tlock(dev)); 4201 rb_erase(&srq->com.node, 4202 &tracker->res_tree[RES_SRQ]); 4203 list_del(&srq->com.list); 4204 spin_unlock_irq(mlx4_tlock(dev)); 4205 mlx4_release_resource(dev, slave, 4206 RES_SRQ, 1, 0); 4207 kfree(srq); 4208 state = 0; 4209 break; 4210 4211 case RES_SRQ_HW: 4212 in_param = slave; 4213 err = mlx4_cmd(dev, in_param, srqn, 1, 4214 MLX4_CMD_HW2SW_SRQ, 4215 MLX4_CMD_TIME_CLASS_A, 4216 MLX4_CMD_NATIVE); 4217 if (err) 4218 mlx4_dbg(dev, "rem_slave_srqs: failed" 4219 " to move slave %d srq %d to" 4220 " SW ownership\n", 4221 slave, srqn); 4222 4223 atomic_dec(&srq->mtt->ref_count); 4224 if (srq->cq) 4225 atomic_dec(&srq->cq->ref_count); 4226 state = RES_SRQ_ALLOCATED; 4227 break; 4228 4229 default: 4230 state = 0; 4231 } 4232 } 4233 } 4234 spin_lock_irq(mlx4_tlock(dev)); 4235 } 4236 spin_unlock_irq(mlx4_tlock(dev)); 4237 } 4238 4239 static void rem_slave_cqs(struct mlx4_dev *dev, int slave) 4240 { 4241 struct mlx4_priv *priv = mlx4_priv(dev); 4242 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 4243 struct list_head *cq_list = 4244 &tracker->slave_list[slave].res_list[RES_CQ]; 4245 struct res_cq *cq; 4246 struct res_cq *tmp; 4247 int state; 4248 u64 in_param; 4249 LIST_HEAD(tlist); 4250 int cqn; 4251 int err; 4252 4253 err = move_all_busy(dev, slave, RES_CQ); 4254 if (err) 4255 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " 4256 "busy for slave %d\n", slave); 4257 4258 spin_lock_irq(mlx4_tlock(dev)); 4259 list_for_each_entry_safe(cq, tmp, cq_list, com.list) { 4260 spin_unlock_irq(mlx4_tlock(dev)); 4261 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { 4262 cqn = cq->com.res_id; 4263 state = cq->com.from_state; 4264 while (state != 0) { 4265 switch (state) { 4266 case RES_CQ_ALLOCATED: 4267 __mlx4_cq_free_icm(dev, cqn); 4268 spin_lock_irq(mlx4_tlock(dev)); 4269 rb_erase(&cq->com.node, 4270 &tracker->res_tree[RES_CQ]); 4271 list_del(&cq->com.list); 4272 spin_unlock_irq(mlx4_tlock(dev)); 4273 mlx4_release_resource(dev, slave, 4274 RES_CQ, 1, 0); 4275 kfree(cq); 4276 state = 0; 4277 break; 4278 4279 case RES_CQ_HW: 4280 in_param = slave; 4281 err = mlx4_cmd(dev, in_param, cqn, 1, 4282 MLX4_CMD_HW2SW_CQ, 4283 MLX4_CMD_TIME_CLASS_A, 4284 MLX4_CMD_NATIVE); 4285 if (err) 4286 mlx4_dbg(dev, "rem_slave_cqs: failed" 4287 " to move slave %d cq %d to" 4288 " SW ownership\n", 4289 slave, cqn); 4290 atomic_dec(&cq->mtt->ref_count); 4291 state = RES_CQ_ALLOCATED; 4292 break; 4293 4294 default: 4295 state = 0; 4296 } 4297 } 4298 } 4299 spin_lock_irq(mlx4_tlock(dev)); 4300 } 4301 spin_unlock_irq(mlx4_tlock(dev)); 4302 } 4303 4304 static void rem_slave_mrs(struct mlx4_dev *dev, int slave) 4305 { 4306 struct mlx4_priv *priv = mlx4_priv(dev); 4307 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 4308 struct list_head *mpt_list = 4309 &tracker->slave_list[slave].res_list[RES_MPT]; 4310 struct res_mpt *mpt; 4311 struct res_mpt *tmp; 4312 int state; 4313 u64 in_param; 4314 LIST_HEAD(tlist); 4315 int mptn; 
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
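
/*
 * Detach and free any flow steering rules the slave left behind.  A rule in
 * RES_FS_RULE_ALLOCATED is removed from the device with
 * MLX4_QP_FLOW_STEERING_DETACH (using the rule's res_id as the registration
 * handle) and then erased from the resource tracker; there is no
 * intermediate hardware state to unwind.
 */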
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
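
/*
 * Counters and XRC domains have a single ALLOCATED state, so the two helpers
 * below simply walk the slave's list under the tracker lock, erase each
 * tracker entry and hand the index back to the master's allocator; no
 * HW2SW transition is needed for these resource types.
 */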
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
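
/*
 * Deferred-work handler for an immediate VF VLAN/QoS change: for every QP of
 * the slave that has reached RES_QP_HW (and is neither reserved nor an RSS
 * QP), an UPDATE_QP command re-programs the primary path.  When
 * work->vlan_id == MLX4_VGT the QP's saved VGT parameters are restored;
 * otherwise the new VST vlan_index, QoS bits and vlan_control policy
 * (computed above from work->flags and work->vlan_id) are enforced.
 */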
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||	/* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
					  "port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}