1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. 4 * All rights reserved. 5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/sched.h> 37 #include <linux/pci.h> 38 #include <linux/errno.h> 39 #include <linux/kernel.h> 40 #include <linux/io.h> 41 #include <linux/slab.h> 42 #include <linux/mlx4/cmd.h> 43 #include <linux/mlx4/qp.h> 44 #include <linux/if_ether.h> 45 #include <linux/etherdevice.h> 46 47 #include "mlx4.h" 48 #include "fw.h" 49 #include "mlx4_stats.h" 50 51 #define MLX4_MAC_VALID (1ull << 63) 52 #define MLX4_PF_COUNTERS_PER_PORT 2 53 #define MLX4_VF_COUNTERS_PER_PORT 1 54 55 struct mac_res { 56 struct list_head list; 57 u64 mac; 58 int ref_count; 59 u8 smac_index; 60 u8 port; 61 }; 62 63 struct vlan_res { 64 struct list_head list; 65 u16 vlan; 66 int ref_count; 67 int vlan_index; 68 u8 port; 69 }; 70 71 struct res_common { 72 struct list_head list; 73 struct rb_node node; 74 u64 res_id; 75 int owner; 76 int state; 77 int from_state; 78 int to_state; 79 int removing; 80 }; 81 82 enum { 83 RES_ANY_BUSY = 1 84 }; 85 86 struct res_gid { 87 struct list_head list; 88 u8 gid[16]; 89 enum mlx4_protocol prot; 90 enum mlx4_steer_type steer; 91 u64 reg_id; 92 }; 93 94 enum res_qp_states { 95 RES_QP_BUSY = RES_ANY_BUSY, 96 97 /* QP number was allocated */ 98 RES_QP_RESERVED, 99 100 /* ICM memory for QP context was mapped */ 101 RES_QP_MAPPED, 102 103 /* QP is in hw ownership */ 104 RES_QP_HW 105 }; 106 107 struct res_qp { 108 struct res_common com; 109 struct res_mtt *mtt; 110 struct res_cq *rcq; 111 struct res_cq *scq; 112 struct res_srq *srq; 113 struct list_head mcg_list; 114 spinlock_t mcg_spl; 115 int local_qpn; 116 atomic_t ref_count; 117 u32 qpc_flags; 118 /* saved qp params before VST enforcement in order to restore on VGT */ 119 u8 sched_queue; 120 __be32 param3; 121 u8 vlan_control; 122 u8 fvl_rx; 123 u8 pri_path_fl; 124 u8 vlan_index; 125 u8 feup; 126 }; 127 128 enum res_mtt_states { 129 RES_MTT_BUSY = RES_ANY_BUSY, 130 RES_MTT_ALLOCATED, 131 }; 132 133 static inline const char *mtt_states_str(enum 
res_mtt_states state) 134 { 135 switch (state) { 136 case RES_MTT_BUSY: return "RES_MTT_BUSY"; 137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; 138 default: return "Unknown"; 139 } 140 } 141 142 struct res_mtt { 143 struct res_common com; 144 int order; 145 atomic_t ref_count; 146 }; 147 148 enum res_mpt_states { 149 RES_MPT_BUSY = RES_ANY_BUSY, 150 RES_MPT_RESERVED, 151 RES_MPT_MAPPED, 152 RES_MPT_HW, 153 }; 154 155 struct res_mpt { 156 struct res_common com; 157 struct res_mtt *mtt; 158 int key; 159 }; 160 161 enum res_eq_states { 162 RES_EQ_BUSY = RES_ANY_BUSY, 163 RES_EQ_RESERVED, 164 RES_EQ_HW, 165 }; 166 167 struct res_eq { 168 struct res_common com; 169 struct res_mtt *mtt; 170 }; 171 172 enum res_cq_states { 173 RES_CQ_BUSY = RES_ANY_BUSY, 174 RES_CQ_ALLOCATED, 175 RES_CQ_HW, 176 }; 177 178 struct res_cq { 179 struct res_common com; 180 struct res_mtt *mtt; 181 atomic_t ref_count; 182 }; 183 184 enum res_srq_states { 185 RES_SRQ_BUSY = RES_ANY_BUSY, 186 RES_SRQ_ALLOCATED, 187 RES_SRQ_HW, 188 }; 189 190 struct res_srq { 191 struct res_common com; 192 struct res_mtt *mtt; 193 struct res_cq *cq; 194 atomic_t ref_count; 195 }; 196 197 enum res_counter_states { 198 RES_COUNTER_BUSY = RES_ANY_BUSY, 199 RES_COUNTER_ALLOCATED, 200 }; 201 202 struct res_counter { 203 struct res_common com; 204 int port; 205 }; 206 207 enum res_xrcdn_states { 208 RES_XRCD_BUSY = RES_ANY_BUSY, 209 RES_XRCD_ALLOCATED, 210 }; 211 212 struct res_xrcdn { 213 struct res_common com; 214 int port; 215 }; 216 217 enum res_fs_rule_states { 218 RES_FS_RULE_BUSY = RES_ANY_BUSY, 219 RES_FS_RULE_ALLOCATED, 220 }; 221 222 struct res_fs_rule { 223 struct res_common com; 224 int qpn; 225 }; 226 227 static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 228 { 229 struct rb_node *node = root->rb_node; 230 231 while (node) { 232 struct res_common *res = container_of(node, struct res_common, 233 node); 234 235 if (res_id < res->res_id) 236 node = node->rb_left; 237 else if (res_id > res->res_id) 238 node = node->rb_right; 239 else 240 return res; 241 } 242 return NULL; 243 } 244 245 static int res_tracker_insert(struct rb_root *root, struct res_common *res) 246 { 247 struct rb_node **new = &(root->rb_node), *parent = NULL; 248 249 /* Figure out where to put new node */ 250 while (*new) { 251 struct res_common *this = container_of(*new, struct res_common, 252 node); 253 254 parent = *new; 255 if (res->res_id < this->res_id) 256 new = &((*new)->rb_left); 257 else if (res->res_id > this->res_id) 258 new = &((*new)->rb_right); 259 else 260 return -EEXIST; 261 } 262 263 /* Add new node and rebalance tree. 
*/ 264 rb_link_node(&res->node, parent, new); 265 rb_insert_color(&res->node, root); 266 267 return 0; 268 } 269 270 enum qp_transition { 271 QP_TRANS_INIT2RTR, 272 QP_TRANS_RTR2RTS, 273 QP_TRANS_RTS2RTS, 274 QP_TRANS_SQERR2RTS, 275 QP_TRANS_SQD2SQD, 276 QP_TRANS_SQD2RTS 277 }; 278 279 /* For Debug uses */ 280 static const char *resource_str(enum mlx4_resource rt) 281 { 282 switch (rt) { 283 case RES_QP: return "RES_QP"; 284 case RES_CQ: return "RES_CQ"; 285 case RES_SRQ: return "RES_SRQ"; 286 case RES_MPT: return "RES_MPT"; 287 case RES_MTT: return "RES_MTT"; 288 case RES_MAC: return "RES_MAC"; 289 case RES_VLAN: return "RES_VLAN"; 290 case RES_EQ: return "RES_EQ"; 291 case RES_COUNTER: return "RES_COUNTER"; 292 case RES_FS_RULE: return "RES_FS_RULE"; 293 case RES_XRCD: return "RES_XRCD"; 294 default: return "Unknown resource type !!!"; 295 }; 296 } 297 298 static void rem_slave_vlans(struct mlx4_dev *dev, int slave); 299 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, 300 enum mlx4_resource res_type, int count, 301 int port) 302 { 303 struct mlx4_priv *priv = mlx4_priv(dev); 304 struct resource_allocator *res_alloc = 305 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 306 int err = -EINVAL; 307 int allocated, free, reserved, guaranteed, from_free; 308 int from_rsvd; 309 310 if (slave > dev->persist->num_vfs) 311 return -EINVAL; 312 313 spin_lock(&res_alloc->alloc_lock); 314 allocated = (port > 0) ? 315 res_alloc->allocated[(port - 1) * 316 (dev->persist->num_vfs + 1) + slave] : 317 res_alloc->allocated[slave]; 318 free = (port > 0) ? res_alloc->res_port_free[port - 1] : 319 res_alloc->res_free; 320 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] : 321 res_alloc->res_reserved; 322 guaranteed = res_alloc->guaranteed[slave]; 323 324 if (allocated + count > res_alloc->quota[slave]) { 325 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n", 326 slave, port, resource_str(res_type), count, 327 allocated, res_alloc->quota[slave]); 328 goto out; 329 } 330 331 if (allocated + count <= guaranteed) { 332 err = 0; 333 from_rsvd = count; 334 } else { 335 /* portion may need to be obtained from free area */ 336 if (guaranteed - allocated > 0) 337 from_free = count - (guaranteed - allocated); 338 else 339 from_free = count; 340 341 from_rsvd = count - from_free; 342 343 if (free - from_free >= reserved) 344 err = 0; 345 else 346 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n", 347 slave, port, resource_str(res_type), free, 348 from_free, reserved); 349 } 350 351 if (!err) { 352 /* grant the request */ 353 if (port > 0) { 354 res_alloc->allocated[(port - 1) * 355 (dev->persist->num_vfs + 1) + slave] += count; 356 res_alloc->res_port_free[port - 1] -= count; 357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd; 358 } else { 359 res_alloc->allocated[slave] += count; 360 res_alloc->res_free -= count; 361 res_alloc->res_reserved -= from_rsvd; 362 } 363 } 364 365 out: 366 spin_unlock(&res_alloc->alloc_lock); 367 return err; 368 } 369 370 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, 371 enum mlx4_resource res_type, int count, 372 int port) 373 { 374 struct mlx4_priv *priv = mlx4_priv(dev); 375 struct resource_allocator *res_alloc = 376 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 377 int allocated, guaranteed, from_rsvd; 378 379 if (slave > dev->persist->num_vfs) 380 return; 381 382 spin_lock(&res_alloc->alloc_lock); 383 384 allocated = (port > 0) ? 
385 res_alloc->allocated[(port - 1) * 386 (dev->persist->num_vfs + 1) + slave] : 387 res_alloc->allocated[slave]; 388 guaranteed = res_alloc->guaranteed[slave]; 389 390 if (allocated - count >= guaranteed) { 391 from_rsvd = 0; 392 } else { 393 /* portion may need to be returned to reserved area */ 394 if (allocated - guaranteed > 0) 395 from_rsvd = count - (allocated - guaranteed); 396 else 397 from_rsvd = count; 398 } 399 400 if (port > 0) { 401 res_alloc->allocated[(port - 1) * 402 (dev->persist->num_vfs + 1) + slave] -= count; 403 res_alloc->res_port_free[port - 1] += count; 404 res_alloc->res_port_rsvd[port - 1] += from_rsvd; 405 } else { 406 res_alloc->allocated[slave] -= count; 407 res_alloc->res_free += count; 408 res_alloc->res_reserved += from_rsvd; 409 } 410 411 spin_unlock(&res_alloc->alloc_lock); 412 return; 413 } 414 415 static inline void initialize_res_quotas(struct mlx4_dev *dev, 416 struct resource_allocator *res_alloc, 417 enum mlx4_resource res_type, 418 int vf, int num_instances) 419 { 420 res_alloc->guaranteed[vf] = num_instances / 421 (2 * (dev->persist->num_vfs + 1)); 422 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; 423 if (vf == mlx4_master_func_num(dev)) { 424 res_alloc->res_free = num_instances; 425 if (res_type == RES_MTT) { 426 /* reserved mtts will be taken out of the PF allocation */ 427 res_alloc->res_free += dev->caps.reserved_mtts; 428 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; 429 res_alloc->quota[vf] += dev->caps.reserved_mtts; 430 } 431 } 432 } 433 434 void mlx4_init_quotas(struct mlx4_dev *dev) 435 { 436 struct mlx4_priv *priv = mlx4_priv(dev); 437 int pf; 438 439 /* quotas for VFs are initialized in mlx4_slave_cap */ 440 if (mlx4_is_slave(dev)) 441 return; 442 443 if (!mlx4_is_mfunc(dev)) { 444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - 445 mlx4_num_reserved_sqps(dev); 446 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; 447 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; 448 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; 449 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; 450 return; 451 } 452 453 pf = mlx4_master_func_num(dev); 454 dev->quotas.qp = 455 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; 456 dev->quotas.cq = 457 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; 458 dev->quotas.srq = 459 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; 460 dev->quotas.mtt = 461 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; 462 dev->quotas.mpt = 463 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; 464 } 465 466 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) 467 { 468 /* reduce the sink counter */ 469 return (dev->caps.max_counters - 1 - 470 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) 471 / MLX4_MAX_PORTS; 472 } 473 474 int mlx4_init_resource_tracker(struct mlx4_dev *dev) 475 { 476 struct mlx4_priv *priv = mlx4_priv(dev); 477 int i, j; 478 int t; 479 int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); 480 481 priv->mfunc.master.res_tracker.slave_list = 482 kzalloc(dev->num_slaves * sizeof(struct slave_list), 483 GFP_KERNEL); 484 if (!priv->mfunc.master.res_tracker.slave_list) 485 return -ENOMEM; 486 487 for (i = 0 ; i < dev->num_slaves; i++) { 488 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) 489 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. 
490 slave_list[i].res_list[t]); 491 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 492 } 493 494 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", 495 dev->num_slaves); 496 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 497 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; 498 499 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 500 struct resource_allocator *res_alloc = 501 &priv->mfunc.master.res_tracker.res_alloc[i]; 502 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) * 503 sizeof(int), GFP_KERNEL); 504 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) * 505 sizeof(int), GFP_KERNEL); 506 if (i == RES_MAC || i == RES_VLAN) 507 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * 508 (dev->persist->num_vfs 509 + 1) * 510 sizeof(int), GFP_KERNEL); 511 else 512 res_alloc->allocated = kzalloc((dev->persist-> 513 num_vfs + 1) * 514 sizeof(int), GFP_KERNEL); 515 /* Reduce the sink counter */ 516 if (i == RES_COUNTER) 517 res_alloc->res_free = dev->caps.max_counters - 1; 518 519 if (!res_alloc->quota || !res_alloc->guaranteed || 520 !res_alloc->allocated) 521 goto no_mem_err; 522 523 spin_lock_init(&res_alloc->alloc_lock); 524 for (t = 0; t < dev->persist->num_vfs + 1; t++) { 525 struct mlx4_active_ports actv_ports = 526 mlx4_get_active_ports(dev, t); 527 switch (i) { 528 case RES_QP: 529 initialize_res_quotas(dev, res_alloc, RES_QP, 530 t, dev->caps.num_qps - 531 dev->caps.reserved_qps - 532 mlx4_num_reserved_sqps(dev)); 533 break; 534 case RES_CQ: 535 initialize_res_quotas(dev, res_alloc, RES_CQ, 536 t, dev->caps.num_cqs - 537 dev->caps.reserved_cqs); 538 break; 539 case RES_SRQ: 540 initialize_res_quotas(dev, res_alloc, RES_SRQ, 541 t, dev->caps.num_srqs - 542 dev->caps.reserved_srqs); 543 break; 544 case RES_MPT: 545 initialize_res_quotas(dev, res_alloc, RES_MPT, 546 t, dev->caps.num_mpts - 547 dev->caps.reserved_mrws); 548 break; 549 case RES_MTT: 550 initialize_res_quotas(dev, res_alloc, RES_MTT, 551 t, dev->caps.num_mtts - 552 dev->caps.reserved_mtts); 553 break; 554 case RES_MAC: 555 if (t == mlx4_master_func_num(dev)) { 556 int max_vfs_pport = 0; 557 /* Calculate the max vfs per port for */ 558 /* both ports. 
*/ 559 for (j = 0; j < dev->caps.num_ports; 560 j++) { 561 struct mlx4_slaves_pport slaves_pport = 562 mlx4_phys_to_slaves_pport(dev, j + 1); 563 unsigned current_slaves = 564 bitmap_weight(slaves_pport.slaves, 565 dev->caps.num_ports) - 1; 566 if (max_vfs_pport < current_slaves) 567 max_vfs_pport = 568 current_slaves; 569 } 570 res_alloc->quota[t] = 571 MLX4_MAX_MAC_NUM - 572 2 * max_vfs_pport; 573 res_alloc->guaranteed[t] = 2; 574 for (j = 0; j < MLX4_MAX_PORTS; j++) 575 res_alloc->res_port_free[j] = 576 MLX4_MAX_MAC_NUM; 577 } else { 578 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 579 res_alloc->guaranteed[t] = 2; 580 } 581 break; 582 case RES_VLAN: 583 if (t == mlx4_master_func_num(dev)) { 584 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; 585 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; 586 for (j = 0; j < MLX4_MAX_PORTS; j++) 587 res_alloc->res_port_free[j] = 588 res_alloc->quota[t]; 589 } else { 590 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; 591 res_alloc->guaranteed[t] = 0; 592 } 593 break; 594 case RES_COUNTER: 595 res_alloc->quota[t] = dev->caps.max_counters; 596 if (t == mlx4_master_func_num(dev)) 597 res_alloc->guaranteed[t] = 598 MLX4_PF_COUNTERS_PER_PORT * 599 MLX4_MAX_PORTS; 600 else if (t <= max_vfs_guarantee_counter) 601 res_alloc->guaranteed[t] = 602 MLX4_VF_COUNTERS_PER_PORT * 603 MLX4_MAX_PORTS; 604 else 605 res_alloc->guaranteed[t] = 0; 606 res_alloc->res_free -= res_alloc->guaranteed[t]; 607 break; 608 default: 609 break; 610 } 611 if (i == RES_MAC || i == RES_VLAN) { 612 for (j = 0; j < dev->caps.num_ports; j++) 613 if (test_bit(j, actv_ports.ports)) 614 res_alloc->res_port_rsvd[j] += 615 res_alloc->guaranteed[t]; 616 } else { 617 res_alloc->res_reserved += res_alloc->guaranteed[t]; 618 } 619 } 620 } 621 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 622 return 0; 623 624 no_mem_err: 625 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 626 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 627 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 628 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 629 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 630 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 631 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 632 } 633 return -ENOMEM; 634 } 635 636 void mlx4_free_resource_tracker(struct mlx4_dev *dev, 637 enum mlx4_res_tracker_free_type type) 638 { 639 struct mlx4_priv *priv = mlx4_priv(dev); 640 int i; 641 642 if (priv->mfunc.master.res_tracker.slave_list) { 643 if (type != RES_TR_FREE_STRUCTS_ONLY) { 644 for (i = 0; i < dev->num_slaves; i++) { 645 if (type == RES_TR_FREE_ALL || 646 dev->caps.function != i) 647 mlx4_delete_all_resources_for_slave(dev, i); 648 } 649 /* free master's vlans */ 650 i = dev->caps.function; 651 mlx4_reset_roce_gids(dev, i); 652 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 653 rem_slave_vlans(dev, i); 654 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 655 } 656 657 if (type != RES_TR_FREE_SLAVES_ONLY) { 658 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 659 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 660 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 661 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 662 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 663 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 664 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 665 } 666 
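			/* the per-type quota/guaranteed/allocated arrays were freed above;
			 * now release the per-slave resource lists themselves
			 */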
kfree(priv->mfunc.master.res_tracker.slave_list); 667 priv->mfunc.master.res_tracker.slave_list = NULL; 668 } 669 } 670 } 671 672 static void update_pkey_index(struct mlx4_dev *dev, int slave, 673 struct mlx4_cmd_mailbox *inbox) 674 { 675 u8 sched = *(u8 *)(inbox->buf + 64); 676 u8 orig_index = *(u8 *)(inbox->buf + 35); 677 u8 new_index; 678 struct mlx4_priv *priv = mlx4_priv(dev); 679 int port; 680 681 port = (sched >> 6 & 1) + 1; 682 683 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; 684 *(u8 *)(inbox->buf + 35) = new_index; 685 } 686 687 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, 688 u8 slave) 689 { 690 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; 691 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); 692 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 693 int port; 694 695 if (MLX4_QP_ST_UD == ts) { 696 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 697 if (mlx4_is_eth(dev, port)) 698 qp_ctx->pri_path.mgid_index = 699 mlx4_get_base_gid_ix(dev, slave, port) | 0x80; 700 else 701 qp_ctx->pri_path.mgid_index = slave | 0x80; 702 703 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { 704 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 705 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 706 if (mlx4_is_eth(dev, port)) { 707 qp_ctx->pri_path.mgid_index += 708 mlx4_get_base_gid_ix(dev, slave, port); 709 qp_ctx->pri_path.mgid_index &= 0x7f; 710 } else { 711 qp_ctx->pri_path.mgid_index = slave & 0x7F; 712 } 713 } 714 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 715 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 716 if (mlx4_is_eth(dev, port)) { 717 qp_ctx->alt_path.mgid_index += 718 mlx4_get_base_gid_ix(dev, slave, port); 719 qp_ctx->alt_path.mgid_index &= 0x7f; 720 } else { 721 qp_ctx->alt_path.mgid_index = slave & 0x7F; 722 } 723 } 724 } 725 } 726 727 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, 728 u8 slave, int port); 729 730 static int update_vport_qp_param(struct mlx4_dev *dev, 731 struct mlx4_cmd_mailbox *inbox, 732 u8 slave, u32 qpn) 733 { 734 struct mlx4_qp_context *qpc = inbox->buf + 8; 735 struct mlx4_vport_oper_state *vp_oper; 736 struct mlx4_priv *priv; 737 u32 qp_type; 738 int port, err = 0; 739 740 port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                /* preserve IF_COUNTER flag */
                qpc->pri_path.vlan_control &=
                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        /* MAC spoof check: force the source MAC to the VF's registered mac_idx */
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct
mlx4_dev *dev, 862 enum mlx4_resource type, 863 u64 res_id, int *slave) 864 { 865 866 struct res_common *r; 867 int err = -ENOENT; 868 int id = res_id; 869 870 if (type == RES_QP) 871 id &= 0x7fffff; 872 spin_lock(mlx4_tlock(dev)); 873 874 r = find_res(dev, id, type); 875 if (r) { 876 *slave = r->owner; 877 err = 0; 878 } 879 spin_unlock(mlx4_tlock(dev)); 880 881 return err; 882 } 883 884 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, 885 enum mlx4_resource type) 886 { 887 struct res_common *r; 888 889 spin_lock_irq(mlx4_tlock(dev)); 890 r = find_res(dev, res_id, type); 891 if (r) 892 r->state = r->from_state; 893 spin_unlock_irq(mlx4_tlock(dev)); 894 } 895 896 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 897 u64 in_param, u64 *out_param, int port); 898 899 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port, 900 int counter_index) 901 { 902 struct res_common *r; 903 struct res_counter *counter; 904 int ret = 0; 905 906 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev)) 907 return ret; 908 909 spin_lock_irq(mlx4_tlock(dev)); 910 r = find_res(dev, counter_index, RES_COUNTER); 911 if (!r || r->owner != slave) 912 ret = -EINVAL; 913 counter = container_of(r, struct res_counter, com); 914 if (!counter->port) 915 counter->port = port; 916 917 spin_unlock_irq(mlx4_tlock(dev)); 918 return ret; 919 } 920 921 static int handle_unexisting_counter(struct mlx4_dev *dev, 922 struct mlx4_qp_context *qpc, u8 slave, 923 int port) 924 { 925 struct mlx4_priv *priv = mlx4_priv(dev); 926 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 927 struct res_common *tmp; 928 struct res_counter *counter; 929 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev); 930 int err = 0; 931 932 spin_lock_irq(mlx4_tlock(dev)); 933 list_for_each_entry(tmp, 934 &tracker->slave_list[slave].res_list[RES_COUNTER], 935 list) { 936 counter = container_of(tmp, struct res_counter, com); 937 if (port == counter->port) { 938 qpc->pri_path.counter_index = counter->com.res_id; 939 spin_unlock_irq(mlx4_tlock(dev)); 940 return 0; 941 } 942 } 943 spin_unlock_irq(mlx4_tlock(dev)); 944 945 /* No existing counter, need to allocate a new counter */ 946 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx, 947 port); 948 if (err == -ENOENT) { 949 err = 0; 950 } else if (err && err != -ENOSPC) { 951 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n", 952 __func__, slave, err); 953 } else { 954 qpc->pri_path.counter_index = counter_idx; 955 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n", 956 __func__, slave, qpc->pri_path.counter_index); 957 err = 0; 958 } 959 960 return err; 961 } 962 963 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc, 964 u8 slave, int port) 965 { 966 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev)) 967 return handle_existing_counter(dev, slave, port, 968 qpc->pri_path.counter_index); 969 970 return handle_unexisting_counter(dev, qpc, slave, port); 971 } 972 973 static struct res_common *alloc_qp_tr(int id) 974 { 975 struct res_qp *ret; 976 977 ret = kzalloc(sizeof *ret, GFP_KERNEL); 978 if (!ret) 979 return NULL; 980 981 ret->com.res_id = id; 982 ret->com.state = RES_QP_RESERVED; 983 ret->local_qpn = id; 984 INIT_LIST_HEAD(&ret->mcg_list); 985 spin_lock_init(&ret->mcg_spl); 986 atomic_set(&ret->ref_count, 0); 987 988 return &ret->com; 989 } 990 991 static struct res_common *alloc_mtt_tr(int id, int order) 992 { 993 struct res_mtt *ret; 994 
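        /* an MTT reservation is tracked by its base index; "order" (log2 of the
         * reserved range size) is kept so the same size can be checked and freed later
         */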
995 ret = kzalloc(sizeof *ret, GFP_KERNEL); 996 if (!ret) 997 return NULL; 998 999 ret->com.res_id = id; 1000 ret->order = order; 1001 ret->com.state = RES_MTT_ALLOCATED; 1002 atomic_set(&ret->ref_count, 0); 1003 1004 return &ret->com; 1005 } 1006 1007 static struct res_common *alloc_mpt_tr(int id, int key) 1008 { 1009 struct res_mpt *ret; 1010 1011 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1012 if (!ret) 1013 return NULL; 1014 1015 ret->com.res_id = id; 1016 ret->com.state = RES_MPT_RESERVED; 1017 ret->key = key; 1018 1019 return &ret->com; 1020 } 1021 1022 static struct res_common *alloc_eq_tr(int id) 1023 { 1024 struct res_eq *ret; 1025 1026 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1027 if (!ret) 1028 return NULL; 1029 1030 ret->com.res_id = id; 1031 ret->com.state = RES_EQ_RESERVED; 1032 1033 return &ret->com; 1034 } 1035 1036 static struct res_common *alloc_cq_tr(int id) 1037 { 1038 struct res_cq *ret; 1039 1040 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1041 if (!ret) 1042 return NULL; 1043 1044 ret->com.res_id = id; 1045 ret->com.state = RES_CQ_ALLOCATED; 1046 atomic_set(&ret->ref_count, 0); 1047 1048 return &ret->com; 1049 } 1050 1051 static struct res_common *alloc_srq_tr(int id) 1052 { 1053 struct res_srq *ret; 1054 1055 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1056 if (!ret) 1057 return NULL; 1058 1059 ret->com.res_id = id; 1060 ret->com.state = RES_SRQ_ALLOCATED; 1061 atomic_set(&ret->ref_count, 0); 1062 1063 return &ret->com; 1064 } 1065 1066 static struct res_common *alloc_counter_tr(int id, int port) 1067 { 1068 struct res_counter *ret; 1069 1070 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1071 if (!ret) 1072 return NULL; 1073 1074 ret->com.res_id = id; 1075 ret->com.state = RES_COUNTER_ALLOCATED; 1076 ret->port = port; 1077 1078 return &ret->com; 1079 } 1080 1081 static struct res_common *alloc_xrcdn_tr(int id) 1082 { 1083 struct res_xrcdn *ret; 1084 1085 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1086 if (!ret) 1087 return NULL; 1088 1089 ret->com.res_id = id; 1090 ret->com.state = RES_XRCD_ALLOCATED; 1091 1092 return &ret->com; 1093 } 1094 1095 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) 1096 { 1097 struct res_fs_rule *ret; 1098 1099 ret = kzalloc(sizeof *ret, GFP_KERNEL); 1100 if (!ret) 1101 return NULL; 1102 1103 ret->com.res_id = id; 1104 ret->com.state = RES_FS_RULE_ALLOCATED; 1105 ret->qpn = qpn; 1106 return &ret->com; 1107 } 1108 1109 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, 1110 int extra) 1111 { 1112 struct res_common *ret; 1113 1114 switch (type) { 1115 case RES_QP: 1116 ret = alloc_qp_tr(id); 1117 break; 1118 case RES_MPT: 1119 ret = alloc_mpt_tr(id, extra); 1120 break; 1121 case RES_MTT: 1122 ret = alloc_mtt_tr(id, extra); 1123 break; 1124 case RES_EQ: 1125 ret = alloc_eq_tr(id); 1126 break; 1127 case RES_CQ: 1128 ret = alloc_cq_tr(id); 1129 break; 1130 case RES_SRQ: 1131 ret = alloc_srq_tr(id); 1132 break; 1133 case RES_MAC: 1134 pr_err("implementation missing\n"); 1135 return NULL; 1136 case RES_COUNTER: 1137 ret = alloc_counter_tr(id, extra); 1138 break; 1139 case RES_XRCD: 1140 ret = alloc_xrcdn_tr(id); 1141 break; 1142 case RES_FS_RULE: 1143 ret = alloc_fs_rule_tr(id, extra); 1144 break; 1145 default: 1146 return NULL; 1147 } 1148 if (ret) 1149 ret->owner = slave; 1150 1151 return ret; 1152 } 1153 1154 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, 1155 struct mlx4_counter *data) 1156 { 1157 struct mlx4_priv *priv = mlx4_priv(dev); 1158 struct mlx4_resource_tracker *tracker = 
&priv->mfunc.master.res_tracker; 1159 struct res_common *tmp; 1160 struct res_counter *counter; 1161 int *counters_arr; 1162 int i = 0, err = 0; 1163 1164 memset(data, 0, sizeof(*data)); 1165 1166 counters_arr = kmalloc_array(dev->caps.max_counters, 1167 sizeof(*counters_arr), GFP_KERNEL); 1168 if (!counters_arr) 1169 return -ENOMEM; 1170 1171 spin_lock_irq(mlx4_tlock(dev)); 1172 list_for_each_entry(tmp, 1173 &tracker->slave_list[slave].res_list[RES_COUNTER], 1174 list) { 1175 counter = container_of(tmp, struct res_counter, com); 1176 if (counter->port == port) { 1177 counters_arr[i] = (int)tmp->res_id; 1178 i++; 1179 } 1180 } 1181 spin_unlock_irq(mlx4_tlock(dev)); 1182 counters_arr[i] = -1; 1183 1184 i = 0; 1185 1186 while (counters_arr[i] != -1) { 1187 err = mlx4_get_counter_stats(dev, counters_arr[i], data, 1188 0); 1189 if (err) { 1190 memset(data, 0, sizeof(*data)); 1191 goto table_changed; 1192 } 1193 i++; 1194 } 1195 1196 table_changed: 1197 kfree(counters_arr); 1198 return 0; 1199 } 1200 1201 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, 1202 enum mlx4_resource type, int extra) 1203 { 1204 int i; 1205 int err; 1206 struct mlx4_priv *priv = mlx4_priv(dev); 1207 struct res_common **res_arr; 1208 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1209 struct rb_root *root = &tracker->res_tree[type]; 1210 1211 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); 1212 if (!res_arr) 1213 return -ENOMEM; 1214 1215 for (i = 0; i < count; ++i) { 1216 res_arr[i] = alloc_tr(base + i, type, slave, extra); 1217 if (!res_arr[i]) { 1218 for (--i; i >= 0; --i) 1219 kfree(res_arr[i]); 1220 1221 kfree(res_arr); 1222 return -ENOMEM; 1223 } 1224 } 1225 1226 spin_lock_irq(mlx4_tlock(dev)); 1227 for (i = 0; i < count; ++i) { 1228 if (find_res(dev, base + i, type)) { 1229 err = -EEXIST; 1230 goto undo; 1231 } 1232 err = res_tracker_insert(root, res_arr[i]); 1233 if (err) 1234 goto undo; 1235 list_add_tail(&res_arr[i]->list, 1236 &tracker->slave_list[slave].res_list[type]); 1237 } 1238 spin_unlock_irq(mlx4_tlock(dev)); 1239 kfree(res_arr); 1240 1241 return 0; 1242 1243 undo: 1244 for (--i; i >= 0; --i) { 1245 rb_erase(&res_arr[i]->node, root); 1246 list_del_init(&res_arr[i]->list); 1247 } 1248 1249 spin_unlock_irq(mlx4_tlock(dev)); 1250 1251 for (i = 0; i < count; ++i) 1252 kfree(res_arr[i]); 1253 1254 kfree(res_arr); 1255 1256 return err; 1257 } 1258 1259 static int remove_qp_ok(struct res_qp *res) 1260 { 1261 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || 1262 !list_empty(&res->mcg_list)) { 1263 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", 1264 res->com.state, atomic_read(&res->ref_count)); 1265 return -EBUSY; 1266 } else if (res->com.state != RES_QP_RESERVED) { 1267 return -EPERM; 1268 } 1269 1270 return 0; 1271 } 1272 1273 static int remove_mtt_ok(struct res_mtt *res, int order) 1274 { 1275 if (res->com.state == RES_MTT_BUSY || 1276 atomic_read(&res->ref_count)) { 1277 pr_devel("%s-%d: state %s, ref_count %d\n", 1278 __func__, __LINE__, 1279 mtt_states_str(res->com.state), 1280 atomic_read(&res->ref_count)); 1281 return -EBUSY; 1282 } else if (res->com.state != RES_MTT_ALLOCATED) 1283 return -EPERM; 1284 else if (res->order != order) 1285 return -EINVAL; 1286 1287 return 0; 1288 } 1289 1290 static int remove_mpt_ok(struct res_mpt *res) 1291 { 1292 if (res->com.state == RES_MPT_BUSY) 1293 return -EBUSY; 1294 else if (res->com.state != RES_MPT_RESERVED) 1295 return -EPERM; 1296 1297 return 0; 1298 
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

/* per-type check that a tracked resource is in a state that allows removal */
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

/* remove a contiguous range of tracked resources: first check that every entry
 * exists, is owned by this slave and is in a removable state, then erase them
 * from the tree and from the slave's list.
 */
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
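        /* look up the QP and verify the calling slave owns it before
         * attempting the requested state transition
         */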
r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); 1438 if (!r) 1439 err = -ENOENT; 1440 else if (r->com.owner != slave) 1441 err = -EPERM; 1442 else { 1443 switch (state) { 1444 case RES_QP_BUSY: 1445 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n", 1446 __func__, r->com.res_id); 1447 err = -EBUSY; 1448 break; 1449 1450 case RES_QP_RESERVED: 1451 if (r->com.state == RES_QP_MAPPED && !alloc) 1452 break; 1453 1454 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); 1455 err = -EINVAL; 1456 break; 1457 1458 case RES_QP_MAPPED: 1459 if ((r->com.state == RES_QP_RESERVED && alloc) || 1460 r->com.state == RES_QP_HW) 1461 break; 1462 else { 1463 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", 1464 r->com.res_id); 1465 err = -EINVAL; 1466 } 1467 1468 break; 1469 1470 case RES_QP_HW: 1471 if (r->com.state != RES_QP_MAPPED) 1472 err = -EINVAL; 1473 break; 1474 default: 1475 err = -EINVAL; 1476 } 1477 1478 if (!err) { 1479 r->com.from_state = r->com.state; 1480 r->com.to_state = state; 1481 r->com.state = RES_QP_BUSY; 1482 if (qp) 1483 *qp = r; 1484 } 1485 } 1486 1487 spin_unlock_irq(mlx4_tlock(dev)); 1488 1489 return err; 1490 } 1491 1492 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1493 enum res_mpt_states state, struct res_mpt **mpt) 1494 { 1495 struct mlx4_priv *priv = mlx4_priv(dev); 1496 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1497 struct res_mpt *r; 1498 int err = 0; 1499 1500 spin_lock_irq(mlx4_tlock(dev)); 1501 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index); 1502 if (!r) 1503 err = -ENOENT; 1504 else if (r->com.owner != slave) 1505 err = -EPERM; 1506 else { 1507 switch (state) { 1508 case RES_MPT_BUSY: 1509 err = -EINVAL; 1510 break; 1511 1512 case RES_MPT_RESERVED: 1513 if (r->com.state != RES_MPT_MAPPED) 1514 err = -EINVAL; 1515 break; 1516 1517 case RES_MPT_MAPPED: 1518 if (r->com.state != RES_MPT_RESERVED && 1519 r->com.state != RES_MPT_HW) 1520 err = -EINVAL; 1521 break; 1522 1523 case RES_MPT_HW: 1524 if (r->com.state != RES_MPT_MAPPED) 1525 err = -EINVAL; 1526 break; 1527 default: 1528 err = -EINVAL; 1529 } 1530 1531 if (!err) { 1532 r->com.from_state = r->com.state; 1533 r->com.to_state = state; 1534 r->com.state = RES_MPT_BUSY; 1535 if (mpt) 1536 *mpt = r; 1537 } 1538 } 1539 1540 spin_unlock_irq(mlx4_tlock(dev)); 1541 1542 return err; 1543 } 1544 1545 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1546 enum res_eq_states state, struct res_eq **eq) 1547 { 1548 struct mlx4_priv *priv = mlx4_priv(dev); 1549 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1550 struct res_eq *r; 1551 int err = 0; 1552 1553 spin_lock_irq(mlx4_tlock(dev)); 1554 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index); 1555 if (!r) 1556 err = -ENOENT; 1557 else if (r->com.owner != slave) 1558 err = -EPERM; 1559 else { 1560 switch (state) { 1561 case RES_EQ_BUSY: 1562 err = -EINVAL; 1563 break; 1564 1565 case RES_EQ_RESERVED: 1566 if (r->com.state != RES_EQ_HW) 1567 err = -EINVAL; 1568 break; 1569 1570 case RES_EQ_HW: 1571 if (r->com.state != RES_EQ_RESERVED) 1572 err = -EINVAL; 1573 break; 1574 1575 default: 1576 err = -EINVAL; 1577 } 1578 1579 if (!err) { 1580 r->com.from_state = r->com.state; 1581 r->com.to_state = state; 1582 r->com.state = RES_EQ_BUSY; 1583 if (eq) 1584 *eq = r; 1585 } 1586 } 1587 1588 spin_unlock_irq(mlx4_tlock(dev)); 1589 1590 return err; 1591 } 1592 1593 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, 1594 enum res_cq_states 
state, struct res_cq **cq) 1595 { 1596 struct mlx4_priv *priv = mlx4_priv(dev); 1597 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1598 struct res_cq *r; 1599 int err; 1600 1601 spin_lock_irq(mlx4_tlock(dev)); 1602 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); 1603 if (!r) { 1604 err = -ENOENT; 1605 } else if (r->com.owner != slave) { 1606 err = -EPERM; 1607 } else if (state == RES_CQ_ALLOCATED) { 1608 if (r->com.state != RES_CQ_HW) 1609 err = -EINVAL; 1610 else if (atomic_read(&r->ref_count)) 1611 err = -EBUSY; 1612 else 1613 err = 0; 1614 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { 1615 err = -EINVAL; 1616 } else { 1617 err = 0; 1618 } 1619 1620 if (!err) { 1621 r->com.from_state = r->com.state; 1622 r->com.to_state = state; 1623 r->com.state = RES_CQ_BUSY; 1624 if (cq) 1625 *cq = r; 1626 } 1627 1628 spin_unlock_irq(mlx4_tlock(dev)); 1629 1630 return err; 1631 } 1632 1633 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, 1634 enum res_srq_states state, struct res_srq **srq) 1635 { 1636 struct mlx4_priv *priv = mlx4_priv(dev); 1637 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1638 struct res_srq *r; 1639 int err = 0; 1640 1641 spin_lock_irq(mlx4_tlock(dev)); 1642 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); 1643 if (!r) { 1644 err = -ENOENT; 1645 } else if (r->com.owner != slave) { 1646 err = -EPERM; 1647 } else if (state == RES_SRQ_ALLOCATED) { 1648 if (r->com.state != RES_SRQ_HW) 1649 err = -EINVAL; 1650 else if (atomic_read(&r->ref_count)) 1651 err = -EBUSY; 1652 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { 1653 err = -EINVAL; 1654 } 1655 1656 if (!err) { 1657 r->com.from_state = r->com.state; 1658 r->com.to_state = state; 1659 r->com.state = RES_SRQ_BUSY; 1660 if (srq) 1661 *srq = r; 1662 } 1663 1664 spin_unlock_irq(mlx4_tlock(dev)); 1665 1666 return err; 1667 } 1668 1669 static void res_abort_move(struct mlx4_dev *dev, int slave, 1670 enum mlx4_resource type, int id) 1671 { 1672 struct mlx4_priv *priv = mlx4_priv(dev); 1673 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1674 struct res_common *r; 1675 1676 spin_lock_irq(mlx4_tlock(dev)); 1677 r = res_tracker_lookup(&tracker->res_tree[type], id); 1678 if (r && (r->owner == slave)) 1679 r->state = r->from_state; 1680 spin_unlock_irq(mlx4_tlock(dev)); 1681 } 1682 1683 static void res_end_move(struct mlx4_dev *dev, int slave, 1684 enum mlx4_resource type, int id) 1685 { 1686 struct mlx4_priv *priv = mlx4_priv(dev); 1687 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1688 struct res_common *r; 1689 1690 spin_lock_irq(mlx4_tlock(dev)); 1691 r = res_tracker_lookup(&tracker->res_tree[type], id); 1692 if (r && (r->owner == slave)) 1693 r->state = r->to_state; 1694 spin_unlock_irq(mlx4_tlock(dev)); 1695 } 1696 1697 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) 1698 { 1699 return mlx4_is_qp_reserved(dev, qpn) && 1700 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); 1701 } 1702 1703 static int fw_reserved(struct mlx4_dev *dev, int qpn) 1704 { 1705 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 1706 } 1707 1708 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1709 u64 in_param, u64 *out_param) 1710 { 1711 int err; 1712 int count; 1713 int align; 1714 int base; 1715 int qpn; 1716 u8 flags; 1717 1718 switch (op) { 1719 case RES_OP_RESERVE: 1720 count = 
get_param_l(&in_param) & 0xffffff; 1721 /* Turn off all unsupported QP allocation flags that the 1722 * slave tries to set. 1723 */ 1724 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask; 1725 align = get_param_h(&in_param); 1726 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1727 if (err) 1728 return err; 1729 1730 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags); 1731 if (err) { 1732 mlx4_release_resource(dev, slave, RES_QP, count, 0); 1733 return err; 1734 } 1735 1736 err = add_res_range(dev, slave, base, count, RES_QP, 0); 1737 if (err) { 1738 mlx4_release_resource(dev, slave, RES_QP, count, 0); 1739 __mlx4_qp_release_range(dev, base, count); 1740 return err; 1741 } 1742 set_param_l(out_param, base); 1743 break; 1744 case RES_OP_MAP_ICM: 1745 qpn = get_param_l(&in_param) & 0x7fffff; 1746 if (valid_reserved(dev, slave, qpn)) { 1747 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); 1748 if (err) 1749 return err; 1750 } 1751 1752 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, 1753 NULL, 1); 1754 if (err) 1755 return err; 1756 1757 if (!fw_reserved(dev, qpn)) { 1758 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); 1759 if (err) { 1760 res_abort_move(dev, slave, RES_QP, qpn); 1761 return err; 1762 } 1763 } 1764 1765 res_end_move(dev, slave, RES_QP, qpn); 1766 break; 1767 1768 default: 1769 err = -EINVAL; 1770 break; 1771 } 1772 return err; 1773 } 1774 1775 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1776 u64 in_param, u64 *out_param) 1777 { 1778 int err = -EINVAL; 1779 int base; 1780 int order; 1781 1782 if (op != RES_OP_RESERVE_AND_MAP) 1783 return err; 1784 1785 order = get_param_l(&in_param); 1786 1787 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); 1788 if (err) 1789 return err; 1790 1791 base = __mlx4_alloc_mtt_range(dev, order); 1792 if (base == -1) { 1793 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1794 return -ENOMEM; 1795 } 1796 1797 err = add_res_range(dev, slave, base, 1, RES_MTT, order); 1798 if (err) { 1799 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1800 __mlx4_free_mtt_range(dev, base, order); 1801 } else { 1802 set_param_l(out_param, base); 1803 } 1804 1805 return err; 1806 } 1807 1808 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1809 u64 in_param, u64 *out_param) 1810 { 1811 int err = -EINVAL; 1812 int index; 1813 int id; 1814 struct res_mpt *mpt; 1815 1816 switch (op) { 1817 case RES_OP_RESERVE: 1818 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); 1819 if (err) 1820 break; 1821 1822 index = __mlx4_mpt_reserve(dev); 1823 if (index == -1) { 1824 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1825 break; 1826 } 1827 id = index & mpt_mask(dev); 1828 1829 err = add_res_range(dev, slave, id, 1, RES_MPT, index); 1830 if (err) { 1831 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1832 __mlx4_mpt_release(dev, index); 1833 break; 1834 } 1835 set_param_l(out_param, index); 1836 break; 1837 case RES_OP_MAP_ICM: 1838 index = get_param_l(&in_param); 1839 id = index & mpt_mask(dev); 1840 err = mr_res_start_move_to(dev, slave, id, 1841 RES_MPT_MAPPED, &mpt); 1842 if (err) 1843 return err; 1844 1845 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); 1846 if (err) { 1847 res_abort_move(dev, slave, RES_MPT, id); 1848 return err; 1849 } 1850 1851 res_end_move(dev, slave, RES_MPT, id); 1852 break; 1853 } 1854 return err; 1855 } 1856 1857 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1858 
u64 in_param, u64 *out_param) 1859 { 1860 int cqn; 1861 int err; 1862 1863 switch (op) { 1864 case RES_OP_RESERVE_AND_MAP: 1865 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); 1866 if (err) 1867 break; 1868 1869 err = __mlx4_cq_alloc_icm(dev, &cqn); 1870 if (err) { 1871 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1872 break; 1873 } 1874 1875 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); 1876 if (err) { 1877 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1878 __mlx4_cq_free_icm(dev, cqn); 1879 break; 1880 } 1881 1882 set_param_l(out_param, cqn); 1883 break; 1884 1885 default: 1886 err = -EINVAL; 1887 } 1888 1889 return err; 1890 } 1891 1892 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1893 u64 in_param, u64 *out_param) 1894 { 1895 int srqn; 1896 int err; 1897 1898 switch (op) { 1899 case RES_OP_RESERVE_AND_MAP: 1900 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); 1901 if (err) 1902 break; 1903 1904 err = __mlx4_srq_alloc_icm(dev, &srqn); 1905 if (err) { 1906 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1907 break; 1908 } 1909 1910 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 1911 if (err) { 1912 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1913 __mlx4_srq_free_icm(dev, srqn); 1914 break; 1915 } 1916 1917 set_param_l(out_param, srqn); 1918 break; 1919 1920 default: 1921 err = -EINVAL; 1922 } 1923 1924 return err; 1925 } 1926 1927 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, 1928 u8 smac_index, u64 *mac) 1929 { 1930 struct mlx4_priv *priv = mlx4_priv(dev); 1931 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1932 struct list_head *mac_list = 1933 &tracker->slave_list[slave].res_list[RES_MAC]; 1934 struct mac_res *res, *tmp; 1935 1936 list_for_each_entry_safe(res, tmp, mac_list, list) { 1937 if (res->smac_index == smac_index && res->port == (u8) port) { 1938 *mac = res->mac; 1939 return 0; 1940 } 1941 } 1942 return -ENOENT; 1943 } 1944 1945 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) 1946 { 1947 struct mlx4_priv *priv = mlx4_priv(dev); 1948 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1949 struct list_head *mac_list = 1950 &tracker->slave_list[slave].res_list[RES_MAC]; 1951 struct mac_res *res, *tmp; 1952 1953 list_for_each_entry_safe(res, tmp, mac_list, list) { 1954 if (res->mac == mac && res->port == (u8) port) { 1955 /* mac found. 
update ref count */ 1956 ++res->ref_count; 1957 return 0; 1958 } 1959 } 1960 1961 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1962 return -EINVAL; 1963 res = kzalloc(sizeof *res, GFP_KERNEL); 1964 if (!res) { 1965 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1966 return -ENOMEM; 1967 } 1968 res->mac = mac; 1969 res->port = (u8) port; 1970 res->smac_index = smac_index; 1971 res->ref_count = 1; 1972 list_add_tail(&res->list, 1973 &tracker->slave_list[slave].res_list[RES_MAC]); 1974 return 0; 1975 } 1976 1977 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, 1978 int port) 1979 { 1980 struct mlx4_priv *priv = mlx4_priv(dev); 1981 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1982 struct list_head *mac_list = 1983 &tracker->slave_list[slave].res_list[RES_MAC]; 1984 struct mac_res *res, *tmp; 1985 1986 list_for_each_entry_safe(res, tmp, mac_list, list) { 1987 if (res->mac == mac && res->port == (u8) port) { 1988 if (!--res->ref_count) { 1989 list_del(&res->list); 1990 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1991 kfree(res); 1992 } 1993 break; 1994 } 1995 } 1996 } 1997 1998 static void rem_slave_macs(struct mlx4_dev *dev, int slave) 1999 { 2000 struct mlx4_priv *priv = mlx4_priv(dev); 2001 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 2002 struct list_head *mac_list = 2003 &tracker->slave_list[slave].res_list[RES_MAC]; 2004 struct mac_res *res, *tmp; 2005 int i; 2006 2007 list_for_each_entry_safe(res, tmp, mac_list, list) { 2008 list_del(&res->list); 2009 /* dereference the mac the num times the slave referenced it */ 2010 for (i = 0; i < res->ref_count; i++) 2011 __mlx4_unregister_mac(dev, res->port, res->mac); 2012 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 2013 kfree(res); 2014 } 2015 } 2016 2017 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2018 u64 in_param, u64 *out_param, int in_port) 2019 { 2020 int err = -EINVAL; 2021 int port; 2022 u64 mac; 2023 u8 smac_index; 2024 2025 if (op != RES_OP_RESERVE_AND_MAP) 2026 return err; 2027 2028 port = !in_port ? get_param_l(out_param) : in_port; 2029 port = mlx4_slave_convert_port( 2030 dev, slave, port); 2031 2032 if (port < 0) 2033 return -EINVAL; 2034 mac = in_param; 2035 2036 err = __mlx4_register_mac(dev, port, mac); 2037 if (err >= 0) { 2038 smac_index = err; 2039 set_param_l(out_param, err); 2040 err = 0; 2041 } 2042 2043 if (!err) { 2044 err = mac_add_to_slave(dev, slave, mac, port, smac_index); 2045 if (err) 2046 __mlx4_unregister_mac(dev, port, mac); 2047 } 2048 return err; 2049 } 2050 2051 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, 2052 int port, int vlan_index) 2053 { 2054 struct mlx4_priv *priv = mlx4_priv(dev); 2055 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 2056 struct list_head *vlan_list = 2057 &tracker->slave_list[slave].res_list[RES_VLAN]; 2058 struct vlan_res *res, *tmp; 2059 2060 list_for_each_entry_safe(res, tmp, vlan_list, list) { 2061 if (res->vlan == vlan && res->port == (u8) port) { 2062 /* vlan found. 
update ref count */ 2063 ++res->ref_count; 2064 return 0; 2065 } 2066 } 2067 2068 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) 2069 return -EINVAL; 2070 res = kzalloc(sizeof(*res), GFP_KERNEL); 2071 if (!res) { 2072 mlx4_release_resource(dev, slave, RES_VLAN, 1, port); 2073 return -ENOMEM; 2074 } 2075 res->vlan = vlan; 2076 res->port = (u8) port; 2077 res->vlan_index = vlan_index; 2078 res->ref_count = 1; 2079 list_add_tail(&res->list, 2080 &tracker->slave_list[slave].res_list[RES_VLAN]); 2081 return 0; 2082 } 2083 2084 2085 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, 2086 int port) 2087 { 2088 struct mlx4_priv *priv = mlx4_priv(dev); 2089 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 2090 struct list_head *vlan_list = 2091 &tracker->slave_list[slave].res_list[RES_VLAN]; 2092 struct vlan_res *res, *tmp; 2093 2094 list_for_each_entry_safe(res, tmp, vlan_list, list) { 2095 if (res->vlan == vlan && res->port == (u8) port) { 2096 if (!--res->ref_count) { 2097 list_del(&res->list); 2098 mlx4_release_resource(dev, slave, RES_VLAN, 2099 1, port); 2100 kfree(res); 2101 } 2102 break; 2103 } 2104 } 2105 } 2106 2107 static void rem_slave_vlans(struct mlx4_dev *dev, int slave) 2108 { 2109 struct mlx4_priv *priv = mlx4_priv(dev); 2110 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 2111 struct list_head *vlan_list = 2112 &tracker->slave_list[slave].res_list[RES_VLAN]; 2113 struct vlan_res *res, *tmp; 2114 int i; 2115 2116 list_for_each_entry_safe(res, tmp, vlan_list, list) { 2117 list_del(&res->list); 2118 /* dereference the vlan the num times the slave referenced it */ 2119 for (i = 0; i < res->ref_count; i++) 2120 __mlx4_unregister_vlan(dev, res->port, res->vlan); 2121 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); 2122 kfree(res); 2123 } 2124 } 2125 2126 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2127 u64 in_param, u64 *out_param, int in_port) 2128 { 2129 struct mlx4_priv *priv = mlx4_priv(dev); 2130 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2131 int err; 2132 u16 vlan; 2133 int vlan_index; 2134 int port; 2135 2136 port = !in_port ? get_param_l(out_param) : in_port; 2137 2138 if (!port || op != RES_OP_RESERVE_AND_MAP) 2139 return -EINVAL; 2140 2141 port = mlx4_slave_convert_port( 2142 dev, slave, port); 2143 2144 if (port < 0) 2145 return -EINVAL; 2146 /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ 2147 if (!in_port && port > 0 && port <= dev->caps.num_ports) { 2148 slave_state[slave].old_vlan_api = true; 2149 return 0; 2150 } 2151 2152 vlan = (u16) in_param; 2153 2154 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); 2155 if (!err) { 2156 set_param_l(out_param, (u32) vlan_index); 2157 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); 2158 if (err) 2159 __mlx4_unregister_vlan(dev, port, vlan); 2160 } 2161 return err; 2162 } 2163 2164 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2165 u64 in_param, u64 *out_param, int port) 2166 { 2167 u32 index; 2168 int err; 2169 2170 if (op != RES_OP_RESERVE) 2171 return -EINVAL; 2172 2173 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); 2174 if (err) 2175 return err; 2176 2177 err = __mlx4_counter_alloc(dev, &index); 2178 if (err) { 2179 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2180 return err; 2181 } 2182 2183 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port); 2184 if (err) { 2185 __mlx4_counter_free(dev, index); 2186 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2187 } else { 2188 set_param_l(out_param, index); 2189 } 2190 2191 return err; 2192 } 2193 2194 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2195 u64 in_param, u64 *out_param) 2196 { 2197 u32 xrcdn; 2198 int err; 2199 2200 if (op != RES_OP_RESERVE) 2201 return -EINVAL; 2202 2203 err = __mlx4_xrcd_alloc(dev, &xrcdn); 2204 if (err) 2205 return err; 2206 2207 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); 2208 if (err) 2209 __mlx4_xrcd_free(dev, xrcdn); 2210 else 2211 set_param_l(out_param, xrcdn); 2212 2213 return err; 2214 } 2215 2216 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, 2217 struct mlx4_vhcr *vhcr, 2218 struct mlx4_cmd_mailbox *inbox, 2219 struct mlx4_cmd_mailbox *outbox, 2220 struct mlx4_cmd_info *cmd) 2221 { 2222 int err; 2223 int alop = vhcr->op_modifier; 2224 2225 switch (vhcr->in_modifier & 0xFF) { 2226 case RES_QP: 2227 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, 2228 vhcr->in_param, &vhcr->out_param); 2229 break; 2230 2231 case RES_MTT: 2232 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, 2233 vhcr->in_param, &vhcr->out_param); 2234 break; 2235 2236 case RES_MPT: 2237 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, 2238 vhcr->in_param, &vhcr->out_param); 2239 break; 2240 2241 case RES_CQ: 2242 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, 2243 vhcr->in_param, &vhcr->out_param); 2244 break; 2245 2246 case RES_SRQ: 2247 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, 2248 vhcr->in_param, &vhcr->out_param); 2249 break; 2250 2251 case RES_MAC: 2252 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, 2253 vhcr->in_param, &vhcr->out_param, 2254 (vhcr->in_modifier >> 8) & 0xFF); 2255 break; 2256 2257 case RES_VLAN: 2258 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, 2259 vhcr->in_param, &vhcr->out_param, 2260 (vhcr->in_modifier >> 8) & 0xFF); 2261 break; 2262 2263 case RES_COUNTER: 2264 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, 2265 vhcr->in_param, &vhcr->out_param, 0); 2266 break; 2267 2268 case RES_XRCD: 2269 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, 2270 vhcr->in_param, &vhcr->out_param); 2271 break; 2272 2273 default: 2274 err = -EINVAL; 2275 break; 2276 } 2277 2278 return err; 2279 } 2280 2281 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2282 u64 in_param) 2283 { 2284 int err; 2285 int count; 2286 
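/* RES_OP_RESERVE returns a whole range to the allocator: the low dword of
 * in_param carries the base QPN and the high dword the count.
 * RES_OP_MAP_ICM instead releases the ICM backing of a single QPN.
 */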
int base; 2287 int qpn; 2288 2289 switch (op) { 2290 case RES_OP_RESERVE: 2291 base = get_param_l(&in_param) & 0x7fffff; 2292 count = get_param_h(&in_param); 2293 err = rem_res_range(dev, slave, base, count, RES_QP, 0); 2294 if (err) 2295 break; 2296 mlx4_release_resource(dev, slave, RES_QP, count, 0); 2297 __mlx4_qp_release_range(dev, base, count); 2298 break; 2299 case RES_OP_MAP_ICM: 2300 qpn = get_param_l(&in_param) & 0x7fffff; 2301 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, 2302 NULL, 0); 2303 if (err) 2304 return err; 2305 2306 if (!fw_reserved(dev, qpn)) 2307 __mlx4_qp_free_icm(dev, qpn); 2308 2309 res_end_move(dev, slave, RES_QP, qpn); 2310 2311 if (valid_reserved(dev, slave, qpn)) 2312 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); 2313 break; 2314 default: 2315 err = -EINVAL; 2316 break; 2317 } 2318 return err; 2319 } 2320 2321 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2322 u64 in_param, u64 *out_param) 2323 { 2324 int err = -EINVAL; 2325 int base; 2326 int order; 2327 2328 if (op != RES_OP_RESERVE_AND_MAP) 2329 return err; 2330 2331 base = get_param_l(&in_param); 2332 order = get_param_h(&in_param); 2333 err = rem_res_range(dev, slave, base, 1, RES_MTT, order); 2334 if (!err) { 2335 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 2336 __mlx4_free_mtt_range(dev, base, order); 2337 } 2338 return err; 2339 } 2340 2341 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2342 u64 in_param) 2343 { 2344 int err = -EINVAL; 2345 int index; 2346 int id; 2347 struct res_mpt *mpt; 2348 2349 switch (op) { 2350 case RES_OP_RESERVE: 2351 index = get_param_l(&in_param); 2352 id = index & mpt_mask(dev); 2353 err = get_res(dev, slave, id, RES_MPT, &mpt); 2354 if (err) 2355 break; 2356 index = mpt->key; 2357 put_res(dev, slave, id, RES_MPT); 2358 2359 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); 2360 if (err) 2361 break; 2362 mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 2363 __mlx4_mpt_release(dev, index); 2364 break; 2365 case RES_OP_MAP_ICM: 2366 index = get_param_l(&in_param); 2367 id = index & mpt_mask(dev); 2368 err = mr_res_start_move_to(dev, slave, id, 2369 RES_MPT_RESERVED, &mpt); 2370 if (err) 2371 return err; 2372 2373 __mlx4_mpt_free_icm(dev, mpt->key); 2374 res_end_move(dev, slave, RES_MPT, id); 2375 return err; 2376 break; 2377 default: 2378 err = -EINVAL; 2379 break; 2380 } 2381 return err; 2382 } 2383 2384 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2385 u64 in_param, u64 *out_param) 2386 { 2387 int cqn; 2388 int err; 2389 2390 switch (op) { 2391 case RES_OP_RESERVE_AND_MAP: 2392 cqn = get_param_l(&in_param); 2393 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); 2394 if (err) 2395 break; 2396 2397 mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 2398 __mlx4_cq_free_icm(dev, cqn); 2399 break; 2400 2401 default: 2402 err = -EINVAL; 2403 break; 2404 } 2405 2406 return err; 2407 } 2408 2409 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2410 u64 in_param, u64 *out_param) 2411 { 2412 int srqn; 2413 int err; 2414 2415 switch (op) { 2416 case RES_OP_RESERVE_AND_MAP: 2417 srqn = get_param_l(&in_param); 2418 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 2419 if (err) 2420 break; 2421 2422 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 2423 __mlx4_srq_free_icm(dev, srqn); 2424 break; 2425 2426 default: 2427 err = -EINVAL; 2428 break; 2429 } 2430 2431 return err; 2432 } 2433 2434 static int mac_free_res(struct mlx4_dev *dev, 
int slave, int op, int cmd, 2435 u64 in_param, u64 *out_param, int in_port) 2436 { 2437 int port; 2438 int err = 0; 2439 2440 switch (op) { 2441 case RES_OP_RESERVE_AND_MAP: 2442 port = !in_port ? get_param_l(out_param) : in_port; 2443 port = mlx4_slave_convert_port( 2444 dev, slave, port); 2445 2446 if (port < 0) 2447 return -EINVAL; 2448 mac_del_from_slave(dev, slave, in_param, port); 2449 __mlx4_unregister_mac(dev, port, in_param); 2450 break; 2451 default: 2452 err = -EINVAL; 2453 break; 2454 } 2455 2456 return err; 2457 2458 } 2459 2460 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2461 u64 in_param, u64 *out_param, int port) 2462 { 2463 struct mlx4_priv *priv = mlx4_priv(dev); 2464 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2465 int err = 0; 2466 2467 port = mlx4_slave_convert_port( 2468 dev, slave, port); 2469 2470 if (port < 0) 2471 return -EINVAL; 2472 switch (op) { 2473 case RES_OP_RESERVE_AND_MAP: 2474 if (slave_state[slave].old_vlan_api) 2475 return 0; 2476 if (!port) 2477 return -EINVAL; 2478 vlan_del_from_slave(dev, slave, in_param, port); 2479 __mlx4_unregister_vlan(dev, port, in_param); 2480 break; 2481 default: 2482 err = -EINVAL; 2483 break; 2484 } 2485 2486 return err; 2487 } 2488 2489 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2490 u64 in_param, u64 *out_param) 2491 { 2492 int index; 2493 int err; 2494 2495 if (op != RES_OP_RESERVE) 2496 return -EINVAL; 2497 2498 index = get_param_l(&in_param); 2499 if (index == MLX4_SINK_COUNTER_INDEX(dev)) 2500 return 0; 2501 2502 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); 2503 if (err) 2504 return err; 2505 2506 __mlx4_counter_free(dev, index); 2507 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2508 2509 return err; 2510 } 2511 2512 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2513 u64 in_param, u64 *out_param) 2514 { 2515 int xrcdn; 2516 int err; 2517 2518 if (op != RES_OP_RESERVE) 2519 return -EINVAL; 2520 2521 xrcdn = get_param_l(&in_param); 2522 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); 2523 if (err) 2524 return err; 2525 2526 __mlx4_xrcd_free(dev, xrcdn); 2527 2528 return err; 2529 } 2530 2531 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, 2532 struct mlx4_vhcr *vhcr, 2533 struct mlx4_cmd_mailbox *inbox, 2534 struct mlx4_cmd_mailbox *outbox, 2535 struct mlx4_cmd_info *cmd) 2536 { 2537 int err = -EINVAL; 2538 int alop = vhcr->op_modifier; 2539 2540 switch (vhcr->in_modifier & 0xFF) { 2541 case RES_QP: 2542 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, 2543 vhcr->in_param); 2544 break; 2545 2546 case RES_MTT: 2547 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, 2548 vhcr->in_param, &vhcr->out_param); 2549 break; 2550 2551 case RES_MPT: 2552 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, 2553 vhcr->in_param); 2554 break; 2555 2556 case RES_CQ: 2557 err = cq_free_res(dev, slave, vhcr->op_modifier, alop, 2558 vhcr->in_param, &vhcr->out_param); 2559 break; 2560 2561 case RES_SRQ: 2562 err = srq_free_res(dev, slave, vhcr->op_modifier, alop, 2563 vhcr->in_param, &vhcr->out_param); 2564 break; 2565 2566 case RES_MAC: 2567 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, 2568 vhcr->in_param, &vhcr->out_param, 2569 (vhcr->in_modifier >> 8) & 0xFF); 2570 break; 2571 2572 case RES_VLAN: 2573 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, 2574 vhcr->in_param, &vhcr->out_param, 2575 (vhcr->in_modifier >> 8) & 0xFF); 2576 break; 
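/* For the MAC and VLAN cases above, the port number rides in bits 15:8 of
 * the in_modifier while the low byte selects the resource type; e.g. a VF
 * freeing a MAC on port 2 would use in_modifier = (2 << 8) | RES_MAC.
 */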
2577 2578 case RES_COUNTER: 2579 err = counter_free_res(dev, slave, vhcr->op_modifier, alop, 2580 vhcr->in_param, &vhcr->out_param); 2581 break; 2582 2583 case RES_XRCD: 2584 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, 2585 vhcr->in_param, &vhcr->out_param); 2586 2587 default: 2588 break; 2589 } 2590 return err; 2591 } 2592 2593 /* ugly but other choices are uglier */ 2594 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) 2595 { 2596 return (be32_to_cpu(mpt->flags) >> 9) & 1; 2597 } 2598 2599 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) 2600 { 2601 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; 2602 } 2603 2604 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) 2605 { 2606 return be32_to_cpu(mpt->mtt_sz); 2607 } 2608 2609 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt) 2610 { 2611 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; 2612 } 2613 2614 static int mr_is_fmr(struct mlx4_mpt_entry *mpt) 2615 { 2616 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; 2617 } 2618 2619 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) 2620 { 2621 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; 2622 } 2623 2624 static int mr_is_region(struct mlx4_mpt_entry *mpt) 2625 { 2626 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; 2627 } 2628 2629 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) 2630 { 2631 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; 2632 } 2633 2634 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) 2635 { 2636 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; 2637 } 2638 2639 static int qp_get_mtt_size(struct mlx4_qp_context *qpc) 2640 { 2641 int page_shift = (qpc->log_page_size & 0x3f) + 12; 2642 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; 2643 int log_sq_sride = qpc->sq_size_stride & 7; 2644 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; 2645 int log_rq_stride = qpc->rq_size_stride & 7; 2646 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; 2647 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; 2648 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 2649 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0; 2650 int sq_size; 2651 int rq_size; 2652 int total_pages; 2653 int total_mem; 2654 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; 2655 2656 sq_size = 1 << (log_sq_size + log_sq_sride + 4); 2657 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); 2658 total_mem = sq_size + rq_size; 2659 total_pages = 2660 roundup_pow_of_two((total_mem + (page_offset << 6)) >> 2661 page_shift); 2662 2663 return total_pages; 2664 } 2665 2666 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, 2667 int size, struct res_mtt *mtt) 2668 { 2669 int res_start = mtt->com.res_id; 2670 int res_size = (1 << mtt->order); 2671 2672 if (start < res_start || start + size > res_start + res_size) 2673 return -EPERM; 2674 return 0; 2675 } 2676 2677 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2678 struct mlx4_vhcr *vhcr, 2679 struct mlx4_cmd_mailbox *inbox, 2680 struct mlx4_cmd_mailbox *outbox, 2681 struct mlx4_cmd_info *cmd) 2682 { 2683 int err; 2684 int index = vhcr->in_modifier; 2685 struct res_mtt *mtt; 2686 struct res_mpt *mpt; 2687 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; 2688 int phys; 2689 int id; 2690 u32 pd; 2691 int pd_slave; 2692 2693 id = index & mpt_mask(dev); 2694 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); 2695 if (err) 2696 return err; 2697 2698 /* Disable memory windows for VFs. 
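* Only MPT entries that describe memory regions (MLX4_MPT_FLAG_REGION set)
* may be moved to HW ownership by a VF; an entry with the region bit clear,
* i.e. a memory window, is rejected here with -EPERM.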
*/ 2699 if (!mr_is_region(inbox->buf)) { 2700 err = -EPERM; 2701 goto ex_abort; 2702 } 2703 2704 /* Make sure that the PD bits related to the slave id are zeros. */ 2705 pd = mr_get_pd(inbox->buf); 2706 pd_slave = (pd >> 17) & 0x7f; 2707 if (pd_slave != 0 && --pd_slave != slave) { 2708 err = -EPERM; 2709 goto ex_abort; 2710 } 2711 2712 if (mr_is_fmr(inbox->buf)) { 2713 /* FMR and Bind Enable are forbidden in slave devices. */ 2714 if (mr_is_bind_enabled(inbox->buf)) { 2715 err = -EPERM; 2716 goto ex_abort; 2717 } 2718 /* FMR and Memory Windows are also forbidden. */ 2719 if (!mr_is_region(inbox->buf)) { 2720 err = -EPERM; 2721 goto ex_abort; 2722 } 2723 } 2724 2725 phys = mr_phys_mpt(inbox->buf); 2726 if (!phys) { 2727 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2728 if (err) 2729 goto ex_abort; 2730 2731 err = check_mtt_range(dev, slave, mtt_base, 2732 mr_get_mtt_size(inbox->buf), mtt); 2733 if (err) 2734 goto ex_put; 2735 2736 mpt->mtt = mtt; 2737 } 2738 2739 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2740 if (err) 2741 goto ex_put; 2742 2743 if (!phys) { 2744 atomic_inc(&mtt->ref_count); 2745 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2746 } 2747 2748 res_end_move(dev, slave, RES_MPT, id); 2749 return 0; 2750 2751 ex_put: 2752 if (!phys) 2753 put_res(dev, slave, mtt->com.res_id, RES_MTT); 2754 ex_abort: 2755 res_abort_move(dev, slave, RES_MPT, id); 2756 2757 return err; 2758 } 2759 2760 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2761 struct mlx4_vhcr *vhcr, 2762 struct mlx4_cmd_mailbox *inbox, 2763 struct mlx4_cmd_mailbox *outbox, 2764 struct mlx4_cmd_info *cmd) 2765 { 2766 int err; 2767 int index = vhcr->in_modifier; 2768 struct res_mpt *mpt; 2769 int id; 2770 2771 id = index & mpt_mask(dev); 2772 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); 2773 if (err) 2774 return err; 2775 2776 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2777 if (err) 2778 goto ex_abort; 2779 2780 if (mpt->mtt) 2781 atomic_dec(&mpt->mtt->ref_count); 2782 2783 res_end_move(dev, slave, RES_MPT, id); 2784 return 0; 2785 2786 ex_abort: 2787 res_abort_move(dev, slave, RES_MPT, id); 2788 2789 return err; 2790 } 2791 2792 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, 2793 struct mlx4_vhcr *vhcr, 2794 struct mlx4_cmd_mailbox *inbox, 2795 struct mlx4_cmd_mailbox *outbox, 2796 struct mlx4_cmd_info *cmd) 2797 { 2798 int err; 2799 int index = vhcr->in_modifier; 2800 struct res_mpt *mpt; 2801 int id; 2802 2803 id = index & mpt_mask(dev); 2804 err = get_res(dev, slave, id, RES_MPT, &mpt); 2805 if (err) 2806 return err; 2807 2808 if (mpt->com.from_state == RES_MPT_MAPPED) { 2809 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do 2810 * that, the VF must read the MPT. But since the MPT entry memory is not 2811 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the 2812 * entry contents. To guarantee that the MPT cannot be changed, the driver 2813 * must perform HW2SW_MPT before this query and return the MPT entry to HW 2814 * ownership following the change. The change here allows the VF to 2815 * perform QUERY_MPT also when the entry is in SW ownership.
2816 */ 2817 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( 2818 &mlx4_priv(dev)->mr_table.dmpt_table, 2819 mpt->key, NULL); 2820 2821 if (NULL == mpt_entry || NULL == outbox->buf) { 2822 err = -EINVAL; 2823 goto out; 2824 } 2825 2826 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); 2827 2828 err = 0; 2829 } else if (mpt->com.from_state == RES_MPT_HW) { 2830 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2831 } else { 2832 err = -EBUSY; 2833 goto out; 2834 } 2835 2836 2837 out: 2838 put_res(dev, slave, id, RES_MPT); 2839 return err; 2840 } 2841 2842 static int qp_get_rcqn(struct mlx4_qp_context *qpc) 2843 { 2844 return be32_to_cpu(qpc->cqn_recv) & 0xffffff; 2845 } 2846 2847 static int qp_get_scqn(struct mlx4_qp_context *qpc) 2848 { 2849 return be32_to_cpu(qpc->cqn_send) & 0xffffff; 2850 } 2851 2852 static u32 qp_get_srqn(struct mlx4_qp_context *qpc) 2853 { 2854 return be32_to_cpu(qpc->srqn) & 0x1ffffff; 2855 } 2856 2857 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, 2858 struct mlx4_qp_context *context) 2859 { 2860 u32 qpn = vhcr->in_modifier & 0xffffff; 2861 u32 qkey = 0; 2862 2863 if (mlx4_get_parav_qkey(dev, qpn, &qkey)) 2864 return; 2865 2866 /* adjust qkey in qp context */ 2867 context->qkey = cpu_to_be32(qkey); 2868 } 2869 2870 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, 2871 struct mlx4_qp_context *qpc, 2872 struct mlx4_cmd_mailbox *inbox); 2873 2874 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 2875 struct mlx4_vhcr *vhcr, 2876 struct mlx4_cmd_mailbox *inbox, 2877 struct mlx4_cmd_mailbox *outbox, 2878 struct mlx4_cmd_info *cmd) 2879 { 2880 int err; 2881 int qpn = vhcr->in_modifier & 0x7fffff; 2882 struct res_mtt *mtt; 2883 struct res_qp *qp; 2884 struct mlx4_qp_context *qpc = inbox->buf + 8; 2885 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; 2886 int mtt_size = qp_get_mtt_size(qpc); 2887 struct res_cq *rcq; 2888 struct res_cq *scq; 2889 int rcqn = qp_get_rcqn(qpc); 2890 int scqn = qp_get_scqn(qpc); 2891 u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2892 int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2893 struct res_srq *srq; 2894 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2895 2896 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 2897 if (err) 2898 return err; 2899 2900 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); 2901 if (err) 2902 return err; 2903 qp->local_qpn = local_qpn; 2904 qp->sched_queue = 0; 2905 qp->param3 = 0; 2906 qp->vlan_control = 0; 2907 qp->fvl_rx = 0; 2908 qp->pri_path_fl = 0; 2909 qp->vlan_index = 0; 2910 qp->feup = 0; 2911 qp->qpc_flags = be32_to_cpu(qpc->flags); 2912 2913 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2914 if (err) 2915 goto ex_abort; 2916 2917 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 2918 if (err) 2919 goto ex_put_mtt; 2920 2921 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); 2922 if (err) 2923 goto ex_put_mtt; 2924 2925 if (scqn != rcqn) { 2926 err = get_res(dev, slave, scqn, RES_CQ, &scq); 2927 if (err) 2928 goto ex_put_rcq; 2929 } else 2930 scq = rcq; 2931 2932 if (use_srq) { 2933 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 2934 if (err) 2935 goto ex_put_scq; 2936 } 2937 2938 adjust_proxy_tun_qkey(dev, vhcr, qpc); 2939 update_pkey_index(dev, slave, inbox); 2940 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2941 if (err) 2942 goto ex_put_srq; 2943 atomic_inc(&mtt->ref_count); 2944 qp->mtt = mtt; 2945 atomic_inc(&rcq->ref_count); 2946 qp->rcq = rcq; 2947 
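/* Pin the send CQ (and the SRQ below, when one is used) the same way the
 * MTT and receive CQ were pinned above; mlx4_2RST_QP_wrapper() drops all
 * of these references again when the QP goes back to reset.
 */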
atomic_inc(&scq->ref_count); 2948 qp->scq = scq; 2949 2950 if (scqn != rcqn) 2951 put_res(dev, slave, scqn, RES_CQ); 2952 2953 if (use_srq) { 2954 atomic_inc(&srq->ref_count); 2955 put_res(dev, slave, srqn, RES_SRQ); 2956 qp->srq = srq; 2957 } 2958 put_res(dev, slave, rcqn, RES_CQ); 2959 put_res(dev, slave, mtt_base, RES_MTT); 2960 res_end_move(dev, slave, RES_QP, qpn); 2961 2962 return 0; 2963 2964 ex_put_srq: 2965 if (use_srq) 2966 put_res(dev, slave, srqn, RES_SRQ); 2967 ex_put_scq: 2968 if (scqn != rcqn) 2969 put_res(dev, slave, scqn, RES_CQ); 2970 ex_put_rcq: 2971 put_res(dev, slave, rcqn, RES_CQ); 2972 ex_put_mtt: 2973 put_res(dev, slave, mtt_base, RES_MTT); 2974 ex_abort: 2975 res_abort_move(dev, slave, RES_QP, qpn); 2976 2977 return err; 2978 } 2979 2980 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) 2981 { 2982 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; 2983 } 2984 2985 static int eq_get_mtt_size(struct mlx4_eq_context *eqc) 2986 { 2987 int log_eq_size = eqc->log_eq_size & 0x1f; 2988 int page_shift = (eqc->log_page_size & 0x3f) + 12; 2989 2990 if (log_eq_size + 5 < page_shift) 2991 return 1; 2992 2993 return 1 << (log_eq_size + 5 - page_shift); 2994 } 2995 2996 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) 2997 { 2998 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; 2999 } 3000 3001 static int cq_get_mtt_size(struct mlx4_cq_context *cqc) 3002 { 3003 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; 3004 int page_shift = (cqc->log_page_size & 0x3f) + 12; 3005 3006 if (log_cq_size + 5 < page_shift) 3007 return 1; 3008 3009 return 1 << (log_cq_size + 5 - page_shift); 3010 } 3011 3012 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, 3013 struct mlx4_vhcr *vhcr, 3014 struct mlx4_cmd_mailbox *inbox, 3015 struct mlx4_cmd_mailbox *outbox, 3016 struct mlx4_cmd_info *cmd) 3017 { 3018 int err; 3019 int eqn = vhcr->in_modifier; 3020 int res_id = (slave << 10) | eqn; 3021 struct mlx4_eq_context *eqc = inbox->buf; 3022 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; 3023 int mtt_size = eq_get_mtt_size(eqc); 3024 struct res_eq *eq; 3025 struct res_mtt *mtt; 3026 3027 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3028 if (err) 3029 return err; 3030 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); 3031 if (err) 3032 goto out_add; 3033 3034 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3035 if (err) 3036 goto out_move; 3037 3038 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 3039 if (err) 3040 goto out_put; 3041 3042 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3043 if (err) 3044 goto out_put; 3045 3046 atomic_inc(&mtt->ref_count); 3047 eq->mtt = mtt; 3048 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3049 res_end_move(dev, slave, RES_EQ, res_id); 3050 return 0; 3051 3052 out_put: 3053 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3054 out_move: 3055 res_abort_move(dev, slave, RES_EQ, res_id); 3056 out_add: 3057 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3058 return err; 3059 } 3060 3061 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, 3062 struct mlx4_vhcr *vhcr, 3063 struct mlx4_cmd_mailbox *inbox, 3064 struct mlx4_cmd_mailbox *outbox, 3065 struct mlx4_cmd_info *cmd) 3066 { 3067 int err; 3068 u8 get = vhcr->op_modifier; 3069 3070 if (get != 1) 3071 return -EPERM; 3072 3073 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3074 3075 return err; 3076 } 3077 3078 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int 
start, 3079 int len, struct res_mtt **res) 3080 { 3081 struct mlx4_priv *priv = mlx4_priv(dev); 3082 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 3083 struct res_mtt *mtt; 3084 int err = -EINVAL; 3085 3086 spin_lock_irq(mlx4_tlock(dev)); 3087 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], 3088 com.list) { 3089 if (!check_mtt_range(dev, slave, start, len, mtt)) { 3090 *res = mtt; 3091 mtt->com.from_state = mtt->com.state; 3092 mtt->com.state = RES_MTT_BUSY; 3093 err = 0; 3094 break; 3095 } 3096 } 3097 spin_unlock_irq(mlx4_tlock(dev)); 3098 3099 return err; 3100 } 3101 3102 static int verify_qp_parameters(struct mlx4_dev *dev, 3103 struct mlx4_vhcr *vhcr, 3104 struct mlx4_cmd_mailbox *inbox, 3105 enum qp_transition transition, u8 slave) 3106 { 3107 u32 qp_type; 3108 u32 qpn; 3109 struct mlx4_qp_context *qp_ctx; 3110 enum mlx4_qp_optpar optpar; 3111 int port; 3112 int num_gids; 3113 3114 qp_ctx = inbox->buf + 8; 3115 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 3116 optpar = be32_to_cpu(*(__be32 *) inbox->buf); 3117 3118 if (slave != mlx4_master_func_num(dev)) { 3119 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; 3120 /* setting QP rate-limit is disallowed for VFs */ 3121 if (qp_ctx->rate_limit_params) 3122 return -EPERM; 3123 } 3124 3125 switch (qp_type) { 3126 case MLX4_QP_ST_RC: 3127 case MLX4_QP_ST_XRC: 3128 case MLX4_QP_ST_UC: 3129 switch (transition) { 3130 case QP_TRANS_INIT2RTR: 3131 case QP_TRANS_RTR2RTS: 3132 case QP_TRANS_RTS2RTS: 3133 case QP_TRANS_SQD2SQD: 3134 case QP_TRANS_SQD2RTS: 3135 if (slave != mlx4_master_func_num(dev)) 3136 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 3137 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 3138 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 3139 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 3140 else 3141 num_gids = 1; 3142 if (qp_ctx->pri_path.mgid_index >= num_gids) 3143 return -EINVAL; 3144 } 3145 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 3146 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 3147 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 3148 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 3149 else 3150 num_gids = 1; 3151 if (qp_ctx->alt_path.mgid_index >= num_gids) 3152 return -EINVAL; 3153 } 3154 break; 3155 default: 3156 break; 3157 } 3158 break; 3159 3160 case MLX4_QP_ST_MLX: 3161 qpn = vhcr->in_modifier & 0x7fffff; 3162 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 3163 if (transition == QP_TRANS_INIT2RTR && 3164 slave != mlx4_master_func_num(dev) && 3165 mlx4_is_qp_reserved(dev, qpn) && 3166 !mlx4_vf_smi_enabled(dev, slave, port)) { 3167 /* only enabled VFs may create MLX proxy QPs */ 3168 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", 3169 __func__, slave, port); 3170 return -EPERM; 3171 } 3172 break; 3173 3174 default: 3175 break; 3176 } 3177 3178 return 0; 3179 } 3180 3181 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, 3182 struct mlx4_vhcr *vhcr, 3183 struct mlx4_cmd_mailbox *inbox, 3184 struct mlx4_cmd_mailbox *outbox, 3185 struct mlx4_cmd_info *cmd) 3186 { 3187 struct mlx4_mtt mtt; 3188 __be64 *page_list = inbox->buf; 3189 u64 *pg_list = (u64 *)page_list; 3190 int i; 3191 struct res_mtt *rmtt = NULL; 3192 int start = be64_to_cpu(page_list[0]); 3193 int npages = vhcr->in_modifier; 3194 int err; 3195 3196 err = get_containing_mtt(dev, slave, start, npages, &rmtt); 3197 if (err) 3198 return err; 3199 3200 /* Call the SW implementation of write_mtt: 3201 * - 
Prepare a dummy mtt struct 3202 * - Translate inbox contents to simple addresses in host endianness */ 3203 mtt.offset = 0; /* TBD this is broken but I don't handle it since 3204 we don't really use it */ 3205 mtt.order = 0; 3206 mtt.page_shift = 0; 3207 for (i = 0; i < npages; ++i) 3208 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); 3209 3210 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, 3211 ((u64 *)page_list + 2)); 3212 3213 if (rmtt) 3214 put_res(dev, slave, rmtt->com.res_id, RES_MTT); 3215 3216 return err; 3217 } 3218 3219 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, 3220 struct mlx4_vhcr *vhcr, 3221 struct mlx4_cmd_mailbox *inbox, 3222 struct mlx4_cmd_mailbox *outbox, 3223 struct mlx4_cmd_info *cmd) 3224 { 3225 int eqn = vhcr->in_modifier; 3226 int res_id = eqn | (slave << 10); 3227 struct res_eq *eq; 3228 int err; 3229 3230 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); 3231 if (err) 3232 return err; 3233 3234 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); 3235 if (err) 3236 goto ex_abort; 3237 3238 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3239 if (err) 3240 goto ex_put; 3241 3242 atomic_dec(&eq->mtt->ref_count); 3243 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 3244 res_end_move(dev, slave, RES_EQ, res_id); 3245 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3246 3247 return 0; 3248 3249 ex_put: 3250 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 3251 ex_abort: 3252 res_abort_move(dev, slave, RES_EQ, res_id); 3253 3254 return err; 3255 } 3256 3257 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) 3258 { 3259 struct mlx4_priv *priv = mlx4_priv(dev); 3260 struct mlx4_slave_event_eq_info *event_eq; 3261 struct mlx4_cmd_mailbox *mailbox; 3262 u32 in_modifier = 0; 3263 int err; 3264 int res_id; 3265 struct res_eq *req; 3266 3267 if (!priv->mfunc.master.slave_state) 3268 return -EINVAL; 3269 3270 /* check for slave valid, slave not PF, and slave active */ 3271 if (slave < 0 || slave > dev->persist->num_vfs || 3272 slave == dev->caps.function || 3273 !priv->mfunc.master.slave_state[slave].active) 3274 return 0; 3275 3276 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 3277 3278 /* Create the event only if the slave is registered */ 3279 if (event_eq->eqn < 0) 3280 return 0; 3281 3282 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3283 res_id = (slave << 10) | event_eq->eqn; 3284 err = get_res(dev, slave, res_id, RES_EQ, &req); 3285 if (err) 3286 goto unlock; 3287 3288 if (req->com.from_state != RES_EQ_HW) { 3289 err = -EINVAL; 3290 goto put; 3291 } 3292 3293 mailbox = mlx4_alloc_cmd_mailbox(dev); 3294 if (IS_ERR(mailbox)) { 3295 err = PTR_ERR(mailbox); 3296 goto put; 3297 } 3298 3299 if (eqe->type == MLX4_EVENT_TYPE_CMD) { 3300 ++event_eq->token; 3301 eqe->event.cmd.token = cpu_to_be16(event_eq->token); 3302 } 3303 3304 memcpy(mailbox->buf, (u8 *) eqe, 28); 3305 3306 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); 3307 3308 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, 3309 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, 3310 MLX4_CMD_NATIVE); 3311 3312 put_res(dev, slave, res_id, RES_EQ); 3313 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3314 mlx4_free_cmd_mailbox(dev, mailbox); 3315 return err; 3316 3317 put: 3318 put_res(dev, slave, res_id, RES_EQ); 3319 3320 unlock: 3321 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3322 return err; 3323 } 3324 3325 int mlx4_QUERY_EQ_wrapper(struct 
mlx4_dev *dev, int slave, 3326 struct mlx4_vhcr *vhcr, 3327 struct mlx4_cmd_mailbox *inbox, 3328 struct mlx4_cmd_mailbox *outbox, 3329 struct mlx4_cmd_info *cmd) 3330 { 3331 int eqn = vhcr->in_modifier; 3332 int res_id = eqn | (slave << 10); 3333 struct res_eq *eq; 3334 int err; 3335 3336 err = get_res(dev, slave, res_id, RES_EQ, &eq); 3337 if (err) 3338 return err; 3339 3340 if (eq->com.from_state != RES_EQ_HW) { 3341 err = -EINVAL; 3342 goto ex_put; 3343 } 3344 3345 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3346 3347 ex_put: 3348 put_res(dev, slave, res_id, RES_EQ); 3349 return err; 3350 } 3351 3352 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3353 struct mlx4_vhcr *vhcr, 3354 struct mlx4_cmd_mailbox *inbox, 3355 struct mlx4_cmd_mailbox *outbox, 3356 struct mlx4_cmd_info *cmd) 3357 { 3358 int err; 3359 int cqn = vhcr->in_modifier; 3360 struct mlx4_cq_context *cqc = inbox->buf; 3361 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3362 struct res_cq *cq = NULL; 3363 struct res_mtt *mtt; 3364 3365 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); 3366 if (err) 3367 return err; 3368 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3369 if (err) 3370 goto out_move; 3371 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3372 if (err) 3373 goto out_put; 3374 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3375 if (err) 3376 goto out_put; 3377 atomic_inc(&mtt->ref_count); 3378 cq->mtt = mtt; 3379 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3380 res_end_move(dev, slave, RES_CQ, cqn); 3381 return 0; 3382 3383 out_put: 3384 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3385 out_move: 3386 res_abort_move(dev, slave, RES_CQ, cqn); 3387 return err; 3388 } 3389 3390 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3391 struct mlx4_vhcr *vhcr, 3392 struct mlx4_cmd_mailbox *inbox, 3393 struct mlx4_cmd_mailbox *outbox, 3394 struct mlx4_cmd_info *cmd) 3395 { 3396 int err; 3397 int cqn = vhcr->in_modifier; 3398 struct res_cq *cq = NULL; 3399 3400 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); 3401 if (err) 3402 return err; 3403 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3404 if (err) 3405 goto out_move; 3406 atomic_dec(&cq->mtt->ref_count); 3407 res_end_move(dev, slave, RES_CQ, cqn); 3408 return 0; 3409 3410 out_move: 3411 res_abort_move(dev, slave, RES_CQ, cqn); 3412 return err; 3413 } 3414 3415 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3416 struct mlx4_vhcr *vhcr, 3417 struct mlx4_cmd_mailbox *inbox, 3418 struct mlx4_cmd_mailbox *outbox, 3419 struct mlx4_cmd_info *cmd) 3420 { 3421 int cqn = vhcr->in_modifier; 3422 struct res_cq *cq; 3423 int err; 3424 3425 err = get_res(dev, slave, cqn, RES_CQ, &cq); 3426 if (err) 3427 return err; 3428 3429 if (cq->com.from_state != RES_CQ_HW) 3430 goto ex_put; 3431 3432 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3433 ex_put: 3434 put_res(dev, slave, cqn, RES_CQ); 3435 3436 return err; 3437 } 3438 3439 static int handle_resize(struct mlx4_dev *dev, int slave, 3440 struct mlx4_vhcr *vhcr, 3441 struct mlx4_cmd_mailbox *inbox, 3442 struct mlx4_cmd_mailbox *outbox, 3443 struct mlx4_cmd_info *cmd, 3444 struct res_cq *cq) 3445 { 3446 int err; 3447 struct res_mtt *orig_mtt; 3448 struct res_mtt *mtt; 3449 struct mlx4_cq_context *cqc = inbox->buf; 3450 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3451 3452 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); 3453 
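/* A resize is only legal while the CQ still holds the MTT it was created
 * with; once the firmware command succeeds the reference is moved to the
 * MTT described by the new CQ context.
 */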
if (err) 3454 return err; 3455 3456 if (orig_mtt != cq->mtt) { 3457 err = -EINVAL; 3458 goto ex_put; 3459 } 3460 3461 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3462 if (err) 3463 goto ex_put; 3464 3465 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3466 if (err) 3467 goto ex_put1; 3468 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3469 if (err) 3470 goto ex_put1; 3471 atomic_dec(&orig_mtt->ref_count); 3472 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3473 atomic_inc(&mtt->ref_count); 3474 cq->mtt = mtt; 3475 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3476 return 0; 3477 3478 ex_put1: 3479 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3480 ex_put: 3481 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3482 3483 return err; 3484 3485 } 3486 3487 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3488 struct mlx4_vhcr *vhcr, 3489 struct mlx4_cmd_mailbox *inbox, 3490 struct mlx4_cmd_mailbox *outbox, 3491 struct mlx4_cmd_info *cmd) 3492 { 3493 int cqn = vhcr->in_modifier; 3494 struct res_cq *cq; 3495 int err; 3496 3497 err = get_res(dev, slave, cqn, RES_CQ, &cq); 3498 if (err) 3499 return err; 3500 3501 if (cq->com.from_state != RES_CQ_HW) 3502 goto ex_put; 3503 3504 if (vhcr->op_modifier == 0) { 3505 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 3506 goto ex_put; 3507 } 3508 3509 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3510 ex_put: 3511 put_res(dev, slave, cqn, RES_CQ); 3512 3513 return err; 3514 } 3515 3516 static int srq_get_mtt_size(struct mlx4_srq_context *srqc) 3517 { 3518 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; 3519 int log_rq_stride = srqc->logstride & 7; 3520 int page_shift = (srqc->log_page_size & 0x3f) + 12; 3521 3522 if (log_srq_size + log_rq_stride + 4 < page_shift) 3523 return 1; 3524 3525 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); 3526 } 3527 3528 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3529 struct mlx4_vhcr *vhcr, 3530 struct mlx4_cmd_mailbox *inbox, 3531 struct mlx4_cmd_mailbox *outbox, 3532 struct mlx4_cmd_info *cmd) 3533 { 3534 int err; 3535 int srqn = vhcr->in_modifier; 3536 struct res_mtt *mtt; 3537 struct res_srq *srq = NULL; 3538 struct mlx4_srq_context *srqc = inbox->buf; 3539 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; 3540 3541 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) 3542 return -EINVAL; 3543 3544 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); 3545 if (err) 3546 return err; 3547 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3548 if (err) 3549 goto ex_abort; 3550 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), 3551 mtt); 3552 if (err) 3553 goto ex_put_mtt; 3554 3555 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3556 if (err) 3557 goto ex_put_mtt; 3558 3559 atomic_inc(&mtt->ref_count); 3560 srq->mtt = mtt; 3561 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3562 res_end_move(dev, slave, RES_SRQ, srqn); 3563 return 0; 3564 3565 ex_put_mtt: 3566 put_res(dev, slave, mtt->com.res_id, RES_MTT); 3567 ex_abort: 3568 res_abort_move(dev, slave, RES_SRQ, srqn); 3569 3570 return err; 3571 } 3572 3573 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3574 struct mlx4_vhcr *vhcr, 3575 struct mlx4_cmd_mailbox *inbox, 3576 struct mlx4_cmd_mailbox *outbox, 3577 struct mlx4_cmd_info *cmd) 3578 { 3579 int err; 3580 int srqn = vhcr->in_modifier; 3581 struct res_srq *srq = NULL; 3582 3583 err = 
srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); 3584 if (err) 3585 return err; 3586 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3587 if (err) 3588 goto ex_abort; 3589 atomic_dec(&srq->mtt->ref_count); 3590 if (srq->cq) 3591 atomic_dec(&srq->cq->ref_count); 3592 res_end_move(dev, slave, RES_SRQ, srqn); 3593 3594 return 0; 3595 3596 ex_abort: 3597 res_abort_move(dev, slave, RES_SRQ, srqn); 3598 3599 return err; 3600 } 3601 3602 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3603 struct mlx4_vhcr *vhcr, 3604 struct mlx4_cmd_mailbox *inbox, 3605 struct mlx4_cmd_mailbox *outbox, 3606 struct mlx4_cmd_info *cmd) 3607 { 3608 int err; 3609 int srqn = vhcr->in_modifier; 3610 struct res_srq *srq; 3611 3612 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3613 if (err) 3614 return err; 3615 if (srq->com.from_state != RES_SRQ_HW) { 3616 err = -EBUSY; 3617 goto out; 3618 } 3619 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3620 out: 3621 put_res(dev, slave, srqn, RES_SRQ); 3622 return err; 3623 } 3624 3625 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3626 struct mlx4_vhcr *vhcr, 3627 struct mlx4_cmd_mailbox *inbox, 3628 struct mlx4_cmd_mailbox *outbox, 3629 struct mlx4_cmd_info *cmd) 3630 { 3631 int err; 3632 int srqn = vhcr->in_modifier; 3633 struct res_srq *srq; 3634 3635 err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3636 if (err) 3637 return err; 3638 3639 if (srq->com.from_state != RES_SRQ_HW) { 3640 err = -EBUSY; 3641 goto out; 3642 } 3643 3644 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3645 out: 3646 put_res(dev, slave, srqn, RES_SRQ); 3647 return err; 3648 } 3649 3650 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, 3651 struct mlx4_vhcr *vhcr, 3652 struct mlx4_cmd_mailbox *inbox, 3653 struct mlx4_cmd_mailbox *outbox, 3654 struct mlx4_cmd_info *cmd) 3655 { 3656 int err; 3657 int qpn = vhcr->in_modifier & 0x7fffff; 3658 struct res_qp *qp; 3659 3660 err = get_res(dev, slave, qpn, RES_QP, &qp); 3661 if (err) 3662 return err; 3663 if (qp->com.from_state != RES_QP_HW) { 3664 err = -EBUSY; 3665 goto out; 3666 } 3667 3668 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3669 out: 3670 put_res(dev, slave, qpn, RES_QP); 3671 return err; 3672 } 3673 3674 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 3675 struct mlx4_vhcr *vhcr, 3676 struct mlx4_cmd_mailbox *inbox, 3677 struct mlx4_cmd_mailbox *outbox, 3678 struct mlx4_cmd_info *cmd) 3679 { 3680 struct mlx4_qp_context *context = inbox->buf + 8; 3681 adjust_proxy_tun_qkey(dev, vhcr, context); 3682 update_pkey_index(dev, slave, inbox); 3683 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3684 } 3685 3686 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, 3687 struct mlx4_qp_context *qpc, 3688 struct mlx4_cmd_mailbox *inbox) 3689 { 3690 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); 3691 u8 pri_sched_queue; 3692 int port = mlx4_slave_convert_port( 3693 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; 3694 3695 if (port < 0) 3696 return -EINVAL; 3697 3698 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | 3699 ((port & 1) << 6); 3700 3701 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) || 3702 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) { 3703 qpc->pri_path.sched_queue = pri_sched_queue; 3704 } 3705 3706 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 3707 port = mlx4_slave_convert_port( 3708 dev, slave, 
(qpc->alt_path.sched_queue >> 6 & 1) 3709 + 1) - 1; 3710 if (port < 0) 3711 return -EINVAL; 3712 qpc->alt_path.sched_queue = 3713 (qpc->alt_path.sched_queue & ~(1 << 6)) | 3714 (port & 1) << 6; 3715 } 3716 return 0; 3717 } 3718 3719 static int roce_verify_mac(struct mlx4_dev *dev, int slave, 3720 struct mlx4_qp_context *qpc, 3721 struct mlx4_cmd_mailbox *inbox) 3722 { 3723 u64 mac; 3724 int port; 3725 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 3726 u8 sched = *(u8 *)(inbox->buf + 64); 3727 u8 smac_ix; 3728 3729 port = (sched >> 6 & 1) + 1; 3730 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { 3731 smac_ix = qpc->pri_path.grh_mylmc & 0x7f; 3732 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) 3733 return -ENOENT; 3734 } 3735 return 0; 3736 } 3737 3738 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3739 struct mlx4_vhcr *vhcr, 3740 struct mlx4_cmd_mailbox *inbox, 3741 struct mlx4_cmd_mailbox *outbox, 3742 struct mlx4_cmd_info *cmd) 3743 { 3744 int err; 3745 struct mlx4_qp_context *qpc = inbox->buf + 8; 3746 int qpn = vhcr->in_modifier & 0x7fffff; 3747 struct res_qp *qp; 3748 u8 orig_sched_queue; 3749 __be32 orig_param3 = qpc->param3; 3750 u8 orig_vlan_control = qpc->pri_path.vlan_control; 3751 u8 orig_fvl_rx = qpc->pri_path.fvl_rx; 3752 u8 orig_pri_path_fl = qpc->pri_path.fl; 3753 u8 orig_vlan_index = qpc->pri_path.vlan_index; 3754 u8 orig_feup = qpc->pri_path.feup; 3755 3756 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 3757 if (err) 3758 return err; 3759 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); 3760 if (err) 3761 return err; 3762 3763 if (roce_verify_mac(dev, slave, qpc, inbox)) 3764 return -EINVAL; 3765 3766 update_pkey_index(dev, slave, inbox); 3767 update_gid(dev, inbox, (u8)slave); 3768 adjust_proxy_tun_qkey(dev, vhcr, qpc); 3769 orig_sched_queue = qpc->pri_path.sched_queue; 3770 3771 err = get_res(dev, slave, qpn, RES_QP, &qp); 3772 if (err) 3773 return err; 3774 if (qp->com.from_state != RES_QP_HW) { 3775 err = -EBUSY; 3776 goto out; 3777 } 3778 3779 err = update_vport_qp_param(dev, inbox, slave, qpn); 3780 if (err) 3781 goto out; 3782 3783 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3784 out: 3785 /* if no error, save sched queue value passed in by VF. This is 3786 * essentially the QOS value provided by the VF. 
This will be useful 3787 * if we allow dynamic changes from VST back to VGT 3788 */ 3789 if (!err) { 3790 qp->sched_queue = orig_sched_queue; 3791 qp->param3 = orig_param3; 3792 qp->vlan_control = orig_vlan_control; 3793 qp->fvl_rx = orig_fvl_rx; 3794 qp->pri_path_fl = orig_pri_path_fl; 3795 qp->vlan_index = orig_vlan_index; 3796 qp->feup = orig_feup; 3797 } 3798 put_res(dev, slave, qpn, RES_QP); 3799 return err; 3800 } 3801 3802 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3803 struct mlx4_vhcr *vhcr, 3804 struct mlx4_cmd_mailbox *inbox, 3805 struct mlx4_cmd_mailbox *outbox, 3806 struct mlx4_cmd_info *cmd) 3807 { 3808 int err; 3809 struct mlx4_qp_context *context = inbox->buf + 8; 3810 3811 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3812 if (err) 3813 return err; 3814 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); 3815 if (err) 3816 return err; 3817 3818 update_pkey_index(dev, slave, inbox); 3819 update_gid(dev, inbox, (u8)slave); 3820 adjust_proxy_tun_qkey(dev, vhcr, context); 3821 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3822 } 3823 3824 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3825 struct mlx4_vhcr *vhcr, 3826 struct mlx4_cmd_mailbox *inbox, 3827 struct mlx4_cmd_mailbox *outbox, 3828 struct mlx4_cmd_info *cmd) 3829 { 3830 int err; 3831 struct mlx4_qp_context *context = inbox->buf + 8; 3832 3833 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3834 if (err) 3835 return err; 3836 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); 3837 if (err) 3838 return err; 3839 3840 update_pkey_index(dev, slave, inbox); 3841 update_gid(dev, inbox, (u8)slave); 3842 adjust_proxy_tun_qkey(dev, vhcr, context); 3843 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3844 } 3845 3846 3847 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3848 struct mlx4_vhcr *vhcr, 3849 struct mlx4_cmd_mailbox *inbox, 3850 struct mlx4_cmd_mailbox *outbox, 3851 struct mlx4_cmd_info *cmd) 3852 { 3853 struct mlx4_qp_context *context = inbox->buf + 8; 3854 int err = adjust_qp_sched_queue(dev, slave, context, inbox); 3855 if (err) 3856 return err; 3857 adjust_proxy_tun_qkey(dev, vhcr, context); 3858 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3859 } 3860 3861 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, 3862 struct mlx4_vhcr *vhcr, 3863 struct mlx4_cmd_mailbox *inbox, 3864 struct mlx4_cmd_mailbox *outbox, 3865 struct mlx4_cmd_info *cmd) 3866 { 3867 int err; 3868 struct mlx4_qp_context *context = inbox->buf + 8; 3869 3870 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3871 if (err) 3872 return err; 3873 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); 3874 if (err) 3875 return err; 3876 3877 adjust_proxy_tun_qkey(dev, vhcr, context); 3878 update_gid(dev, inbox, (u8)slave); 3879 update_pkey_index(dev, slave, inbox); 3880 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3881 } 3882 3883 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3884 struct mlx4_vhcr *vhcr, 3885 struct mlx4_cmd_mailbox *inbox, 3886 struct mlx4_cmd_mailbox *outbox, 3887 struct mlx4_cmd_info *cmd) 3888 { 3889 int err; 3890 struct mlx4_qp_context *context = inbox->buf + 8; 3891 3892 err = adjust_qp_sched_queue(dev, slave, context, inbox); 3893 if (err) 3894 return err; 3895 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); 3896 if (err) 3897 return err; 3898 3899 adjust_proxy_tun_qkey(dev, 
vhcr, context); 3900 update_gid(dev, inbox, (u8)slave); 3901 update_pkey_index(dev, slave, inbox); 3902 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3903 } 3904 3905 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, 3906 struct mlx4_vhcr *vhcr, 3907 struct mlx4_cmd_mailbox *inbox, 3908 struct mlx4_cmd_mailbox *outbox, 3909 struct mlx4_cmd_info *cmd) 3910 { 3911 int err; 3912 int qpn = vhcr->in_modifier & 0x7fffff; 3913 struct res_qp *qp; 3914 3915 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); 3916 if (err) 3917 return err; 3918 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3919 if (err) 3920 goto ex_abort; 3921 3922 atomic_dec(&qp->mtt->ref_count); 3923 atomic_dec(&qp->rcq->ref_count); 3924 atomic_dec(&qp->scq->ref_count); 3925 if (qp->srq) 3926 atomic_dec(&qp->srq->ref_count); 3927 res_end_move(dev, slave, RES_QP, qpn); 3928 return 0; 3929 3930 ex_abort: 3931 res_abort_move(dev, slave, RES_QP, qpn); 3932 3933 return err; 3934 } 3935 3936 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, 3937 struct res_qp *rqp, u8 *gid) 3938 { 3939 struct res_gid *res; 3940 3941 list_for_each_entry(res, &rqp->mcg_list, list) { 3942 if (!memcmp(res->gid, gid, 16)) 3943 return res; 3944 } 3945 return NULL; 3946 } 3947 3948 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3949 u8 *gid, enum mlx4_protocol prot, 3950 enum mlx4_steer_type steer, u64 reg_id) 3951 { 3952 struct res_gid *res; 3953 int err; 3954 3955 res = kzalloc(sizeof *res, GFP_KERNEL); 3956 if (!res) 3957 return -ENOMEM; 3958 3959 spin_lock_irq(&rqp->mcg_spl); 3960 if (find_gid(dev, slave, rqp, gid)) { 3961 kfree(res); 3962 err = -EEXIST; 3963 } else { 3964 memcpy(res->gid, gid, 16); 3965 res->prot = prot; 3966 res->steer = steer; 3967 res->reg_id = reg_id; 3968 list_add_tail(&res->list, &rqp->mcg_list); 3969 err = 0; 3970 } 3971 spin_unlock_irq(&rqp->mcg_spl); 3972 3973 return err; 3974 } 3975 3976 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3977 u8 *gid, enum mlx4_protocol prot, 3978 enum mlx4_steer_type steer, u64 *reg_id) 3979 { 3980 struct res_gid *res; 3981 int err; 3982 3983 spin_lock_irq(&rqp->mcg_spl); 3984 res = find_gid(dev, slave, rqp, gid); 3985 if (!res || res->prot != prot || res->steer != steer) 3986 err = -EINVAL; 3987 else { 3988 *reg_id = res->reg_id; 3989 list_del(&res->list); 3990 kfree(res); 3991 err = 0; 3992 } 3993 spin_unlock_irq(&rqp->mcg_spl); 3994 3995 return err; 3996 } 3997 3998 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, 3999 u8 gid[16], int block_loopback, enum mlx4_protocol prot, 4000 enum mlx4_steer_type type, u64 *reg_id) 4001 { 4002 switch (dev->caps.steering_mode) { 4003 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 4004 int port = mlx4_slave_convert_port(dev, slave, gid[5]); 4005 if (port < 0) 4006 return port; 4007 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 4008 block_loopback, prot, 4009 reg_id); 4010 } 4011 case MLX4_STEERING_MODE_B0: 4012 if (prot == MLX4_PROT_ETH) { 4013 int port = mlx4_slave_convert_port(dev, slave, gid[5]); 4014 if (port < 0) 4015 return port; 4016 gid[5] = port; 4017 } 4018 return mlx4_qp_attach_common(dev, qp, gid, 4019 block_loopback, prot, type); 4020 default: 4021 return -EINVAL; 4022 } 4023 } 4024 4025 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 4026 u8 gid[16], enum mlx4_protocol prot, 4027 enum mlx4_steer_type type, u64 reg_id) 4028 { 4029 switch (dev->caps.steering_mode) { 4030 case 
MLX4_STEERING_MODE_DEVICE_MANAGED: 4031 return mlx4_flow_detach(dev, reg_id); 4032 case MLX4_STEERING_MODE_B0: 4033 return mlx4_qp_detach_common(dev, qp, gid, prot, type); 4034 default: 4035 return -EINVAL; 4036 } 4037 } 4038 4039 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, 4040 u8 *gid, enum mlx4_protocol prot) 4041 { 4042 int real_port; 4043 4044 if (prot != MLX4_PROT_ETH) 4045 return 0; 4046 4047 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || 4048 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 4049 real_port = mlx4_slave_convert_port(dev, slave, gid[5]); 4050 if (real_port < 0) 4051 return -EINVAL; 4052 gid[5] = real_port; 4053 } 4054 4055 return 0; 4056 } 4057 4058 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 4059 struct mlx4_vhcr *vhcr, 4060 struct mlx4_cmd_mailbox *inbox, 4061 struct mlx4_cmd_mailbox *outbox, 4062 struct mlx4_cmd_info *cmd) 4063 { 4064 struct mlx4_qp qp; /* dummy for calling attach/detach */ 4065 u8 *gid = inbox->buf; 4066 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; 4067 int err; 4068 int qpn; 4069 struct res_qp *rqp; 4070 u64 reg_id = 0; 4071 int attach = vhcr->op_modifier; 4072 int block_loopback = vhcr->in_modifier >> 31; 4073 u8 steer_type_mask = 2; 4074 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 4075 4076 qpn = vhcr->in_modifier & 0xffffff; 4077 err = get_res(dev, slave, qpn, RES_QP, &rqp); 4078 if (err) 4079 return err; 4080 4081 qp.qpn = qpn; 4082 if (attach) { 4083 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, 4084 type, ®_id); 4085 if (err) { 4086 pr_err("Fail to attach rule to qp 0x%x\n", qpn); 4087 goto ex_put; 4088 } 4089 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); 4090 if (err) 4091 goto ex_detach; 4092 } else { 4093 err = mlx4_adjust_port(dev, slave, gid, prot); 4094 if (err) 4095 goto ex_put; 4096 4097 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); 4098 if (err) 4099 goto ex_put; 4100 4101 err = qp_detach(dev, &qp, gid, prot, type, reg_id); 4102 if (err) 4103 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n", 4104 qpn, reg_id); 4105 } 4106 put_res(dev, slave, qpn, RES_QP); 4107 return err; 4108 4109 ex_detach: 4110 qp_detach(dev, &qp, gid, prot, type, reg_id); 4111 ex_put: 4112 put_res(dev, slave, qpn, RES_QP); 4113 return err; 4114 } 4115 4116 /* 4117 * MAC validation for Flow Steering rules. 4118 * VF can attach rules only with a mac address which is assigned to it. 
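* The check walks the VF's RES_MAC list and compares every registered
* address against the rule's destination MAC; multicast and broadcast
* destinations are not subject to this check.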
4119 */ 4120 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, 4121 struct list_head *rlist) 4122 { 4123 struct mac_res *res, *tmp; 4124 __be64 be_mac; 4125 4126 /* make sure it isn't multicast or broadcast mac*/ 4127 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && 4128 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { 4129 list_for_each_entry_safe(res, tmp, rlist, list) { 4130 be_mac = cpu_to_be64(res->mac << 16); 4131 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) 4132 return 0; 4133 } 4134 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", 4135 eth_header->eth.dst_mac, slave); 4136 return -EINVAL; 4137 } 4138 return 0; 4139 } 4140 4141 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, 4142 struct _rule_hw *eth_header) 4143 { 4144 if (is_multicast_ether_addr(eth_header->eth.dst_mac) || 4145 is_broadcast_ether_addr(eth_header->eth.dst_mac)) { 4146 struct mlx4_net_trans_rule_hw_eth *eth = 4147 (struct mlx4_net_trans_rule_hw_eth *)eth_header; 4148 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); 4149 bool last_rule = next_rule->size == 0 && next_rule->id == 0 && 4150 next_rule->rsvd == 0; 4151 4152 if (last_rule) 4153 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); 4154 } 4155 } 4156 4157 /* 4158 * In case of missing eth header, append eth header with a MAC address 4159 * assigned to the VF. 4160 */ 4161 static int add_eth_header(struct mlx4_dev *dev, int slave, 4162 struct mlx4_cmd_mailbox *inbox, 4163 struct list_head *rlist, int header_id) 4164 { 4165 struct mac_res *res, *tmp; 4166 u8 port; 4167 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 4168 struct mlx4_net_trans_rule_hw_eth *eth_header; 4169 struct mlx4_net_trans_rule_hw_ipv4 *ip_header; 4170 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; 4171 __be64 be_mac = 0; 4172 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); 4173 4174 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 4175 port = ctrl->port; 4176 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); 4177 4178 /* Clear a space in the inbox for eth header */ 4179 switch (header_id) { 4180 case MLX4_NET_TRANS_RULE_ID_IPV4: 4181 ip_header = 4182 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); 4183 memmove(ip_header, eth_header, 4184 sizeof(*ip_header) + sizeof(*l4_header)); 4185 break; 4186 case MLX4_NET_TRANS_RULE_ID_TCP: 4187 case MLX4_NET_TRANS_RULE_ID_UDP: 4188 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) 4189 (eth_header + 1); 4190 memmove(l4_header, eth_header, sizeof(*l4_header)); 4191 break; 4192 default: 4193 return -EINVAL; 4194 } 4195 list_for_each_entry_safe(res, tmp, rlist, list) { 4196 if (port == res->port) { 4197 be_mac = cpu_to_be64(res->mac << 16); 4198 break; 4199 } 4200 } 4201 if (!be_mac) { 4202 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", 4203 port); 4204 return -EINVAL; 4205 } 4206 4207 memset(eth_header, 0, sizeof(*eth_header)); 4208 eth_header->size = sizeof(*eth_header) >> 2; 4209 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); 4210 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); 4211 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); 4212 4213 return 0; 4214 4215 } 4216 4217 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \ 4218 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\ 4219 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB) 4220 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, 4221 struct mlx4_vhcr *vhcr, 4222 struct mlx4_cmd_mailbox *inbox, 4223 
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	if ((pri_addr_path_mask &
	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
	    !(dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
		mlx4_warn(dev,
			  "Src check LB for slave %d isn't supported\n",
			  slave);
		return -ENOTSUPP;
	}

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
		return err;
	}

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);

		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
		goto err_mac;
	}

err_mac:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (err <= 0)
		return -EINVAL;
	ctrl->port = err;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		handle_eth_header_mcast_prio(ctrl, rule_header);

	if (slave == dev->caps.function)
		goto execute;

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
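		/* Rules without an L2 header cannot be attached as-is:
		 * prepend an Ethernet header carrying one of the VF's
		 * registered MACs (see add_eth_header()) and grow the
		 * mailbox size accordingly.
		 */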
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

execute:
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}

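/* Move every resource of @type owned by @slave to the busy state so it can
 * be torn down; returns the number of entries that were already busy and
 * could not be claimed.
 */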
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

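/* Tear down every MPT (memory region) still owned by @slave: HW-owned
 * entries are moved back to SW ownership, their ICM is freed, and the MPT
 * key is finally released.
 */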
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

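	/* Detach each remaining rule from HW and drop it from the tracker. */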
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
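				/* Only the counter index is recorded here;
				 * the HW counter itself is freed below,
				 * outside the tracker lock.
				 */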
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask |
						    qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}