/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*rcq;
	struct res_cq		*scq;
	struct res_srq		*srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};
static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt		*mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt		*mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt		*mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
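/*
 * Note: every tracked object embeds a struct res_common, so a lookup
 * returns the common header and callers recover the full object with
 * container_of(). An illustrative (not load-bearing) use:
 *
 *	struct res_common *r = res_tracker_lookup(root, res_id);
 *	struct res_qp *qp = r ? container_of(r, struct res_qp, com) : NULL;
 *
 * Neither helper takes a lock; callers hold mlx4_tlock(dev) around
 * both lookup and insert.
 */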
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
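/*
 * Worked example (illustrative numbers): with quota 10, guaranteed 4
 * and allocated 3, a request for count 3 draws 1 entry from the
 * slave's guaranteed (reserved) budget and 2 from the shared free
 * pool, and is granted only if the free pool can cover those 2
 * entries without eating into what is still reserved for the other
 * functions.
 */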
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
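/*
 * Example of the resulting split (illustrative numbers): with
 * num_instances = 100 and num_vfs = 4, each of the five functions is
 * guaranteed 100 / (2 * 5) = 10 instances and may allocate at most
 * (100 / 2) + 10 = 60; half of the pool is set aside as guarantees
 * and the other half is shared first come, first served.
 */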
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* both ports.				*/
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);
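/*
 * Note: bit 6 of pri_path.sched_queue encodes the physical port
 * (0 -> port 1, 1 -> port 2). update_pkey_index(), update_gid() and
 * update_vport_qp_param() below all derive the port this way, either
 * as ((sched_queue >> 6) & 1) + 1 or via the equivalent 0x40 test.
 */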
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
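/*
 * Usage sketch (illustrative): get_res()/put_res() bracket a section
 * in which the caller works on a tracked object while it is parked in
 * RES_ANY_BUSY, e.g.
 *
 *	struct res_mpt *mpt;
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (!err) {
 *		... use mpt; concurrent movers see the entry as busy ...
 *		put_res(dev, slave, id, RES_MPT);
 *	}
 *
 * put_res() restores the state that get_res() saved in from_state.
 */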
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		/* only dereference the counter when the lookup succeeded */
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
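/*
 * The "extra" argument of alloc_tr() is overloaded per resource type:
 * the MPT key for RES_MPT, the allocation order for RES_MTT, the
 * attached port for RES_COUNTER and the destination QPN for
 * RES_FS_RULE; the remaining types ignore it.
 */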
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
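/*
 * All *_res_start_move_to() helpers follow the same protocol:
 * validate the requested transition, record from_state/to_state, then
 * park the entry in the type's BUSY state. The caller must later
 * commit with res_end_move() or roll back with res_abort_move();
 * skipping both would leave the resource busy forever.
 */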
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
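/*
 * Parameter packing for qp_alloc_res()/RES_OP_RESERVE (as decoded
 * above): the low 24 bits of in_param carry the QP count, bits 24-31
 * the allocation flags (masked by dev->caps.alloc_res_qp_mask) and
 * the high 32 bits the alignment, i.e. roughly
 *
 *	in_param = ((u64)align << 32) | ((u64)flags << 24) | count;
 */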
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
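/*
 * Note: MAC and VLAN registrations are reference counted per slave.
 * A single tracker entry may stand for several registrations, which
 * is why rem_slave_macs() and rem_slave_vlans() call the low-level
 * __mlx4_unregister_*() helper once per reference before returning
 * the entry to the port's quota.
 */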
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
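/* The wrapper above dispatches on the low byte of in_modifier (the resource
 * type); for RES_MAC and RES_VLAN, bits 8-15 carry the port number.  A
 * minimal sketch of how a RES_MAC allocation request would be encoded,
 * assuming a hypothetical caller that has already set up the vhcr and
 * mailboxes (illustrative only, never compiled):
 */
#if 0
	vhcr->in_modifier = RES_MAC | (port << 8);
	vhcr->op_modifier = RES_OP_RESERVE_AND_MAP;
	vhcr->in_param = mac;	/* MAC address to register */
	err = mlx4_ALLOC_RES_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	/* on success, the low dword of vhcr->out_param holds the smac index */
#endif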
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(dev, slave, port);
		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(dev, slave, port);
	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	if (index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
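/* counter_free_res() above deliberately treats the "sink" counter as a
 * no-op: MLX4_SINK_COUNTER_INDEX names the shared fallback counter, which
 * no single slave owns, so freeing it must touch neither the HW counter
 * pool nor the slave's quota.
 */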
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
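/* A worked example for qp_get_mtt_size() above (values chosen for
 * illustration): log_sq_size = 8, log_sq_stride = 2, an SRQ attached (so
 * rq_size = 0), page_shift = 12 and page_offset = 0 give
 * sq_size = 1 << (8 + 2 + 4) = 16 KB, hence
 * total_pages = roundup_pow_of_two(16384 >> 12) = 4.
 */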
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
			&mlx4_priv(dev)->mr_table.dmpt_table,
			mpt->key, NULL);

		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
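/* mlx4_RST2INIT_QP_wrapper() below pins every object the QP context points
 * at (the MTT range, the receive and send CQs and, when used, the SRQ) by
 * bumping its ref_count before the command reaches the firmware; the
 * matching atomic_dec()s happen in mlx4_2RST_QP_wrapper(), so a VF cannot
 * destroy a CQ or SRQ that one of its QPs still references.
 */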
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	u8 get = vhcr->op_modifier;

	if (get != 1)
		return -EPERM;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	return err;
}
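/* The "+ 5" in eq_get_mtt_size() and cq_get_mtt_size() above reflects the
 * 32-byte (1 << 5) entry size assumed for EQs and CQs here.  For example,
 * log_eq_size = 10 (1024 entries) with 4 KB pages (page_shift = 12) needs
 * 1 << (10 + 5 - 12) = 8 MTT pages.
 */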
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	u32 qpn;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)
			return -EPERM;
	}

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev))
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
				port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
				if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
					num_gids = mlx4_get_slave_num_gids(dev, slave, port);
				else
					num_gids = 1;
				if (qp_ctx->alt_path.mgid_index >= num_gids)
					return -EINVAL;
			}
			break;
		default:
			break;
		}
		break;

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
			return -EPERM;
		}
		break;

	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			  * we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	/* check for slave valid, slave not PF, and slave active */
	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return 0;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 10) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
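/* Throughout the EQ wrappers the tracker id is (slave << 10) | eqn: EQ
 * numbers are only unique per function, so the slave number is folded into
 * the id to keep entries of different VFs apart in the global resource
 * tree.  mlx4_GEN_EQE() above relies on the same encoding when it injects
 * an event into a slave's registered event EQ.
 */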
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq = NULL;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
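/* handle_resize() above is the interesting path of MODIFY_CQ: on a resize
 * the CQ is re-targeted from its original MTT range to a new one, and the
 * tracker reference moves with it; the old MTT's ref_count drops only after
 * the firmware command succeeds, which keeps the bookkeeping consistent if
 * the command fails mid-way.
 */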
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq = NULL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	u8 pri_sched_queue;
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	if (port < 0)
		return -EINVAL;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
			  ((port & 1) << 6);

	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;
	}
	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
			   dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
			   + 1) - 1;
		if (port < 0)
			return -EINVAL;
		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
			(port & 1) << 6;
	}
	return 0;
}

static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}
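/* mlx4_INIT2RTR_QP_wrapper() below snapshots the VF-supplied scheduling and
 * VLAN related fields (sched_queue, param3, vlan_control, fvl_rx, fl,
 * vlan_index, feup) before update_vport_qp_param() may overwrite them for
 * VST enforcement; the saved values are what would be restored on a switch
 * back to VGT, as the comment at the end of the function notes.
 */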
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		goto out;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3 = orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);

	if (err)
		return err;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (port < 0)
			return port;
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
	}
	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
			if (port < 0)
				return port;
			gid[5] = port;
		}
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}
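/* In both steering modes handled by qp_attach() above, byte 5 of the GID is
 * overloaded to carry the port number for Ethernet steering, which is why
 * it is passed through mlx4_slave_convert_port() (and rewritten in place in
 * B0 mode) before the attach reaches the common steering code.
 */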
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}

static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	int real_port;

	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (real_port < 0)
			return -EINVAL;
		gid[5] = real_port;
	}

	return 0;
}

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
					 struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}

/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			(eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
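/* The mask below is the whole VF-visible surface of UPDATE_QP: only the
 * smac index and the source-check-on-loopback bit of the primary address
 * path may be touched; any other bit set in qp_mask or the path masks makes
 * mlx4_UPDATE_QP_wrapper() below fail with -EPERM.
 */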
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (			\
	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |		\
	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned int port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	if ((pri_addr_path_mask &
	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
	    !(dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
			  slave);
		return -EOPNOTSUPP;
	}

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n",
			 qpn, slave);
		return err;
	}

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);
		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n",
			 qpn);
		goto err_mac;
	}

err_mac:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
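
/*
 * A flow-steering mailbox starts with one fixed control segment and is
 * followed by a chain of variable-size specification headers. Each header
 * reports its own size in 32-bit words, and the chain ends with a header
 * whose size field is zero, so the total mailbox size can be recovered by
 * walking the chain:
 *
 *	+---------------------------------------+
 *	| struct mlx4_net_trans_rule_hw_ctrl    |  fixed control segment
 *	+---------------------------------------+
 *	| struct _rule_hw (eth/ib/ipv4/tcp/udp) |  size in dwords
 *	+---------------------------------------+
 *	| ...                                   |
 *	+---------------------------------------+
 *	| struct _rule_hw with size == 0        |  terminator
 *	+---------------------------------------+
 *
 * This is the layout qp_attach_mbox_size() below assumes when it sizes the
 * copy of the mailbox that is kept around for rule mirroring.
 */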
static u32 qp_attach_mbox_size(void *mbox)
{
	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	struct _rule_hw *rule_header;

	rule_header = (struct _rule_hw *)(mbox + size);

	while (rule_header->size) {
		size += rule_header->size * sizeof(u32);
		rule_header += 1;
	}
	return size;
}

static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);

int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;
	struct res_fs_rule *rrule;
	u32 mbox_size;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (err <= 0)
		return -EINVAL;
	ctrl->port = err;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		handle_eth_header_mcast_prio(ctrl, rule_header);

	if (slave == dev->caps.function)
		goto execute;

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put_qp;
	}

execute:
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put_qp;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		goto err_detach;
	}

	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
	if (err)
		goto err_detach;

	mbox_size = qp_attach_mbox_size(inbox->buf);
	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
	if (!rrule->mirr_mbox) {
		err = -ENOMEM;
		goto err_put_rule;
	}
	rrule->mirr_mbox_size = mbox_size;
	rrule->mirr_rule_id = 0;
	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);

	/* set different port */
	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
	if (ctrl->port == 1)
		ctrl->port = 2;
	else
		ctrl->port = 1;

	if (mlx4_is_bonded(dev))
		mlx4_do_mirror_rule(dev, rrule);

	atomic_inc(&rqp->ref_count);

err_put_rule:
	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
err_detach:
	/* detach rule on error */
	if (err)
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
err_put_qp:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
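
/*
 * Tear down a rule that was created internally by mlx4_do_mirror_rule()
 * rather than attached by the slave itself. Mirror rules are identified
 * by a NULL mirr_mbox (the DETACH wrapper below refuses to remove them on
 * a slave's behalf); they are unregistered from the tracker first and
 * then detached from the device.
 */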
static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	int err;

	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		return err;
	}

	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	return 0;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;
	u64 mirr_reg_id;
	int qpn;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;

	if (!rrule->mirr_mbox) {
		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
		return -EINVAL;
	}
	mirr_reg_id = rrule->mirr_rule_id;
	kfree(rrule->mirr_mbox);
	qpn = rrule->qpn;

	/* Release the rule from busy state before removal. rrule must not
	 * be dereferenced past this point: rem_res_range() below may free
	 * it, so the qpn was saved above while the rule was still held.
	 */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	if (mirr_reg_id && mlx4_is_bonded(dev)) {
		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
		if (err) {
			mlx4_err(dev, "Fail to get resource of mirror rule\n");
		} else {
			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
			mlx4_undo_mirror_rule(dev, rrule);
		}
	}
	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
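
/*
 * Sweep all of a slave's resources of the given type into the busy state
 * so nobody else can grab them while they are being torn down. Resources
 * that are already busy (someone else holds them) are counted and retried;
 * move_all_busy() keeps sweeping for up to five seconds before giving up
 * and reporting how many stayed busy.
 */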
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
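
/*
 * Each rem_slave_*() function below walks a resource back down its state
 * machine from whatever state it was in when the slave died, undoing one
 * stage per loop iteration. For QPs the full path is:
 *
 *	RES_QP_HW       -> 2RST_QP command, drop CQ/SRQ/MTT refcounts
 *	RES_QP_MAPPED   -> free the ICM backing
 *	RES_QP_RESERVED -> release the QPN range and the tracker entry
 *
 * so a QP that was only reserved skips the hardware and ICM stages
 * automatically. The other resource types follow the same pattern with
 * their own (shorter) state lists.
 */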
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
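
/*
 * MTT ranges are allocated in power-of-two chunks, so a tracker entry of
 * order n stands for 1 << n segments; both the range free and the quota
 * release below must use that count rather than 1.
 */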
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
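
/*
 * Replay a slave's rule on the other physical port. The mailbox that was
 * saved at attach time (already stored with its port field flipped) is
 * posted as a brand-new ATTACH command, and the resulting rule is
 * registered in the tracker as a mirror: its own mirr_mbox stays NULL so
 * it can never be mirrored again or detached directly by the slave, and
 * the original rule remembers the mirror's reg_id in mirr_rule_id for
 * cleanup on unbond or detach.
 */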
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct res_fs_rule *mirr_rule;
	u64 reg_id;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!fs_rule->mirr_mbox) {
		mlx4_err(dev, "rule mirroring mailbox is null\n");
		/* don't leak the command mailbox on this error path */
		mlx4_free_cmd_mailbox(dev, mailbox);
		return -EINVAL;
	}
	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (err)
		goto err;

	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
	if (err)
		goto err_detach;

	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
	if (err)
		goto err_rem;

	fs_rule->mirr_rule_id = reg_id;
	mirr_rule->mirr_rule_id = 0;
	mirr_rule->mirr_mbox_size = 0;
	mirr_rule->mirr_mbox = NULL;
	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);

	return 0;
err_rem:
	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
	return err;
}

static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
	struct rb_node *p;
	struct res_fs_rule *fs_rule;
	int err = 0;
	LIST_HEAD(mirr_list);

	for (p = rb_first(root); p; p = rb_next(p)) {
		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
		if ((bond && fs_rule->mirr_mbox_size) ||
		    (!bond && !fs_rule->mirr_mbox_size))
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
	}

	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
		if (bond)
			err += mlx4_do_mirror_rule(dev, fs_rule);
		else
			err += mlx4_undo_mirror_rule(dev, fs_rule);
	}
	return err;
}

int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, false);
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					/* release the mirror mailbox saved at
					 * attach time along with the rule
					 */
					kfree(fs_rule->mirr_mbox);
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					/* the res_id carries more than the raw
					 * eqn; HW2SW_EQ wants only the low
					 * 10 bits
					 */
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
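
/*
 * Full cleanup for a dying or resetting slave. Resources are torn down
 * from consumer to provider: flow rules and QPs go before the CQs and
 * SRQs they point at, which in turn go before the MRs, EQs and finally
 * the MTTs backing them, so the ref_count checks in the helpers above
 * find every user gone by the time a target object is reclaimed. The
 * whole sweep runs under the slave's tracker mutex, so no new
 * allocations can race with the cleanup.
 */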
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
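
/*
 * Worker for immediate (no QP re-creation) VST<->VGT transitions of a VF.
 * For every non-RSS Ethernet QP the slave owns that has already gone
 * through INIT2RTR on the affected port, an UPDATE_QP command is issued:
 * switching to VGT restores the vlan/QoS path attributes that were saved
 * in the tracker when VST was enforced, while switching to VST overrides
 * them with the admin-configured vlan index, QoS priority and the
 * vlan_control filter computed below.
 */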
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||	/* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}