/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_H
#define MLX4_H

#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>

#define DRV_NAME	"mlx4_core"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb, 2014"

#define MLX4_FS_UDP_UC_EN		(1 << 1)
#define MLX4_FS_TCP_UC_EN		(1 << 2)
#define MLX4_FS_NUM_OF_L2_ADDR		8
#define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
#define MLX4_FS_NUM_MCG			(1 << 17)

#define INIT_HCA_TPT_MW_ENABLE		(1 << 7)

struct mlx4_set_port_prio2tc_context {
	u8 prio2tc[4];
};

struct mlx4_port_scheduler_tc_cfg_be {
	__be16 pg;
	__be16 bw_precentage;
	__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
	__be16 max_bw_value;
};

struct mlx4_set_port_scheduler_context {
	struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
};

enum {
	MLX4_HCR_BASE		= 0x80680,
	MLX4_HCR_SIZE		= 0x0001c,
	MLX4_CLR_INT_SIZE	= 0x00008,
	MLX4_SLAVE_COMM_BASE	= 0x0,
	MLX4_COMM_PAGESIZE	= 0x1000,
	MLX4_CLOCK_SIZE		= 0x00008
};

enum {
	MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE	= 10,
	MLX4_MIN_MGM_LOG_ENTRY_SIZE	= 7,
	MLX4_MAX_MGM_LOG_ENTRY_SIZE	= 12,
	MLX4_MAX_QP_PER_MGM	= 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
	MLX4_MTT_ENTRY_PER_SEG	= 8,
};
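/*
 * Illustrative arithmetic (added note, not from the original source): with
 * MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12 an MGM entry spans 4096 bytes, i.e.
 * 4096 / 16 = 256 sixteen-byte lines.  Two of those lines hold the entry
 * header (see struct mlx4_mgm further down), and each remaining line holds
 * four QPNs, so MLX4_MAX_QP_PER_MGM = 4 * (256 - 2) = 1016.
 */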
enum {
	MLX4_NUM_PDS		= 1 << 15
};

enum {
	MLX4_CMPT_TYPE_QP	= 0,
	MLX4_CMPT_TYPE_SRQ	= 1,
	MLX4_CMPT_TYPE_CQ	= 2,
	MLX4_CMPT_TYPE_EQ	= 3,
	MLX4_CMPT_NUM_TYPE
};

enum {
	MLX4_CMPT_SHIFT		= 24,
	MLX4_NUM_CMPTS		= MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};

enum mlx4_mpt_state {
	MLX4_MPT_DISABLED = 0,
	MLX4_MPT_EN_HW,
	MLX4_MPT_EN_SW
};

#define MLX4_COMM_TIME		10000
enum {
	MLX4_COMM_CMD_RESET,
	MLX4_COMM_CMD_VHCR0,
	MLX4_COMM_CMD_VHCR1,
	MLX4_COMM_CMD_VHCR2,
	MLX4_COMM_CMD_VHCR_EN,
	MLX4_COMM_CMD_VHCR_POST,
	MLX4_COMM_CMD_FLR = 254
};

enum {
	MLX4_VF_SMI_DISABLED,
	MLX4_VF_SMI_ENABLED
};

/* The flag indicates that the slave should delay the RESET cmd */
#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
/* Indicates how many retries will be done if we are in the middle of FLR */
#define NUM_OF_RESET_RETRIES	10
#define SLEEP_TIME_IN_RESET	(2 * 1000)
enum mlx4_resource {
	RES_QP,
	RES_CQ,
	RES_SRQ,
	RES_XRCD,
	RES_MPT,
	RES_MTT,
	RES_MAC,
	RES_VLAN,
	RES_EQ,
	RES_COUNTER,
	RES_FS_RULE,
	MLX4_NUM_OF_RESOURCE_TYPE
};

enum mlx4_alloc_mode {
	RES_OP_RESERVE,
	RES_OP_RESERVE_AND_MAP,
	RES_OP_MAP_ICM,
};

enum mlx4_res_tracker_free_type {
	RES_TR_FREE_ALL,
	RES_TR_FREE_SLAVES_ONLY,
	RES_TR_FREE_STRUCTS_ONLY,
};

/*
 * Virtual HCR structures.
 * mlx4_vhcr is the SW representation, in machine endianness.
 *
 * mlx4_vhcr_cmd is the formalized structure, the one that is passed
 * to FW to go through the communication channel.
 * It is big endian, and has the same structure as the physical HCR
 * used by the command interface.
 */
struct mlx4_vhcr {
	u64 in_param;
	u64 out_param;
	u32 in_modifier;
	u32 errno;
	u16 op;
	u16 token;
	u8 op_modifier;
	u8 e_bit;
};

struct mlx4_vhcr_cmd {
	__be64 in_param;
	__be32 in_modifier;
	__be64 out_param;
	__be16 token;
	u16 reserved;
	u8 status;
	u8 flags;
	__be16 opcode;
};
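/*
 * Illustrative sketch (added, not part of the driver): a host-order mlx4_vhcr
 * is converted to the big-endian wire layout above before being handed to FW
 * over the communication channel.  The helper name below is hypothetical; in
 * the driver, cmd.c additionally packs op_modifier into the upper bits of the
 * opcode field and the event bit into flags, which is omitted here.
 *
 *	static void vhcr_to_wire(const struct mlx4_vhcr *v, struct mlx4_vhcr_cmd *c)
 *	{
 *		c->in_param    = cpu_to_be64(v->in_param);
 *		c->out_param   = cpu_to_be64(v->out_param);
 *		c->in_modifier = cpu_to_be32(v->in_modifier);
 *		c->token       = cpu_to_be16(v->token);
 *		c->opcode      = cpu_to_be16(v->op);
 *	}
 */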
struct mlx4_cmd_info {
	u16 opcode;
	bool has_inbox;
	bool has_outbox;
	bool out_is_imm;
	bool encode_slave_id;
	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		      struct mlx4_cmd_mailbox *inbox);
	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		       struct mlx4_cmd_mailbox *inbox,
		       struct mlx4_cmd_mailbox *outbox,
		       struct mlx4_cmd_info *cmd);
};

#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
#define mlx4_debug_level	(0)
#endif /* CONFIG_MLX4_DEBUG */

#define mlx4_dbg(mdev, format, ...)					\
do {									\
	if (mlx4_debug_level)						\
		dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,	\
			   ##__VA_ARGS__);				\
} while (0)

#define mlx4_err(mdev, format, ...)					\
	dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...)					\
	dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...)					\
	dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
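/*
 * Usage note (added, illustrative): the macros above take an mlx4_dev pointer
 * and a printf-style format, e.g.
 *
 *	mlx4_dbg(dev, "allocated qpn 0x%x\n", qpn);
 *	mlx4_warn(dev, "port %d: sense failed\n", port);
 *
 * mlx4_dbg() only prints when mlx4_debug_level is non-zero (without
 * CONFIG_MLX4_DEBUG it is a constant 0 and the branch is compiled away);
 * the err/info/warn variants always go through the matching dev_*() helper.
 */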
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;

#define MLX4_MAX_NUM_SLAVES	(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
#define ALL_SLAVES 0xff

struct mlx4_bitmap {
	u32 last;
	u32 top;
	u32 max;
	u32 reserved_top;
	u32 mask;
	u32 avail;
	u32 effective_len;
	spinlock_t lock;
	unsigned long *table;
};

struct mlx4_buddy {
	unsigned long **bits;
	unsigned int *num_free;
	u32 max_order;
	spinlock_t lock;
};

struct mlx4_icm;

struct mlx4_icm_table {
	u64 virt;
	int num_icm;
	u32 num_obj;
	int obj_size;
	int lowmem;
	int coherent;
	struct mutex mutex;
	struct mlx4_icm **icm;
};

#define MLX4_MPT_FLAG_SW_OWNS		(0xfUL << 28)
#define MLX4_MPT_FLAG_FREE		(0x3UL << 28)
#define MLX4_MPT_FLAG_MIO		(1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE	(1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL		(1 << 9)
#define MLX4_MPT_FLAG_REGION		(1 << 8)

#define MLX4_MPT_PD_MASK		(0x1FFFFUL)
#define MLX4_MPT_PD_VF_MASK		(0xFE0000UL)
#define MLX4_MPT_PD_FLAG_FAST_REG	(1 << 27)
#define MLX4_MPT_PD_FLAG_RAE		(1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV		(3 << 24)

#define MLX4_MPT_QP_FLAG_BOUND_QP	(1 << 7)

#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00

#define MLX4_CQE_SIZE_MASK_STRIDE	0x3
#define MLX4_EQE_SIZE_MASK_STRIDE	0x30

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mlx4_mpt_entry {
	__be32 flags;
	__be32 qpn;
	__be32 key;
	__be32 pd_flags;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 win_cnt;
	u8 reserved1[3];
	u8 mtt_rep;
	__be64 mtt_addr;
	__be32 mtt_sz;
	__be32 entity_size;
	__be32 first_byte_offset;
} __packed;

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	u8 log_eq_size;
	u8 reserved2[4];
	u8 eq_period;
	u8 reserved3;
	u8 eq_max_count;
	u8 reserved4[3];
	u8 intr;
	u8 log_page_size;
	u8 reserved5[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	u32 reserved6[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved7[4];
};

struct mlx4_cq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	__be32 logsize_usrpage;
	__be16 cq_period;
	__be16 cq_max_count;
	u8 reserved2[3];
	u8 comp_eqn;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved4[2];
	__be64 db_rec_addr;
};

struct mlx4_srq_context {
	__be32 state_logsize_srqn;
	u8 logstride;
	u8 reserved1;
	__be16 xrcd;
	__be32 pg_offset_cqn;
	u32 reserved2;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16 reserved4;
	__be16 wqe_counter;
	u32 reserved5;
	__be64 db_rec_addr;
};
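/*
 * Illustrative sketch (added): the EQ/CQ/SRQ contexts above split the 64-bit
 * MTT base address into an 8-bit high part and a 32-bit big-endian low part.
 * Programming it looks roughly like the pattern used in eq.c/cq.c (variable
 * names here are illustrative):
 *
 *	u64 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
 *
 *	eq_context->mtt_base_addr_h = mtt_addr >> 32;
 *	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
 */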
struct mlx4_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	/* lock on completion tasklet list */
	spinlock_t lock;
};

struct mlx4_eq {
	struct mlx4_dev *dev;
	void __iomem *doorbell;
	int eqn;
	u32 cons_index;
	u16 irq;
	u16 have_irq;
	int nent;
	struct mlx4_buf_list *page_list;
	struct mlx4_mtt mtt;
	struct mlx4_eq_tasklet tasklet_ctx;
};

struct mlx4_slave_eqe {
	u8 type;
	u8 port;
	u32 param;
};

struct mlx4_slave_event_eq_info {
	int eqn;
	u16 token;
};

struct mlx4_profile {
	int num_qp;
	int rdmarc_per_qp;
	int num_srq;
	int num_cq;
	int num_mcg;
	int num_mpt;
	unsigned num_mtt;
};

struct mlx4_fw {
	u64 clr_int_base;
	u64 catas_offset;
	u64 comm_base;
	u64 clock_offset;
	struct mlx4_icm *fw_icm;
	struct mlx4_icm *aux_icm;
	u32 catas_size;
	u16 fw_pages;
	u8 clr_int_bar;
	u8 catas_bar;
	u8 comm_bar;
	u8 clock_bar;
};

struct mlx4_comm {
	u32 slave_write;
	u32 slave_read;
};

enum {
	MLX4_MCAST_CONFIG	= 0,
	MLX4_MCAST_DISABLE	= 1,
	MLX4_MCAST_ENABLE	= 2,
};

#define VLAN_FLTR_SIZE	128

struct mlx4_vlan_fltr {
	__be32 entry[VLAN_FLTR_SIZE];
};

struct mlx4_mcast_entry {
	struct list_head list;
	u64 addr;
};

struct mlx4_promisc_qp {
	struct list_head list;
	u32 qpn;
};

struct mlx4_steer_index {
	struct list_head list;
	unsigned int index;
	struct list_head duplicates;
};

#define MLX4_EVENT_TYPES_NUM 64

struct mlx4_slave_state {
	u8 comm_toggle;
	u8 last_cmd;
	u8 init_port_mask;
	bool active;
	bool old_vlan_api;
	u8 function;
	dma_addr_t vhcr_dma;
	u16 mtu[MLX4_MAX_PORTS + 1];
	__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
	struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
	struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
	struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
	/* event type to eq number lookup */
	struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
	u16 eq_pi;
	u16 eq_ci;
	spinlock_t lock;
	/* initialized via kzalloc */
	u8 is_slave_going_down;
	u32 cookie;
	enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
};

#define MLX4_VGT 4095
#define NO_INDX  (-1)

struct mlx4_vport_state {
	u64 mac;
	u16 default_vlan;
	u8 default_qos;
	u32 tx_rate;
	bool spoofchk;
	u32 link_state;
};

struct mlx4_vf_admin_state {
	struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
	u8 enable_smi[MLX4_MAX_PORTS + 1];
};

struct mlx4_vport_oper_state {
	struct mlx4_vport_state state;
	int mac_idx;
	int vlan_idx;
};

struct mlx4_vf_oper_state {
	struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
	u8 smi_enabled[MLX4_MAX_PORTS + 1];
};

struct slave_list {
	struct mutex mutex;
	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};

struct resource_allocator {
	spinlock_t alloc_lock; /* protect quotas */
	union {
		int res_reserved;
		int res_port_rsvd[MLX4_MAX_PORTS];
	};
	union {
		int res_free;
		int res_port_free[MLX4_MAX_PORTS];
	};
	int *quota;
	int *allocated;
	int *guaranteed;
};

struct mlx4_resource_tracker {
	spinlock_t lock;
	/* one tree per resource type */
	struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
	/* array of slave lists, one per slave */
	struct slave_list *slave_list;
	struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};

#define SLAVE_EVENT_EQ_SIZE	128
struct mlx4_slave_event_eq {
	u32 eqn;
	u32 cons;
	u32 prod;
	spinlock_t event_lock;
	struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
};

struct mlx4_master_qp0_state {
	int proxy_qp0_active;
	int qp0_active;
	int port_active;
};

struct mlx4_mfunc_master_ctx {
	struct mlx4_slave_state *slave_state;
	struct mlx4_vf_admin_state *vf_admin;
	struct mlx4_vf_oper_state *vf_oper;
	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
	int init_port_ref[MLX4_MAX_PORTS + 1];
	u16 max_mtu[MLX4_MAX_PORTS + 1];
	int disable_mcast_ref[MLX4_MAX_PORTS + 1];
	struct mlx4_resource_tracker res_tracker;
	struct workqueue_struct *comm_wq;
	struct work_struct comm_work;
	struct work_struct slave_event_work;
	struct work_struct slave_flr_event_work;
	spinlock_t slave_state_lock;
	__be32 comm_arm_bit_vector[4];
	struct mlx4_eqe cmd_eqe;
	struct mlx4_slave_event_eq slave_eq;
	struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
};

struct mlx4_mfunc {
	struct mlx4_comm __iomem *comm;
	struct mlx4_vhcr_cmd *vhcr;
	dma_addr_t vhcr_dma;

	struct mlx4_mfunc_master_ctx master;
};

#define MGM_QPN_MASK	0x00FFFFFF
#define MGM_BLCK_LB_BIT	30

struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};

struct mlx4_cmd {
	struct pci_pool *pool;
	void __iomem *hcr;
	struct mutex hcr_mutex;
	struct mutex slave_cmd_mutex;
	struct semaphore poll_sem;
	struct semaphore event_sem;
	int max_cmds;
	spinlock_t context_lock;
	int free_head;
	struct mlx4_cmd_context *context;
	u16 token_mask;
	u8 use_events;
	u8 toggle;
	u8 comm_toggle;
	u8 initialized;
};

enum {
	MLX4_VF_IMMED_VLAN_FLAG_VLAN		= 1 << 0,
	MLX4_VF_IMMED_VLAN_FLAG_QOS		= 1 << 1,
	MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE	= 1 << 2,
};
struct mlx4_vf_immed_vlan_work {
	struct work_struct work;
	struct mlx4_priv *priv;
	int flags;
	int slave;
	int vlan_ix;
	int orig_vlan_ix;
	u8 port;
	u8 qos;
	u16 vlan_id;
	u16 orig_vlan_id;
};

struct mlx4_uar_table {
	struct mlx4_bitmap bitmap;
};

struct mlx4_mr_table {
	struct mlx4_bitmap mpt_bitmap;
	struct mlx4_buddy mtt_buddy;
	u64 mtt_base;
	u64 mpt_base;
	struct mlx4_icm_table mtt_table;
	struct mlx4_icm_table dmpt_table;
};

struct mlx4_cq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_eq_table {
	struct mlx4_bitmap bitmap;
	char *irq_names;
	void __iomem *clr_int;
	void __iomem **uar_map;
	u32 clr_mask;
	struct mlx4_eq *eq;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
	int have_irq;
	u8 inta_pin;
};

struct mlx4_srq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

enum mlx4_qp_table_zones {
	MLX4_QP_TABLE_ZONE_GENERAL,
	MLX4_QP_TABLE_ZONE_RSS,
	MLX4_QP_TABLE_ZONE_RAW_ETH,
	MLX4_QP_TABLE_ZONE_NUM
};

struct mlx4_qp_table {
	struct mlx4_bitmap *bitmap_gen;
	struct mlx4_zone_allocator *zones;
	u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];
	u32 rdmarc_base;
	int rdmarc_shift;
	spinlock_t lock;
	struct mlx4_icm_table qp_table;
	struct mlx4_icm_table auxc_table;
	struct mlx4_icm_table altc_table;
	struct mlx4_icm_table rdmarc_table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_mcg_table {
	struct mutex mutex;
	struct mlx4_bitmap bitmap;
	struct mlx4_icm_table table;
};

struct mlx4_catas_err {
	u32 __iomem *map;
	struct timer_list timer;
	struct list_head list;
};

#define MLX4_MAX_MAC_NUM	128
#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)

struct mlx4_mac_table {
	__be64 entries[MLX4_MAX_MAC_NUM];
	int refs[MLX4_MAX_MAC_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define MLX4_ROCE_GID_ENTRY_SIZE	16

struct mlx4_roce_gid_entry {
	u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
};

struct mlx4_roce_gid_table {
	struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
	struct mutex mutex;
};

#define MLX4_MAX_VLAN_NUM	128
#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)

struct mlx4_vlan_table {
	__be32 entries[MLX4_MAX_VLAN_NUM];
	int refs[MLX4_MAX_VLAN_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define SET_PORT_GEN_ALL_VALID		0x7
#define SET_PORT_PROMISC_SHIFT		31
#define SET_PORT_MC_PROMISC_SHIFT	30

enum {
	MCAST_DIRECT_ONLY	= 0,
	MCAST_DIRECT		= 1,
	MCAST_DEFAULT		= 2
};

struct mlx4_set_port_general_context {
	u8 reserved[3];
	u8 flags;
	u16 reserved2;
	__be16 mtu;
	u8 pptx;
	u8 pfctx;
	u16 reserved3;
	u8 pprx;
	u8 pfcrx;
	u16 reserved4;
};

struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;
	u8 rererved;
	u8 n_mac;
	u8 n_vlan;
	u8 n_prio;
	u8 reserved2[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;
	u8 intra_vlan_miss;
	u8 vlan_miss;
	u8 reserved3[3];
	u8 no_vlan_prio;
	__be32 promisc;
	__be32 mcast;
};
struct mlx4_port_info {
	struct mlx4_dev *dev;
	int port;
	char dev_name[16];
	struct device_attribute port_attr;
	enum mlx4_port_type tmp_type;
	char dev_mtu_name[16];
	struct device_attribute port_mtu_attr;
	struct mlx4_mac_table mac_table;
	struct mlx4_vlan_table vlan_table;
	struct mlx4_roce_gid_table gid_table;
	int base_qpn;
};

struct mlx4_sense {
	struct mlx4_dev *dev;
	u8 do_sense_port[MLX4_MAX_PORTS + 1];
	u8 sense_allowed[MLX4_MAX_PORTS + 1];
	struct delayed_work sense_poll;
};

struct mlx4_msix_ctl {
	u64 pool_bm;
	struct mutex pool_lock;
};

struct mlx4_steer {
	struct list_head promisc_qps[MLX4_NUM_STEERS];
	struct list_head steer_entries[MLX4_NUM_STEERS];
};

enum {
	MLX4_PCI_DEV_IS_VF		= 1 << 0,
	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
};

enum {
	MLX4_NO_RR	= 0,
	MLX4_USE_RR	= 1,
};

struct mlx4_priv {
	struct mlx4_dev dev;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	int pci_dev_data;
	int removed;

	struct list_head pgdir_list;
	struct mutex pgdir_mutex;

	struct mlx4_fw fw;
	struct mlx4_cmd cmd;
	struct mlx4_mfunc mfunc;

	struct mlx4_bitmap pd_bitmap;
	struct mlx4_bitmap xrcd_bitmap;
	struct mlx4_uar_table uar_table;
	struct mlx4_mr_table mr_table;
	struct mlx4_cq_table cq_table;
	struct mlx4_eq_table eq_table;
	struct mlx4_srq_table srq_table;
	struct mlx4_qp_table qp_table;
	struct mlx4_mcg_table mcg_table;
	struct mlx4_bitmap counters_bitmap;

	struct mlx4_catas_err catas_err;

	void __iomem *clr_base;

	struct mlx4_uar driver_uar;
	void __iomem *kar;
	struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
	struct mlx4_sense sense;
	struct mutex port_mutex;
	struct mlx4_msix_ctl msix_ctl;
	struct mlx4_steer *steer;
	struct list_head bf_list;
	struct mutex bf_mutex;
	struct io_mapping *bf_mapping;
	void __iomem *clock_mapping;
	int reserved_mtts;
	int fs_hash_mode;
	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	__be64 slave_node_guids[MLX4_MFUNC_MAX];

	atomic_t opreq_count;
	struct work_struct opreq_task;
};

static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
{
	return container_of(dev, struct mlx4_priv, dev);
}

#define MLX4_SENSE_RANGE	(HZ * 3)

extern struct workqueue_struct *mlx4_wq;

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
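/*
 * Illustrative usage sketch (added; error handling trimmed, variable names
 * are not from the original source): a table typically reserves the bottom
 * of its ID range and then hands out IDs from the bitmap:
 *
 *	err = mlx4_bitmap_init(&bitmap, num, num - 1, reserved_bot, 0);
 *	if (err)
 *		return err;
 *
 *	obj = mlx4_bitmap_alloc(&bitmap);
 *	if (obj == (u32) -1)
 *		return -ENOMEM;
 *	...
 *	mlx4_bitmap_free(&bitmap, obj, MLX4_NO_RR);
 *	mlx4_bitmap_cleanup(&bitmap);
 */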

int mlx4_reset(struct mlx4_dev *dev);

int mlx4_alloc_eq_table(struct mlx4_dev *dev);
void mlx4_free_eq_table(struct mlx4_dev *dev);

int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_xrcd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
int mlx4_init_eq_table(struct mlx4_dev *dev);
int mlx4_init_cq_table(struct mlx4_dev *dev);
int mlx4_init_qp_table(struct mlx4_dev *dev);
int mlx4_init_srq_table(struct mlx4_dev *dev);
int mlx4_init_mcg_table(struct mlx4_dev *dev);

void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev);
void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
void mlx4_catas_init(void);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param);

struct mlx4_dev_cap;
struct mlx4_init_hca_param;

u64 mlx4_make_profile(struct mlx4_dev *dev,
		      struct mlx4_profile *request,
		      struct mlx4_dev_cap *dev_cap,
		      struct mlx4_init_hca_param *init_hca);
void mlx4_master_comm_channel(struct work_struct *work);
void mlx4_gen_slave_eqe(struct work_struct *work);
void mlx4_master_handle_slave_flr(struct work_struct *work);

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);

enum {
	MLX4_CMD_CLEANUP_STRUCT	= 1UL << 0,
	MLX4_CMD_CLEANUP_POOL	= 1UL << 1,
	MLX4_CMD_CLEANUP_HCR	= 1UL << 2,
	MLX4_CMD_CLEANUP_VHCR	= 1UL << 3,
	MLX4_CMD_CLEANUP_ALL	= (MLX4_CMD_CLEANUP_VHCR << 1) - 1
};

int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  unsigned long timeout);

void mlx4_cq_tasklet_cb(unsigned long data);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);

void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);

void mlx4_handle_catas_err(struct mlx4_dev *dev);

int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
			 enum mlx4_port_type *stype,
			 enum mlx4_port_type *defaults);
void mlx4_start_sense(struct mlx4_dev *dev);
void mlx4_stop_sense(struct mlx4_dev *dev);
void mlx4_sense_init(struct mlx4_dev *dev);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type);
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types);

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource resource_type,
				    u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type);

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len);

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer);
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
			      int port, void *buf);
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
			       struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);

static inline void set_param_l(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}

static inline void set_param_h(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff) | ((u64) val << 32);
}

static inline u32 get_param_l(u64 *arg)
{
	return (u32) (*arg & 0xffffffff);
}

static inline u32 get_param_h(u64 *arg)
{
	return (u32) (*arg >> 32);
}
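/*
 * Usage note (added, illustrative): commands that pass or return an immediate
 * value carry it in a 64-bit parameter that these helpers split into 32-bit
 * halves, e.g. (variable names are illustrative):
 *
 *	set_param_l(&out_param, base_qpn);	low 32 bits
 *	set_param_h(&out_param, cnt);		high 32 bits
 *	...
 *	base_qpn = get_param_l(&out_param);
 */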
static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
{
	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
}

#define NOT_MASKED_PD_BITS 17

void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);

void mlx4_init_quotas(struct mlx4_dev *dev);

int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of a slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);

enum mlx4_zone_flags {
	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
	MLX4_ZONE_USE_RR			= 1UL << 3,
};

enum mlx4_zone_alloc_flags {
	/* No two objects could overlap between zones. UID
	 * could be left unused. If this flag is given and
	 * two overlapping zones are used, an object will be freed
	 * from the smallest possible matching zone.
	 */
	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP	= 1UL << 0,
};

struct mlx4_zone_allocator;

/* Create a new zone allocator */
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);

/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
 * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
 * Similarly, when searching for an object to free, this offset is taken into
 * account. The use_rr policy for allocating objects from this <bitmap> is
 * given through the MLX4_ZONE_USE_RR flag in <flags>.
 * When an allocation fails, <zone_alloc> tries to allocate from other zones
 * according to the policy set by <flags>. <puid> returns the unique identifier
 * assigned to this zone.
 */
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid);

/* Remove bitmap indicated by <uid> from <zone_alloc> */
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);

/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
 * the attached bitmaps.
 */
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);

/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
 * from the mlx4_bitmap whose uid is <uid>. The bitmap which was actually
 * allocated from is returned in <puid>. If the allocation fails, a negative
 * number is returned. Otherwise, the offset of the first object is returned.
 */
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid);

/* Free <count> objects, starting from <obj> of the zone whose uid is <uid>,
 * in zone allocator <zones>.
 */
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
			   u32 uid, u32 obj, u32 count);

/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, the zone
 * allocator can figure out the uid by itself instead of having it specified
 * when freeing an object. Other parameters are similar to
 * mlx4_zone_free_entries.
 */
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);

/* Returns a pointer to the mlx4_bitmap that was attached to <zones> with <uid> */
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
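/*
 * Illustrative usage sketch (added; error handling trimmed, variable names
 * are not from the original source): one allocator, one bitmap per zone,
 * then per-zone allocations by UID:
 *
 *	struct mlx4_zone_allocator *zones;
 *	u32 uid, actual_uid, base;
 *
 *	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
 *	mlx4_zone_add_one(zones, &bitmap, MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO,
 *			  priority, offset, &uid);
 *
 *	base = mlx4_zone_alloc_entries(zones, uid, cnt, align, 0, &actual_uid);
 *	...
 *	mlx4_zone_free_entries(zones, uid, base, cnt);
 *	mlx4_zone_allocator_destroy(zones);
 */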

#endif /* MLX4_H */