/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		u64 val;					      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: val = get_unaligned((u64 *)__p);	      \
			(dest) = be64_to_cpu(val);  break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
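
/*
 * MLX4_GET() and MLX4_PUT() copy scalar fields between CPU-endian
 * variables and the big-endian command mailboxes at a given byte
 * offset.  The sizeof() switch is resolved at compile time, and any
 * unsupported operand size leaves behind a call to the deliberately
 * undefined __buggy_use_of_MLX4_*() externs, turning the mistake into
 * a link-time error.  E.g. "MLX4_GET(field16, outbox, 0x26)" expands
 * (for a u16) to roughly "field16 = be16_to_cpup(outbox + 0x26)".
 */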

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[37] = "Wake On LAN (port1) support",
		[38] = "Wake On LAN (port2) support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[52] = "RSS IP fragments support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support",
		[21] = "Port Remap support",
		[22] = "QCN support",
		[23] = "QP rate limiting support",
		[24] = "Ethernet Flow control statistics support",
		[25] = "Granular QoS per VF support",
		[26] = "Port ETS Scheduler support",
		[27] = "Port beacon support",
		[28] = "RX-ALL support",
		[29] = "802.1ad offload support",
		[31] = "Modifying loopback source checks using UPDATE_QP support",
		[32] = "Loopback source checks support",
		[33] = "RoCEv2 support",
		[34] = "DMFS Sniffer support (UC & MC)",
		[35] = "Diag counters per port",
		[36] = "QinQ VST mode support",
		[37] = "sl to vl mapping table change event support",
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz,   MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
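
/*
 * Most commands in this file follow the same mailbox pattern: allocate
 * a DMA-coherent mailbox, fill it in (or read it back) with
 * MLX4_PUT()/MLX4_GET() at firmware-defined byte offsets, execute the
 * command with mlx4_cmd()/mlx4_cmd_box(), and free the mailbox.  A
 * minimal sketch of the read side ("op" and "SOME_OFFSET" are
 * placeholders):
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, op,
 *			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 *	if (!err)
 *		MLX4_GET(field, mailbox->buf, SOME_OFFSET);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */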

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
		err = __mlx4_register_vlan(&priv->dev, port,
					   vp_admin->default_vlan,
					   &vp_oper->vlan_idx);
		if (err) {
			vp_oper->vlan_idx = NO_INDX;
			mlx4_warn(&priv->dev,
				  "No vlan resources slave %d, port %d\n",
				  slave, port);
			return err;
		}
		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_oper->state.default_vlan),
			 vp_oper->vlan_idx, slave, port);
	}
	vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos  = vp_admin->default_qos;

	return 0;
}

static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	slave_state = &priv->mfunc.master.slave_state[slave];

	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
	    (!slave_state->active))
		return 0;

	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos)
		return 0;

	if (!slave_state->vst_qinq_supported) {
		/* Warn and revert the request to set vst QinQ mode */
		vp_admin->vlan_proto   = vp_oper->state.vlan_proto;
		vp_admin->default_vlan = vp_oper->state.default_vlan;
		vp_admin->default_qos  = vp_oper->state.default_qos;

		mlx4_warn(&priv->dev,
			  "Slave %d does not support VST QinQ mode\n", slave);
		return 0;
	}

	err = mlx4_activate_vst_qinq(priv, slave, port);
	return err;
}
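
/*
 * QUERY_FUNC_CAP comes in two flavours, selected by the opcode
 * modifier: op_modifier == 0 is the general query (flags, per-function
 * quotas, EQ limits), while op_modifier == 1 queries a single logical
 * port (proxy and tunnel QP numbers, physical port mapping).  The
 * wrapper below builds the answer the PF presents to a VF.
 */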

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_PHV_BIT			0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE	0x20

#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ	BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	BIT(31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);
		struct mlx4_vport_oper_state *vp_oper;

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		err = mlx4_handle_vst_qinq(priv, slave, port);
		if (err)
			return err;

		field = 0;
		if (dev->caps.phv_bit[port])
			field |= QUERY_FUNC_CAP_PHV_BIT;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		struct mlx4_slave_state *slave_state =
			&priv->mfunc.master.slave_state[slave];

		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
			       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
			       QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);

		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
			slave_state->vst_qinq_supported = true;

	} else
		err = -EINVAL;

	return err;
}
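
/*
 * mlx4_QUERY_FUNC_CAP() is the requesting side of the wrapper above:
 * a function (typically a VF) issues QUERY_FUNC_CAP as a wrapped
 * command and parses the per-function quotas and per-port QP numbers
 * out of the returned mailbox.  gen_or_port doubles as the selector:
 * 0 means the general query, a port number means the port query.
 */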

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;
	u32 slave_caps;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
	in_modifier = op_modifier ? gen_or_port : slave_caps;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -EINVAL;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

static void disable_unsupported_roce_caps(void *buf);
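
/*
 * QUERY_DEV_CAP returns a 0x100-byte mailbox describing global device
 * limits.  The QUERY_DEV_CAP_*_OFFSET constants below are byte offsets
 * into that mailbox; most size limits are stored as log2 values in a
 * few bits, hence the "1 << (field & mask)" decoding throughout.
 */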

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_WOL_OFFSET		0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET	0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT	0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	if (mlx4_is_mfunc(dev))
		disable_unsupported_roce_caps(outbox);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
	dev_cap->wol_port[1] = !!(field & 0x20);
	dev_cap->wol_port[2] = !!(field & 0x40);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;

	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
	if (field32 & (1 << 17))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val  = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val  = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
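
/*
 * Pretty-print the capabilities gathered above.  Everything goes
 * through mlx4_dbg(), so the output is only visible when debug logging
 * is enabled for the driver.
 */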

void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);

	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
		struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;

		mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
			 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
			 rl_caps->min_unit, rl_caps->min_val);
	}

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}

int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu	   = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->link_state = (field & 0x80) >> 7;
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids	   = 1 << (field >> 4);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl	   = field & 0xf;
		port_cap->max_tc_eth	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs  = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
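
/*
 * mlx4_QUERY_DEV_CAP_wrapper() filters the PF's raw QUERY_DEV_CAP
 * answer before it is returned to a slave: capabilities the guest must
 * not use (timestamping, VXLAN, BlueFlame, port beacon, host-side virt
 * features, etc.) are masked out of the mailbox in place.
 */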

#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS	(1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u16	field16;
	u32	bmme_flags, field32;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	disable_unsupported_roce_caps(outbox->buf);
	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

	/* Not exposing RSS IP fragments to guests */
	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling and QoS support */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xd7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, disable port BEACON */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 and port remap */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	/* turn off QP max-rate limiting for guests */
	field16 = 0;
	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

	/* turn off QoS per VF support for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	field &= 0xef;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

	/* turn off ignore FCS feature for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	field &= 0xfb;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

	return 0;
}

static void disable_unsupported_roce_caps(void *buf)
{
	u32 flags;

	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags &= ~(1UL << 31);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	flags &= ~(1UL << 24);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	flags &= ~(MLX4_FLAG_ROCE_V1_V2);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}
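
/*
 * mlx4_QUERY_PORT_wrapper() similarly rewrites a slave's QUERY_PORT
 * answer: the MAC is replaced by the VF's administered MAC, link
 * sensing is masked off, and the reported link state follows the admin
 * setting (or the other port's state when the device is bonded).
 */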

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
			int other_port = (port == 1) ? 2 : 1;
			struct mlx4_port_cap port_cap;

			err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
			if (err)
				goto out;
			port_type |= (port_cap.link_state << 7);
		}

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}
out:
	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u16 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
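
/*
 * mlx4_map_cmd() hands a list of ICM chunks to firmware.  Each mailbox
 * entry is a (virtual address, physical address | log2 size) pair of
 * big-endian 64-bit words, so one mailbox carries
 * MLX4_MAILBOX_SIZE / 16 entries; the command is reissued whenever the
 * mailbox fills up.  Passing virt == -1 maps without a virtual address
 * (as mlx4_MAP_FA() below does).
 */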
1523 */ 1524 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1525 if (lg < MLX4_ICM_PAGE_SHIFT) { 1526 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", 1527 MLX4_ICM_PAGE_SIZE, 1528 (unsigned long long) mlx4_icm_addr(&iter), 1529 mlx4_icm_size(&iter)); 1530 err = -EINVAL; 1531 goto out; 1532 } 1533 1534 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { 1535 if (virt != -1) { 1536 pages[nent * 2] = cpu_to_be64(virt); 1537 virt += 1 << lg; 1538 } 1539 1540 pages[nent * 2 + 1] = 1541 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | 1542 (lg - MLX4_ICM_PAGE_SHIFT)); 1543 ts += 1 << (lg - 10); 1544 ++tc; 1545 1546 if (++nent == MLX4_MAILBOX_SIZE / 16) { 1547 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1548 MLX4_CMD_TIME_CLASS_B, 1549 MLX4_CMD_NATIVE); 1550 if (err) 1551 goto out; 1552 nent = 0; 1553 } 1554 } 1555 } 1556 1557 if (nent) 1558 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1559 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1560 if (err) 1561 goto out; 1562 1563 switch (op) { 1564 case MLX4_CMD_MAP_FA: 1565 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); 1566 break; 1567 case MLX4_CMD_MAP_ICM_AUX: 1568 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); 1569 break; 1570 case MLX4_CMD_MAP_ICM: 1571 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", 1572 tc, ts, (unsigned long long) virt - (ts << 10)); 1573 break; 1574 } 1575 1576 out: 1577 mlx4_free_cmd_mailbox(dev, mailbox); 1578 return err; 1579 } 1580 1581 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) 1582 { 1583 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); 1584 } 1585 1586 int mlx4_UNMAP_FA(struct mlx4_dev *dev) 1587 { 1588 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, 1589 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1590 } 1591 1592 1593 int mlx4_RUN_FW(struct mlx4_dev *dev) 1594 { 1595 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, 1596 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1597 } 1598 1599 int mlx4_QUERY_FW(struct mlx4_dev *dev) 1600 { 1601 struct mlx4_fw *fw = &mlx4_priv(dev)->fw; 1602 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 1603 struct mlx4_cmd_mailbox *mailbox; 1604 u32 *outbox; 1605 int err = 0; 1606 u64 fw_ver; 1607 u16 cmd_if_rev; 1608 u8 lg; 1609 1610 #define QUERY_FW_OUT_SIZE 0x100 1611 #define QUERY_FW_VER_OFFSET 0x00 1612 #define QUERY_FW_PPF_ID 0x09 1613 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a 1614 #define QUERY_FW_MAX_CMD_OFFSET 0x0f 1615 #define QUERY_FW_ERR_START_OFFSET 0x30 1616 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 1617 #define QUERY_FW_ERR_BAR_OFFSET 0x3c 1618 1619 #define QUERY_FW_SIZE_OFFSET 0x00 1620 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 1621 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 1622 1623 #define QUERY_FW_COMM_BASE_OFFSET 0x40 1624 #define QUERY_FW_COMM_BAR_OFFSET 0x48 1625 1626 #define QUERY_FW_CLOCK_OFFSET 0x50 1627 #define QUERY_FW_CLOCK_BAR 0x58 1628 1629 mailbox = mlx4_alloc_cmd_mailbox(dev); 1630 if (IS_ERR(mailbox)) 1631 return PTR_ERR(mailbox); 1632 outbox = mailbox->buf; 1633 1634 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1635 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1636 if (err) 1637 goto out; 1638 1639 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); 1640 /* 1641 * FW subminor version is at more significant bits than minor 1642 * version, so swap here. 
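* (Example: raw 0x0002_0100_002a, i.e. major 2, subminor 0x0100, minor 0x002a, becomes 0x0002_002a_0100 and is printed below as FW version 2.42.256.)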
1643 */ 1644 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1645 ((fw_ver & 0xffff0000ull) >> 16) | 1646 ((fw_ver & 0x0000ffffull) << 16); 1647 1648 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1649 dev->caps.function = lg; 1650 1651 if (mlx4_is_slave(dev)) 1652 goto out; 1653 1654 1655 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1656 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1657 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1658 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", 1659 cmd_if_rev); 1660 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1661 (int) (dev->caps.fw_ver >> 32), 1662 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1663 (int) dev->caps.fw_ver & 0xffff); 1664 mlx4_err(dev, "This driver version supports only revisions %d to %d\n", 1665 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1666 err = -ENODEV; 1667 goto out; 1668 } 1669 1670 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1671 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1672 1673 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1674 cmd->max_cmds = 1 << lg; 1675 1676 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1677 (int) (dev->caps.fw_ver >> 32), 1678 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1679 (int) dev->caps.fw_ver & 0xffff, 1680 cmd_if_rev, cmd->max_cmds); 1681 1682 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1683 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1684 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1685 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1686 1687 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1688 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1689 1690 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1691 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1692 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1693 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1694 1695 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1696 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1697 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1698 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1699 fw->comm_bar, fw->comm_base); 1700 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1701 1702 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1703 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1704 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1705 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1706 fw->clock_bar, fw->clock_offset); 1707 1708 /* 1709 * Round up number of system pages needed in case 1710 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
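* (E.g. with 4 KB ICM pages on a 64 KB PAGE_SIZE system, fw_pages is aligned up to a multiple of 16 ICM pages and then shifted right by 4; on a 4 KB PAGE_SIZE system the expression leaves the count unchanged.)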
1711 */ 1712 fw->fw_pages = 1713 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1714 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1715 1716 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1717 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1718 1719 out: 1720 mlx4_free_cmd_mailbox(dev, mailbox); 1721 return err; 1722 } 1723 1724 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1725 struct mlx4_vhcr *vhcr, 1726 struct mlx4_cmd_mailbox *inbox, 1727 struct mlx4_cmd_mailbox *outbox, 1728 struct mlx4_cmd_info *cmd) 1729 { 1730 u8 *outbuf; 1731 int err; 1732 1733 outbuf = outbox->buf; 1734 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1735 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1736 if (err) 1737 return err; 1738 1739 /* for slaves, set pci PPF ID to invalid and zero out everything 1740 * else except FW version */ 1741 outbuf[0] = outbuf[1] = 0; 1742 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1743 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1744 1745 return 0; 1746 } 1747 1748 static void get_board_id(void *vsd, char *board_id) 1749 { 1750 int i; 1751 1752 #define VSD_OFFSET_SIG1 0x00 1753 #define VSD_OFFSET_SIG2 0xde 1754 #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1755 #define VSD_OFFSET_TS_BOARD_ID 0x20 1756 1757 #define VSD_SIGNATURE_TOPSPIN 0x5ad 1758 1759 memset(board_id, 0, MLX4_BOARD_ID_LEN); 1760 1761 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1762 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1763 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); 1764 } else { 1765 /* 1766 * The board ID is a string but the firmware byte 1767 * swaps each 4-byte word before passing it back to 1768 * us. Therefore we need to swab it before printing. 1769 */ 1770 u32 *bid_u32 = (u32 *)board_id; 1771 1772 for (i = 0; i < 4; ++i) { 1773 u32 *addr; 1774 u32 val; 1775 1776 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); 1777 val = get_unaligned(addr); 1778 val = swab32(val); 1779 put_unaligned(val, &bid_u32[i]); 1780 } 1781 } 1782 } 1783 1784 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) 1785 { 1786 struct mlx4_cmd_mailbox *mailbox; 1787 u32 *outbox; 1788 int err; 1789 1790 #define QUERY_ADAPTER_OUT_SIZE 0x100 1791 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1792 #define QUERY_ADAPTER_VSD_OFFSET 0x20 1793 1794 mailbox = mlx4_alloc_cmd_mailbox(dev); 1795 if (IS_ERR(mailbox)) 1796 return PTR_ERR(mailbox); 1797 outbox = mailbox->buf; 1798 1799 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, 1800 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1801 if (err) 1802 goto out; 1803 1804 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1805 1806 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1807 adapter->board_id); 1808 1809 out: 1810 mlx4_free_cmd_mailbox(dev, mailbox); 1811 return err; 1812 } 1813 1814 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) 1815 { 1816 struct mlx4_cmd_mailbox *mailbox; 1817 __be32 *inbox; 1818 int err; 1819 static const u8 a0_dmfs_hw_steering[] = { 1820 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, 1821 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, 1822 [MLX4_STEERING_DMFS_A0_STATIC] = 2, 1823 [MLX4_STEERING_DMFS_A0_DISABLE] = 3 1824 }; 1825 1826 #define INIT_HCA_IN_SIZE 0x200 1827 #define INIT_HCA_VERSION_OFFSET 0x000 1828 #define INIT_HCA_VERSION 2 1829 #define INIT_HCA_VXLAN_OFFSET 0x0c 1830 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1831 #define INIT_HCA_FLAGS_OFFSET 0x014 1832 #define 
INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1833 #define INIT_HCA_QPC_OFFSET 0x020 1834 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1835 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1836 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) 1837 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) 1838 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) 1839 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) 1840 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) 1841 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) 1842 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) 1843 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1844 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1845 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1846 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1847 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1848 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1849 #define INIT_HCA_MCAST_OFFSET 0x0c0 1850 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 1851 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 1852 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1853 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1854 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1855 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 1856 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 1857 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) 1858 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) 1859 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) 1860 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) 1861 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) 1862 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) 1863 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) 1864 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) 1865 #define INIT_HCA_TPT_OFFSET 0x0f0 1866 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1867 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) 1868 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1869 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) 1870 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) 1871 #define INIT_HCA_UAR_OFFSET 0x120 1872 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) 1873 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) 1874 1875 mailbox = mlx4_alloc_cmd_mailbox(dev); 1876 if (IS_ERR(mailbox)) 1877 return PTR_ERR(mailbox); 1878 inbox = mailbox->buf; 1879 1880 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1881 1882 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1883 ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); 1884 1885 #if defined(__LITTLE_ENDIAN) 1886 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 1887 #elif defined(__BIG_ENDIAN) 1888 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); 1889 #else 1890 #error Host endianness not defined 1891 #endif 1892 /* Check port for UD address vector: */ 1893 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); 1894 1895 /* Enable IPoIB checksumming if we can: */ 1896 if (dev->caps.flags & 
MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 1897 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1898 1899 /* Enable QoS support if module parameter set */ 1900 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) 1901 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1902 1903 /* enable counters */ 1904 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1905 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1906 1907 /* Enable RSS spread to fragmented IP packets when supported */ 1908 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) 1909 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); 1910 1911 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1912 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1913 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1914 dev->caps.eqe_size = 64; 1915 dev->caps.eqe_factor = 1; 1916 } else { 1917 dev->caps.eqe_size = 32; 1918 dev->caps.eqe_factor = 0; 1919 } 1920 1921 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { 1922 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); 1923 dev->caps.cqe_size = 64; 1924 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1925 } else { 1926 dev->caps.cqe_size = 32; 1927 } 1928 1929 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */ 1930 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && 1931 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { 1932 dev->caps.eqe_size = cache_line_size(); 1933 dev->caps.cqe_size = cache_line_size(); 1934 dev->caps.eqe_factor = 0; 1935 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | 1936 (ilog2(dev->caps.eqe_size) - 5)), 1937 INIT_HCA_EQE_CQE_STRIDE_OFFSET); 1938 1939 /* Userspace still needs to know it must support CQE > 32B */ 1940 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1941 } 1942 1943 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1944 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); 1945 1946 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1947 1948 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1949 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); 1950 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); 1951 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); 1952 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); 1953 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); 1954 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); 1955 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1956 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1957 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1958 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1959 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1960 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1961 1962 /* steering attributes */ 1963 if (dev->caps.steering_mode == 1964 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1965 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 1966 cpu_to_be32(1 << 1967 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); 1968 1969 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); 1970 MLX4_PUT(inbox, param->log_mc_entry_sz, 1971 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1972 MLX4_PUT(inbox, param->log_mc_table_sz, 1973 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 1974 /* Enable Ethernet flow steering 1975 * with udp unicast and tcp unicast 1976 */ 1977 if (dev->caps.dmfs_high_steer_mode != 1978
MLX4_STEERING_DMFS_A0_STATIC) 1979 MLX4_PUT(inbox, 1980 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1981 INIT_HCA_FS_ETH_BITS_OFFSET); 1982 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1983 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); 1984 /* Enable IPoIB flow steering 1985 * with udp unicast and tcp unicast 1986 */ 1987 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1988 INIT_HCA_FS_IB_BITS_OFFSET); 1989 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1990 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); 1991 1992 if (dev->caps.dmfs_high_steer_mode != 1993 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1994 MLX4_PUT(inbox, 1995 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] 1996 << 6)), 1997 INIT_HCA_FS_A0_OFFSET); 1998 } else { 1999 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 2000 MLX4_PUT(inbox, param->log_mc_entry_sz, 2001 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2002 MLX4_PUT(inbox, param->log_mc_hash_sz, 2003 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2004 MLX4_PUT(inbox, param->log_mc_table_sz, 2005 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2006 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) 2007 MLX4_PUT(inbox, (u8) (1 << 3), 2008 INIT_HCA_UC_STEERING_OFFSET); 2009 } 2010 2011 /* TPT attributes */ 2012 2013 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); 2014 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); 2015 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); 2016 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); 2017 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); 2018 2019 /* UAR attributes */ 2020 2021 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2022 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 2023 2024 /* set parser VXLAN attributes */ 2025 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { 2026 u8 parser_params = 0; 2027 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 2028 } 2029 2030 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 2031 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2032 2033 if (err) 2034 mlx4_err(dev, "INIT_HCA returns %d\n", err); 2035 2036 mlx4_free_cmd_mailbox(dev, mailbox); 2037 return err; 2038 } 2039 2040 int mlx4_QUERY_HCA(struct mlx4_dev *dev, 2041 struct mlx4_init_hca_param *param) 2042 { 2043 struct mlx4_cmd_mailbox *mailbox; 2044 __be32 *outbox; 2045 u32 dword_field; 2046 int err; 2047 u8 byte_field; 2048 static const u8 a0_dmfs_query_hw_steering[] = { 2049 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2050 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2051 [2] = MLX4_STEERING_DMFS_A0_STATIC, 2052 [3] = MLX4_STEERING_DMFS_A0_DISABLE 2053 }; 2054 2055 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 2056 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c 2057 2058 mailbox = mlx4_alloc_cmd_mailbox(dev); 2059 if (IS_ERR(mailbox)) 2060 return PTR_ERR(mailbox); 2061 outbox = mailbox->buf; 2062 2063 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2064 MLX4_CMD_QUERY_HCA, 2065 MLX4_CMD_TIME_CLASS_B, 2066 !mlx4_is_slave(dev)); 2067 if (err) 2068 goto out; 2069 2070 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 2071 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2072 2073 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2074 2075 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2076 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2077 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2078 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2079 MLX4_GET(param->cqc_base, outbox, 
INIT_HCA_CQC_BASE_OFFSET); 2080 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2081 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2082 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2083 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2084 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2085 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2086 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2087 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2088 2089 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2090 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2091 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2092 } else { 2093 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 2094 if (byte_field & 0x8) 2095 param->steering_mode = MLX4_STEERING_MODE_B0; 2096 else 2097 param->steering_mode = MLX4_STEERING_MODE_A0; 2098 } 2099 2100 if (dword_field & (1 << 13)) 2101 param->rss_ip_frags = 1; 2102 2103 /* steering attributes */ 2104 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2105 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2106 MLX4_GET(param->log_mc_entry_sz, outbox, 2107 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2108 MLX4_GET(param->log_mc_table_sz, outbox, 2109 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2110 MLX4_GET(byte_field, outbox, 2111 INIT_HCA_FS_A0_OFFSET); 2112 param->dmfs_high_steer_mode = 2113 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2114 } else { 2115 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2116 MLX4_GET(param->log_mc_entry_sz, outbox, 2117 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2118 MLX4_GET(param->log_mc_hash_sz, outbox, 2119 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2120 MLX4_GET(param->log_mc_table_sz, outbox, 2121 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2122 } 2123 2124 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2125 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); 2126 if (byte_field & 0x20) /* 64-byte EQE enabled */ 2127 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; 2128 if (byte_field & 0x40) /* 64-byte CQE enabled */ 2129 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; 2130 2131 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */ 2132 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); 2133 if (byte_field) { 2134 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; 2135 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; 2136 param->cqe_size = 1 << ((byte_field & 2137 MLX4_CQE_SIZE_MASK_STRIDE) + 5); 2138 param->eqe_size = 1 << (((byte_field & 2139 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); 2140 } 2141 2142 /* TPT attributes */ 2143 2144 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2145 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2146 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2147 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2148 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2149 2150 /* UAR attributes */ 2151 2152 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2153 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2154 2155 /* phv_check enable */ 2156 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2157 if (byte_field & 0x2) 2158 param->phv_check_en = 1; 2159 out: 2160 mlx4_free_cmd_mailbox(dev, mailbox); 2161 2162 return err; 2163 } 2164 2165 static int
mlx4_hca_core_clock_update(struct mlx4_dev *dev) 2166 { 2167 struct mlx4_cmd_mailbox *mailbox; 2168 __be32 *outbox; 2169 int err; 2170 2171 mailbox = mlx4_alloc_cmd_mailbox(dev); 2172 if (IS_ERR(mailbox)) { 2173 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 2174 return PTR_ERR(mailbox); 2175 } 2176 outbox = mailbox->buf; 2177 2178 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2179 MLX4_CMD_QUERY_HCA, 2180 MLX4_CMD_TIME_CLASS_B, 2181 !mlx4_is_slave(dev)); 2182 if (err) { 2183 mlx4_warn(dev, "hca_core_clock update failed\n"); 2184 goto out; 2185 } 2186 2187 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2188 2189 out: 2190 mlx4_free_cmd_mailbox(dev, mailbox); 2191 2192 return err; 2193 } 2194 2195 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 2196 * and real QP0 are active, so that the paravirtualized QP0 is ready 2197 * to operate */ 2198 static int check_qp0_state(struct mlx4_dev *dev, int function, int port) 2199 { 2200 struct mlx4_priv *priv = mlx4_priv(dev); 2201 /* irrelevant if not infiniband */ 2202 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && 2203 priv->mfunc.master.qp0_state[port].qp0_active) 2204 return 1; 2205 return 0; 2206 } 2207 2208 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, 2209 struct mlx4_vhcr *vhcr, 2210 struct mlx4_cmd_mailbox *inbox, 2211 struct mlx4_cmd_mailbox *outbox, 2212 struct mlx4_cmd_info *cmd) 2213 { 2214 struct mlx4_priv *priv = mlx4_priv(dev); 2215 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2216 int err; 2217 2218 if (port < 0) 2219 return -EINVAL; 2220 2221 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 2222 return 0; 2223 2224 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2225 /* Enable port only if it was previously disabled */ 2226 if (!priv->mfunc.master.init_port_ref[port]) { 2227 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2228 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2229 if (err) 2230 return err; 2231 } 2232 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2233 } else { 2234 if (slave == mlx4_master_func_num(dev)) { 2235 if (check_qp0_state(dev, slave, port) && 2236 !priv->mfunc.master.qp0_state[port].port_active) { 2237 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2238 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2239 if (err) 2240 return err; 2241 priv->mfunc.master.qp0_state[port].port_active = 1; 2242 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2243 } 2244 } else 2245 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2246 } 2247 ++priv->mfunc.master.init_port_ref[port]; 2248 return 0; 2249 } 2250 2251 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) 2252 { 2253 struct mlx4_cmd_mailbox *mailbox; 2254 u32 *inbox; 2255 int err; 2256 u32 flags; 2257 u16 field; 2258 2259 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 2260 #define INIT_PORT_IN_SIZE 256 2261 #define INIT_PORT_FLAGS_OFFSET 0x00 2262 #define INIT_PORT_FLAG_SIG (1 << 18) 2263 #define INIT_PORT_FLAG_NG (1 << 17) 2264 #define INIT_PORT_FLAG_G0 (1 << 16) 2265 #define INIT_PORT_VL_SHIFT 4 2266 #define INIT_PORT_PORT_WIDTH_SHIFT 8 2267 #define INIT_PORT_MTU_OFFSET 0x04 2268 #define INIT_PORT_MAX_GID_OFFSET 0x06 2269 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a 2270 #define INIT_PORT_GUID0_OFFSET 0x10 2271 #define INIT_PORT_NODE_GUID_OFFSET 0x18 2272 #define INIT_PORT_SI_GUID_OFFSET 0x20 2273 2274 mailbox = mlx4_alloc_cmd_mailbox(dev); 2275 if (IS_ERR(mailbox)) 2276 return 
PTR_ERR(mailbox); 2277 inbox = mailbox->buf; 2278 2279 flags = 0; 2280 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 2281 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 2282 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 2283 2284 field = 128 << dev->caps.ib_mtu_cap[port]; 2285 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 2286 field = dev->caps.gid_table_len[port]; 2287 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 2288 field = dev->caps.pkey_table_len[port]; 2289 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); 2290 2291 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, 2292 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2293 2294 mlx4_free_cmd_mailbox(dev, mailbox); 2295 } else 2296 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2297 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2298 2299 if (!err) 2300 mlx4_hca_core_clock_update(dev); 2301 2302 return err; 2303 } 2304 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2305 2306 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, 2307 struct mlx4_vhcr *vhcr, 2308 struct mlx4_cmd_mailbox *inbox, 2309 struct mlx4_cmd_mailbox *outbox, 2310 struct mlx4_cmd_info *cmd) 2311 { 2312 struct mlx4_priv *priv = mlx4_priv(dev); 2313 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2314 int err; 2315 2316 if (port < 0) 2317 return -EINVAL; 2318 2319 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 2320 (1 << port))) 2321 return 0; 2322 2323 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2324 if (priv->mfunc.master.init_port_ref[port] == 1) { 2325 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2326 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2327 if (err) 2328 return err; 2329 } 2330 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2331 } else { 2332 /* infiniband port */ 2333 if (slave == mlx4_master_func_num(dev)) { 2334 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2335 priv->mfunc.master.qp0_state[port].port_active) { 2336 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2337 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2338 if (err) 2339 return err; 2340 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2341 priv->mfunc.master.qp0_state[port].port_active = 0; 2342 } 2343 } else 2344 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2345 } 2346 --priv->mfunc.master.init_port_ref[port]; 2347 return 0; 2348 } 2349 2350 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2351 { 2352 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2353 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2354 } 2355 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2356 2357 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2358 { 2359 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2360 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2361 } 2362 2363 struct mlx4_config_dev { 2364 __be32 update_flags; 2365 __be32 rsvd1[3]; 2366 __be16 vxlan_udp_dport; 2367 __be16 rsvd2; 2368 __be16 roce_v2_entropy; 2369 __be16 roce_v2_udp_dport; 2370 __be32 roce_flags; 2371 __be32 rsvd4[25]; 2372 __be16 rsvd5; 2373 u8 rsvd6; 2374 u8 rx_checksum_val; 2375 }; 2376 2377 #define MLX4_VXLAN_UDP_DPORT (1 << 0) 2378 #define MLX4_ROCE_V2_UDP_DPORT BIT(3) 2379 #define MLX4_DISABLE_RX_PORT BIT(18) 2380 2381 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2382 { 2383 int err; 2384 struct mlx4_cmd_mailbox *mailbox; 2385 2386 mailbox = mlx4_alloc_cmd_mailbox(dev); 2387 if (IS_ERR(mailbox)) 2388 return 
PTR_ERR(mailbox); 2389 2390 memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); 2391 2392 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, 2393 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2394 2395 mlx4_free_cmd_mailbox(dev, mailbox); 2396 return err; 2397 } 2398 2399 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2400 { 2401 int err; 2402 struct mlx4_cmd_mailbox *mailbox; 2403 2404 mailbox = mlx4_alloc_cmd_mailbox(dev); 2405 if (IS_ERR(mailbox)) 2406 return PTR_ERR(mailbox); 2407 2408 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, 2409 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2410 2411 if (!err) 2412 memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); 2413 2414 mlx4_free_cmd_mailbox(dev, mailbox); 2415 return err; 2416 } 2417 2418 /* Conversion between the HW values and the actual functionality. 2419 * The value is given by the array index, 2420 * and the functionality is determined by the flags. 2421 */ 2422 static const u8 config_dev_csum_flags[] = { 2423 [0] = 0, 2424 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, 2425 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | 2426 MLX4_RX_CSUM_MODE_L4, 2427 [3] = MLX4_RX_CSUM_MODE_L4 | 2428 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | 2429 MLX4_RX_CSUM_MODE_MULTI_VLAN 2430 }; 2431 2432 int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2433 struct mlx4_config_dev_params *params) 2434 { 2435 struct mlx4_config_dev config_dev = {0}; 2436 int err; 2437 u8 csum_mask; 2438 2439 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 2440 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 2441 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 2442 2443 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) 2444 return -EOPNOTSUPP; 2445 2446 err = mlx4_CONFIG_DEV_get(dev, &config_dev); 2447 if (err) 2448 return err; 2449 2450 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & 2451 CONFIG_DEV_RX_CSUM_MODE_MASK; 2452 2453 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2454 return -EINVAL; 2455 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; 2456 2457 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & 2458 CONFIG_DEV_RX_CSUM_MODE_MASK; 2459 2460 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2461 return -EINVAL; 2462 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; 2463 2464 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); 2465 2466 return 0; 2467 } 2468 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); 2469 2470 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) 2471 { 2472 struct mlx4_config_dev config_dev; 2473 2474 memset(&config_dev, 0, sizeof(config_dev)); 2475 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); 2476 config_dev.vxlan_udp_dport = udp_port; 2477 2478 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2479 } 2480 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2481 2482 #define CONFIG_DISABLE_RX_PORT BIT(15) 2483 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) 2484 { 2485 struct mlx4_config_dev config_dev; 2486 2487 memset(&config_dev, 0, sizeof(config_dev)); 2488 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); 2489 if (dis) 2490 config_dev.roce_flags = 2491 cpu_to_be32(CONFIG_DISABLE_RX_PORT); 2492 2493 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2494 } 2495 2496 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) 2497 { 2498 struct
mlx4_config_dev config_dev; 2499 2500 memset(&config_dev, 0, sizeof(config_dev)); 2501 config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); 2502 config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); 2503 2504 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2505 } 2506 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); 2507 2508 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) 2509 { 2510 struct mlx4_cmd_mailbox *mailbox; 2511 struct { 2512 __be32 v_port1; 2513 __be32 v_port2; 2514 } *v2p; 2515 int err; 2516 2517 mailbox = mlx4_alloc_cmd_mailbox(dev); 2518 if (IS_ERR(mailbox)) 2519 return -ENOMEM; 2520 2521 v2p = mailbox->buf; 2522 v2p->v_port1 = cpu_to_be32(port1); 2523 v2p->v_port2 = cpu_to_be32(port2); 2524 2525 err = mlx4_cmd(dev, mailbox->dma, 0, 2526 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, 2527 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2528 2529 mlx4_free_cmd_mailbox(dev, mailbox); 2530 return err; 2531 } 2532 2533 2534 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2535 { 2536 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 2537 MLX4_CMD_SET_ICM_SIZE, 2538 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2539 if (ret) 2540 return ret; 2541 2542 /* 2543 * Round up number of system pages needed in case 2544 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 2545 */ 2546 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 2547 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 2548 2549 return 0; 2550 } 2551 2552 int mlx4_NOP(struct mlx4_dev *dev) 2553 { 2554 /* Input modifier of 0x1f means "finish as soon as possible." */ 2555 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2556 MLX4_CMD_NATIVE); 2557 } 2558 2559 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 2560 const u32 offset[], 2561 u32 value[], size_t array_len, u8 port) 2562 { 2563 struct mlx4_cmd_mailbox *mailbox; 2564 u32 *outbox; 2565 size_t i; 2566 int ret; 2567 2568 mailbox = mlx4_alloc_cmd_mailbox(dev); 2569 if (IS_ERR(mailbox)) 2570 return PTR_ERR(mailbox); 2571 2572 outbox = mailbox->buf; 2573 2574 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, 2575 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 2576 MLX4_CMD_NATIVE); 2577 if (ret) 2578 goto out; 2579 2580 for (i = 0; i < array_len; i++) { 2581 if (offset[i] > MLX4_MAILBOX_SIZE) { 2582 ret = -EINVAL; 2583 goto out; 2584 } 2585 2586 MLX4_GET(value[i], outbox, offset[i]); 2587 } 2588 2589 out: 2590 mlx4_free_cmd_mailbox(dev, mailbox); 2591 return ret; 2592 } 2593 EXPORT_SYMBOL(mlx4_query_diag_counters); 2594 2595 int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2596 { 2597 u8 port; 2598 u32 *outbox; 2599 struct mlx4_cmd_mailbox *mailbox; 2600 u32 in_mod; 2601 u32 guid_hi, guid_lo; 2602 int err, ret = 0; 2603 #define MOD_STAT_CFG_PORT_OFFSET 8 2604 #define MOD_STAT_CFG_GUID_H 0x14 2605 #define MOD_STAT_CFG_GUID_L 0x1c 2606 2607 mailbox = mlx4_alloc_cmd_mailbox(dev); 2608 if (IS_ERR(mailbox)) 2609 return PTR_ERR(mailbox); 2610 outbox = mailbox->buf; 2611 2612 for (port = 1; port <= dev->caps.num_ports; port++) { 2613 in_mod = port << MOD_STAT_CFG_PORT_OFFSET; 2614 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, 2615 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2616 MLX4_CMD_NATIVE); 2617 if (err) { 2618 mlx4_err(dev, "Failed to get port %d uplink GUID\n", 2619 port); 2620 ret = err; 2621 } else { 2622 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); 2623 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); 2624 dev->caps.phys_port_id[port] = (u64)guid_lo | 2625 (u64)guid_hi << 32; 2626 }
2627 } 2628 mlx4_free_cmd_mailbox(dev, mailbox); 2629 return ret; 2630 } 2631 2632 #define MLX4_WOL_SETUP_MODE (5 << 28) 2633 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 2634 { 2635 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2636 2637 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 2638 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2639 MLX4_CMD_NATIVE); 2640 } 2641 EXPORT_SYMBOL_GPL(mlx4_wol_read); 2642 2643 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 2644 { 2645 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2646 2647 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 2648 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2649 } 2650 EXPORT_SYMBOL_GPL(mlx4_wol_write); 2651 2652 enum { 2653 ADD_TO_MCG = 0x26, 2654 }; 2655 2656 2657 void mlx4_opreq_action(struct work_struct *work) 2658 { 2659 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, 2660 opreq_task); 2661 struct mlx4_dev *dev = &priv->dev; 2662 int num_tasks = atomic_read(&priv->opreq_count); 2663 struct mlx4_cmd_mailbox *mailbox; 2664 struct mlx4_mgm *mgm; 2665 u32 *outbox; 2666 u32 modifier; 2667 u16 token; 2668 u16 type; 2669 int err; 2670 u32 num_qps; 2671 struct mlx4_qp qp; 2672 int i; 2673 u8 rem_mcg; 2674 u8 prot; 2675 2676 #define GET_OP_REQ_MODIFIER_OFFSET 0x08 2677 #define GET_OP_REQ_TOKEN_OFFSET 0x14 2678 #define GET_OP_REQ_TYPE_OFFSET 0x1a 2679 #define GET_OP_REQ_DATA_OFFSET 0x20 2680 2681 mailbox = mlx4_alloc_cmd_mailbox(dev); 2682 if (IS_ERR(mailbox)) { 2683 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 2684 return; 2685 } 2686 outbox = mailbox->buf; 2687 2688 while (num_tasks) { 2689 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2690 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2691 MLX4_CMD_NATIVE); 2692 if (err) { 2693 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2694 err); 2695 return; 2696 } 2697 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2698 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 2699 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 2700 type &= 0xfff; 2701 2702 switch (type) { 2703 case ADD_TO_MCG: 2704 if (dev->caps.steering_mode == 2705 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2706 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); 2707 err = EPERM; 2708 break; 2709 } 2710 mgm = (struct mlx4_mgm *)((u8 *)(outbox) + 2711 GET_OP_REQ_DATA_OFFSET); 2712 num_qps = be32_to_cpu(mgm->members_count) & 2713 MGM_QPN_MASK; 2714 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; 2715 prot = ((u8 *)(&mgm->members_count))[0] >> 6; 2716 2717 for (i = 0; i < num_qps; i++) { 2718 qp.qpn = be32_to_cpu(mgm->qp[i]); 2719 if (rem_mcg) 2720 err = mlx4_multicast_detach(dev, &qp, 2721 mgm->gid, 2722 prot, 0); 2723 else 2724 err = mlx4_multicast_attach(dev, &qp, 2725 mgm->gid, 2726 mgm->gid[5] 2727 , 0, prot, 2728 NULL); 2729 if (err) 2730 break; 2731 } 2732 break; 2733 default: 2734 mlx4_warn(dev, "Bad type for required operation\n"); 2735 err = EINVAL; 2736 break; 2737 } 2738 err = mlx4_cmd(dev, 0, ((u32) err | 2739 (__force u32)cpu_to_be32(token) << 16), 2740 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2741 MLX4_CMD_NATIVE); 2742 if (err) { 2743 mlx4_err(dev, "Failed to acknowledge required request: %d\n", 2744 err); 2745 goto out; 2746 } 2747 memset(outbox, 0, 0xffc); 2748 num_tasks = atomic_dec_return(&priv->opreq_count); 2749 } 2750 2751 out: 2752 mlx4_free_cmd_mailbox(dev, mailbox); 2753 } 2754 2755 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, 2756 struct 
mlx4_cmd_mailbox *mailbox) 2757 { 2758 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 2759 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 2760 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 2761 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 2762 2763 u32 set_attr_mask, getresp_attr_mask; 2764 u32 trap_attr_mask, traprepress_attr_mask; 2765 2766 MLX4_GET(set_attr_mask, mailbox->buf, 2767 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); 2768 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", 2769 set_attr_mask); 2770 2771 MLX4_GET(getresp_attr_mask, mailbox->buf, 2772 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); 2773 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", 2774 getresp_attr_mask); 2775 2776 MLX4_GET(trap_attr_mask, mailbox->buf, 2777 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); 2778 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", 2779 trap_attr_mask); 2780 2781 MLX4_GET(traprepress_attr_mask, mailbox->buf, 2782 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); 2783 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", 2784 traprepress_attr_mask); 2785 2786 if (set_attr_mask && getresp_attr_mask && trap_attr_mask && 2787 traprepress_attr_mask) 2788 return 1; 2789 2790 return 0; 2791 } 2792 2793 int mlx4_config_mad_demux(struct mlx4_dev *dev) 2794 { 2795 struct mlx4_cmd_mailbox *mailbox; 2796 int err; 2797 2798 /* Check if mad_demux is supported */ 2799 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) 2800 return 0; 2801 2802 mailbox = mlx4_alloc_cmd_mailbox(dev); 2803 if (IS_ERR(mailbox)) { 2804 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); 2805 return -ENOMEM; 2806 } 2807 2808 /* Query mad_demux to find out which MADs are handled by internal sma */ 2809 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, 2810 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, 2811 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2812 if (err) { 2813 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", 2814 err); 2815 goto out; 2816 } 2817 2818 if (mlx4_check_smp_firewall_active(dev, mailbox)) 2819 dev->flags |= MLX4_FLAG_SECURE_HOST; 2820 2821 /* Config mad_demux to handle all MADs returned by the query above */ 2822 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, 2823 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, 2824 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2825 if (err) { 2826 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); 2827 goto out; 2828 } 2829 2830 if (dev->flags & MLX4_FLAG_SECURE_HOST) 2831 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); 2832 out: 2833 mlx4_free_cmd_mailbox(dev, mailbox); 2834 return err; 2835 } 2836 2837 /* Access Reg commands */ 2838 enum mlx4_access_reg_masks { 2839 MLX4_ACCESS_REG_STATUS_MASK = 0x7f, 2840 MLX4_ACCESS_REG_METHOD_MASK = 0x7f, 2841 MLX4_ACCESS_REG_LEN_MASK = 0x7ff 2842 }; 2843 2844 struct mlx4_access_reg { 2845 __be16 constant1; 2846 u8 status; 2847 u8 resrvd1; 2848 __be16 reg_id; 2849 u8 method; 2850 u8 constant2; 2851 __be32 resrvd2[2]; 2852 __be16 len_const; 2853 __be16 resrvd3; 2854 #define MLX4_ACCESS_REG_HEADER_SIZE (20) 2855 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; 2856 } __attribute__((__packed__)); 2857 2858 /** 2859 * mlx4_ACCESS_REG - Generic access reg command. 2860 * @dev: mlx4_dev. 2861 * @reg_id: register ID to access. 2862 * @method: Access method Read/Write. 2863 * @reg_len: register length to Read/Write in bytes. 
2864 * @reg_data: reg_data pointer to Read/Write From/To. 2865 * 2866 * Access ConnectX registers FW command. 2867 * Returns 0 on success and copies outbox mlx4_access_reg data 2868 * field into reg_data or a negative error code. 2869 */ 2870 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, 2871 enum mlx4_access_reg_method method, 2872 u16 reg_len, void *reg_data) 2873 { 2874 struct mlx4_cmd_mailbox *inbox, *outbox; 2875 struct mlx4_access_reg *inbuf, *outbuf; 2876 int err; 2877 2878 inbox = mlx4_alloc_cmd_mailbox(dev); 2879 if (IS_ERR(inbox)) 2880 return PTR_ERR(inbox); 2881 2882 outbox = mlx4_alloc_cmd_mailbox(dev); 2883 if (IS_ERR(outbox)) { 2884 mlx4_free_cmd_mailbox(dev, inbox); 2885 return PTR_ERR(outbox); 2886 } 2887 2888 inbuf = inbox->buf; 2889 outbuf = outbox->buf; 2890 2891 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); 2892 inbuf->constant2 = 0x1; 2893 inbuf->reg_id = cpu_to_be16(reg_id); 2894 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; 2895 2896 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); 2897 inbuf->len_const = 2898 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | 2899 ((0x3) << 12)); 2900 2901 memcpy(inbuf->reg_data, reg_data, reg_len); 2902 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, 2903 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2904 MLX4_CMD_WRAPPED); 2905 if (err) 2906 goto out; 2907 2908 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { 2909 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; 2910 mlx4_err(dev, 2911 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", 2912 reg_id, err); 2913 goto out; 2914 } 2915 2916 memcpy(reg_data, outbuf->reg_data, reg_len); 2917 out: 2918 mlx4_free_cmd_mailbox(dev, inbox); 2919 mlx4_free_cmd_mailbox(dev, outbox); 2920 return err; 2921 } 2922 2923 /* ConnectX registers IDs */ 2924 enum mlx4_reg_id { 2925 MLX4_REG_ID_PTYS = 0x5004, 2926 }; 2927 2928 /** 2929 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) 2930 * register 2931 * @dev: mlx4_dev. 2932 * @method: Access method Read/Write. 2933 * @ptys_reg: PTYS register data pointer. 2934 * 2935 * Access ConnectX PTYS register, to Read/Write Port Type/Speed 2936 * configuration 2937 * Returns 0 on success or a negative error code. 
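* Example (illustrative): to read the current link configuration of port 1, zero a struct mlx4_ptys_reg, set its local_port field to 1 and call this helper with method == MLX4_ACCESS_REG_QUERY.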
2938 */ 2939 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, 2940 enum mlx4_access_reg_method method, 2941 struct mlx4_ptys_reg *ptys_reg) 2942 { 2943 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, 2944 method, sizeof(*ptys_reg), ptys_reg); 2945 } 2946 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); 2947 2948 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 2949 struct mlx4_vhcr *vhcr, 2950 struct mlx4_cmd_mailbox *inbox, 2951 struct mlx4_cmd_mailbox *outbox, 2952 struct mlx4_cmd_info *cmd) 2953 { 2954 struct mlx4_access_reg *inbuf = inbox->buf; 2955 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; 2956 u16 reg_id = be16_to_cpu(inbuf->reg_id); 2957 2958 if (slave != mlx4_master_func_num(dev) && 2959 method == MLX4_ACCESS_REG_WRITE) 2960 return -EPERM; 2961 2962 if (reg_id == MLX4_REG_ID_PTYS) { 2963 struct mlx4_ptys_reg *ptys_reg = 2964 (struct mlx4_ptys_reg *)inbuf->reg_data; 2965 2966 ptys_reg->local_port = 2967 mlx4_slave_convert_port(dev, slave, 2968 ptys_reg->local_port); 2969 } 2970 2971 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, 2972 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2973 MLX4_CMD_NATIVE); 2974 } 2975 2976 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) 2977 { 2978 #define SET_PORT_GEN_PHV_VALID 0x10 2979 #define SET_PORT_GEN_PHV_EN 0x80 2980 2981 struct mlx4_cmd_mailbox *mailbox; 2982 struct mlx4_set_port_general_context *context; 2983 u32 in_mod; 2984 int err; 2985 2986 mailbox = mlx4_alloc_cmd_mailbox(dev); 2987 if (IS_ERR(mailbox)) 2988 return PTR_ERR(mailbox); 2989 context = mailbox->buf; 2990 2991 context->flags2 |= SET_PORT_GEN_PHV_VALID; 2992 if (phv_bit) 2993 context->phv_en |= SET_PORT_GEN_PHV_EN; 2994 2995 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 2996 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 2997 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 2998 MLX4_CMD_NATIVE); 2999 3000 mlx4_free_cmd_mailbox(dev, mailbox); 3001 return err; 3002 } 3003 3004 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) 3005 { 3006 int err; 3007 struct mlx4_func_cap func_cap; 3008 3009 memset(&func_cap, 0, sizeof(func_cap)); 3010 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3011 if (!err) 3012 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; 3013 return err; 3014 } 3015 EXPORT_SYMBOL(get_phv_bit); 3016 3017 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) 3018 { 3019 int ret; 3020 3021 if (mlx4_is_slave(dev)) 3022 return -EPERM; 3023 3024 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3025 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3026 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); 3027 if (!ret) 3028 dev->caps.phv_bit[port] = new_val; 3029 return ret; 3030 } 3031 3032 return -EOPNOTSUPP; 3033 } 3034 EXPORT_SYMBOL(set_phv_bit); 3035 3036 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, 3037 bool *vlan_offload_disabled) 3038 { 3039 struct mlx4_func_cap func_cap; 3040 int err; 3041 3042 memset(&func_cap, 0, sizeof(func_cap)); 3043 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3044 if (!err) 3045 *vlan_offload_disabled = 3046 !!(func_cap.flags0 & 3047 QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE); 3048 return err; 3049 } 3050 EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled); 3051 3052 void mlx4_replace_zero_macs(struct mlx4_dev *dev) 3053 { 3054 int i; 3055 u8 mac_addr[ETH_ALEN]; 3056 3057 dev->port_random_macs = 0; 3058 for (i = 1; i <= dev->caps.num_ports; ++i) 3059 if (!dev->caps.def_mac[i] && 3060 dev->caps.port_type[i] == 
MLX4_PORT_TYPE_ETH) { 3061 eth_random_addr(mac_addr); 3062 dev->port_random_macs |= 1 << i; 3063 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); 3064 } 3065 } 3066 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); 3067