/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <uapi/rdma/mlx4-abi.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");

#define MLX4_GET(dest, source, offset) \
	do { \
		void *__p = (char *) (source) + (offset); \
		__be64 val; \
		switch (sizeof(dest)) { \
		case 1: (dest) = *(u8 *) __p; break; \
		case 2: (dest) = be16_to_cpup(__p); break; \
		case 4: (dest) = be32_to_cpup(__p); break; \
		case 8: val = get_unaligned((__be64 *)__p); \
			(dest) = be64_to_cpu(val); break; \
		default: __buggy_use_of_MLX4_GET(); \
		} \
	} while (0)

#define MLX4_PUT(dest, source, offset) \
	do { \
		void *__d = ((char *) (dest) + (offset)); \
		switch (sizeof(source)) { \
		case 1: *(u8 *) __d = (source); break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT(); \
		} \
	} while (0)
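
/*
 * MLX4_GET() and MLX4_PUT() read and write big-endian fields at a byte
 * offset inside a command mailbox buffer, picking the endianness conversion
 * from the size of the C variable involved (1, 2, 4 or 8 bytes).  Any other
 * size falls into the default case and becomes a link-time error through the
 * undefined __buggy_use_of_MLX4_GET()/__buggy_use_of_MLX4_PUT() references.
 *
 * Illustrative usage only (SOME_OFFSET stands in for the per-command offset
 * #defines used throughout this file):
 *
 *	u8 field;
 *	u32 size;
 *
 *	MLX4_GET(field, outbox, SOME_OFFSET);	read one byte from the mailbox
 *	MLX4_PUT(inbox, size, SOME_OFFSET);	write a 32-bit big-endian field
 */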
support", 99 [18] = "Atomic ops support", 100 [19] = "Raw multicast support", 101 [20] = "Address vector port checking support", 102 [21] = "UD multicast support", 103 [30] = "IBoE support", 104 [32] = "Unicast loopback support", 105 [34] = "FCS header control", 106 [37] = "Wake On LAN (port1) support", 107 [38] = "Wake On LAN (port2) support", 108 [40] = "UDP RSS support", 109 [41] = "Unicast VEP steering support", 110 [42] = "Multicast VEP steering support", 111 [48] = "Counters support", 112 [52] = "RSS IP fragments support", 113 [53] = "Port ETS Scheduler support", 114 [55] = "Port link type sensing support", 115 [59] = "Port management change event support", 116 [61] = "64 byte EQE support", 117 [62] = "64 byte CQE support", 118 }; 119 int i; 120 121 mlx4_dbg(dev, "DEV_CAP flags:\n"); 122 for (i = 0; i < ARRAY_SIZE(fname); ++i) 123 if (fname[i] && (flags & (1LL << i))) 124 mlx4_dbg(dev, " %s\n", fname[i]); 125 } 126 127 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) 128 { 129 static const char * const fname[] = { 130 [0] = "RSS support", 131 [1] = "RSS Toeplitz Hash Function support", 132 [2] = "RSS XOR Hash Function support", 133 [3] = "Device managed flow steering support", 134 [4] = "Automatic MAC reassignment support", 135 [5] = "Time stamping support", 136 [6] = "VST (control vlan insertion/stripping) support", 137 [7] = "FSM (MAC anti-spoofing) support", 138 [8] = "Dynamic QP updates support", 139 [9] = "Device managed flow steering IPoIB support", 140 [10] = "TCP/IP offloads/flow-steering for VXLAN support", 141 [11] = "MAD DEMUX (Secure-Host) support", 142 [12] = "Large cache line (>64B) CQE stride support", 143 [13] = "Large cache line (>64B) EQE stride support", 144 [14] = "Ethernet protocol control support", 145 [15] = "Ethernet Backplane autoneg support", 146 [16] = "CONFIG DEV support", 147 [17] = "Asymmetric EQs support", 148 [18] = "More than 80 VFs support", 149 [19] = "Performance optimized for limited rule configuration flow steering support", 150 [20] = "Recoverable error events support", 151 [21] = "Port Remap support", 152 [22] = "QCN support", 153 [23] = "QP rate limiting support", 154 [24] = "Ethernet Flow control statistics support", 155 [25] = "Granular QoS per VF support", 156 [26] = "Port ETS Scheduler support", 157 [27] = "Port beacon support", 158 [28] = "RX-ALL support", 159 [29] = "802.1ad offload support", 160 [31] = "Modifying loopback source checks using UPDATE_QP support", 161 [32] = "Loopback source checks support", 162 [33] = "RoCEv2 support", 163 [34] = "DMFS Sniffer support (UC & MC)", 164 [35] = "Diag counters per port", 165 [36] = "QinQ VST mode support", 166 [37] = "sl to vl mapping table change event support", 167 [38] = "user MAC support", 168 }; 169 int i; 170 171 for (i = 0; i < ARRAY_SIZE(fname); ++i) 172 if (fname[i] && (flags & (1LL << i))) 173 mlx4_dbg(dev, " %s\n", fname[i]); 174 } 175 176 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) 177 { 178 struct mlx4_cmd_mailbox *mailbox; 179 u32 *inbox; 180 int err = 0; 181 182 #define MOD_STAT_CFG_IN_SIZE 0x100 183 184 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 185 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 186 187 mailbox = mlx4_alloc_cmd_mailbox(dev); 188 if (IS_ERR(mailbox)) 189 return PTR_ERR(mailbox); 190 inbox = mailbox->buf; 191 192 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); 193 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); 194 195 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, 196 

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
		err = __mlx4_register_vlan(&priv->dev, port,
					   vp_admin->default_vlan,
					   &vp_oper->vlan_idx);
		if (err) {
			vp_oper->vlan_idx = NO_INDX;
			mlx4_warn(&priv->dev,
				  "No vlan resources slave %d, port %d\n",
				  slave, port);
			return err;
		}
		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_oper->state.default_vlan),
			 vp_oper->vlan_idx, slave, port);
	}
	vp_oper->state.vlan_proto = vp_admin->vlan_proto;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;

	return 0;
}

static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	slave_state = &priv->mfunc.master.slave_state[slave];

	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
	    (!slave_state->active))
		return 0;

	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.default_vlan ==
vp_admin->default_vlan && 305 vp_oper->state.default_qos == vp_admin->default_qos) 306 return 0; 307 308 if (!slave_state->vst_qinq_supported) { 309 /* Warn and revert the request to set vst QinQ mode */ 310 vp_admin->vlan_proto = vp_oper->state.vlan_proto; 311 vp_admin->default_vlan = vp_oper->state.default_vlan; 312 vp_admin->default_qos = vp_oper->state.default_qos; 313 314 mlx4_warn(&priv->dev, 315 "Slave %d does not support VST QinQ mode\n", slave); 316 return 0; 317 } 318 319 err = mlx4_activate_vst_qinq(priv, slave, port); 320 return err; 321 } 322 323 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, 324 struct mlx4_vhcr *vhcr, 325 struct mlx4_cmd_mailbox *inbox, 326 struct mlx4_cmd_mailbox *outbox, 327 struct mlx4_cmd_info *cmd) 328 { 329 struct mlx4_priv *priv = mlx4_priv(dev); 330 u8 field, port; 331 u32 size, proxy_qp, qkey; 332 int err = 0; 333 struct mlx4_func func; 334 335 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 336 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 337 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 338 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 339 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 340 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 341 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 342 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 343 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 344 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 345 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 346 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 347 #define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48 348 349 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 350 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 351 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 352 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 353 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 354 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 355 356 #define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c 357 358 #define QUERY_FUNC_CAP_FMR_FLAG 0x80 359 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 360 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 361 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 362 #define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08 363 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 364 365 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) 366 #define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30) 367 368 /* when opcode modifier = 1 */ 369 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 370 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4 371 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 372 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc 373 374 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 375 #define QUERY_FUNC_CAP_QP0_PROXY 0x14 376 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 377 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c 378 #define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28 379 380 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 381 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 382 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 383 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 384 385 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 386 #define QUERY_FUNC_CAP_PHV_BIT 0x40 387 #define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20 388 389 #define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30) 390 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31) 391 392 if (vhcr->op_modifier == 1) { 393 struct mlx4_active_ports actv_ports = 394 mlx4_get_active_ports(dev, slave); 395 int converted_port = mlx4_slave_convert_port( 396 dev, slave, vhcr->in_modifier); 397 struct mlx4_vport_oper_state *vp_oper; 398 399 if (converted_port < 0) 400 return -EINVAL; 401 402 
vhcr->in_modifier = converted_port; 403 /* phys-port = logical-port */ 404 field = vhcr->in_modifier - 405 find_first_bit(actv_ports.ports, dev->caps.num_ports); 406 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 407 408 port = vhcr->in_modifier; 409 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; 410 411 /* Set nic_info bit to mark new fields support */ 412 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; 413 414 if (mlx4_vf_smi_enabled(dev, slave, port) && 415 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) { 416 field |= QUERY_FUNC_CAP_VF_ENABLE_QP0; 417 MLX4_PUT(outbox->buf, qkey, 418 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); 419 } 420 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); 421 422 /* size is now the QP number */ 423 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; 424 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 425 426 size += 2; 427 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); 428 429 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY); 430 proxy_qp += 2; 431 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY); 432 433 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], 434 QUERY_FUNC_CAP_PHYS_PORT_ID); 435 436 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 437 err = mlx4_handle_vst_qinq(priv, slave, port); 438 if (err) 439 return err; 440 441 field = 0; 442 if (dev->caps.phv_bit[port]) 443 field |= QUERY_FUNC_CAP_PHV_BIT; 444 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) 445 field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE; 446 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET); 447 448 } else if (vhcr->op_modifier == 0) { 449 struct mlx4_active_ports actv_ports = 450 mlx4_get_active_ports(dev, slave); 451 struct mlx4_slave_state *slave_state = 452 &priv->mfunc.master.slave_state[slave]; 453 454 /* enable rdma and ethernet interfaces, new quota locations, 455 * and reserved lkey 456 */ 457 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 458 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX | 459 QUERY_FUNC_CAP_FLAG_RESD_LKEY); 460 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 461 462 field = min( 463 bitmap_weight(actv_ports.ports, dev->caps.num_ports), 464 dev->caps.num_ports); 465 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 466 467 size = dev->caps.function_caps; /* set PF behaviours */ 468 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 469 470 field = 0; /* protected FMR support not available as yet */ 471 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); 472 473 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; 474 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 475 size = dev->caps.num_qps; 476 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 477 478 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; 479 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 480 size = dev->caps.num_srqs; 481 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 482 483 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; 484 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 485 size = dev->caps.num_cqs; 486 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 487 488 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || 489 mlx4_QUERY_FUNC(dev, &func, slave)) { 490 size = vhcr->in_modifier & 491 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 
492 dev->caps.num_eqs : 493 rounddown_pow_of_two(dev->caps.num_eqs); 494 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 495 size = dev->caps.reserved_eqs; 496 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 497 } else { 498 size = vhcr->in_modifier & 499 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 500 func.max_eq : 501 rounddown_pow_of_two(func.max_eq); 502 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 503 size = func.rsvd_eqs; 504 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 505 } 506 507 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; 508 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 509 size = dev->caps.num_mpts; 510 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 511 512 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; 513 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 514 size = dev->caps.num_mtts; 515 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 516 517 size = dev->caps.num_mgms + dev->caps.num_amgms; 518 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 519 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 520 521 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 522 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 523 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 524 525 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00); 526 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 527 528 if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ) 529 slave_state->vst_qinq_supported = true; 530 531 } else 532 err = -EINVAL; 533 534 return err; 535 } 536 537 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, 538 struct mlx4_func_cap *func_cap) 539 { 540 struct mlx4_cmd_mailbox *mailbox; 541 u32 *outbox; 542 u8 field, op_modifier; 543 u32 size, qkey; 544 int err = 0, quotas = 0; 545 u32 in_modifier; 546 u32 slave_caps; 547 548 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 549 slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ | 550 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; 551 in_modifier = op_modifier ? 
gen_or_port : slave_caps; 552 553 mailbox = mlx4_alloc_cmd_mailbox(dev); 554 if (IS_ERR(mailbox)) 555 return PTR_ERR(mailbox); 556 557 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, 558 MLX4_CMD_QUERY_FUNC_CAP, 559 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 560 if (err) 561 goto out; 562 563 outbox = mailbox->buf; 564 565 if (!op_modifier) { 566 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); 567 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { 568 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); 569 err = -EPROTONOSUPPORT; 570 goto out; 571 } 572 func_cap->flags = field; 573 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); 574 575 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 576 func_cap->num_ports = field; 577 578 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 579 func_cap->pf_context_behaviour = size; 580 581 if (quotas) { 582 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 583 func_cap->qp_quota = size & 0xFFFFFF; 584 585 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 586 func_cap->srq_quota = size & 0xFFFFFF; 587 588 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 589 func_cap->cq_quota = size & 0xFFFFFF; 590 591 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 592 func_cap->mpt_quota = size & 0xFFFFFF; 593 594 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 595 func_cap->mtt_quota = size & 0xFFFFFF; 596 597 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 598 func_cap->mcg_quota = size & 0xFFFFFF; 599 600 } else { 601 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 602 func_cap->qp_quota = size & 0xFFFFFF; 603 604 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 605 func_cap->srq_quota = size & 0xFFFFFF; 606 607 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 608 func_cap->cq_quota = size & 0xFFFFFF; 609 610 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 611 func_cap->mpt_quota = size & 0xFFFFFF; 612 613 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 614 func_cap->mtt_quota = size & 0xFFFFFF; 615 616 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 617 func_cap->mcg_quota = size & 0xFFFFFF; 618 } 619 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 620 func_cap->max_eq = size & 0xFFFFFF; 621 622 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 623 func_cap->reserved_eq = size & 0xFFFFFF; 624 625 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) { 626 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 627 func_cap->reserved_lkey = size; 628 } else { 629 func_cap->reserved_lkey = 0; 630 } 631 632 func_cap->extra_flags = 0; 633 634 /* Mailbox data from 0x6c and onward should only be treated if 635 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags 636 */ 637 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { 638 MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 639 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) 640 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; 641 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG) 642 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP; 643 } 644 645 goto out; 646 } 647 648 /* logical port query */ 649 if (gen_or_port > dev->caps.num_ports) { 650 err = -EINVAL; 651 goto out; 652 } 653 654 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); 655 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { 656 if (func_cap->flags1 & 
QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) { 657 mlx4_err(dev, "VLAN is enforced on this port\n"); 658 err = -EPROTONOSUPPORT; 659 goto out; 660 } 661 662 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) { 663 mlx4_err(dev, "Force mac is enabled on this port\n"); 664 err = -EPROTONOSUPPORT; 665 goto out; 666 } 667 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 668 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 669 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { 670 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n"); 671 err = -EPROTONOSUPPORT; 672 goto out; 673 } 674 } 675 676 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 677 func_cap->physical_port = field; 678 if (func_cap->physical_port != gen_or_port) { 679 err = -EINVAL; 680 goto out; 681 } 682 683 if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { 684 MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); 685 func_cap->spec_qps.qp0_qkey = qkey; 686 } else { 687 func_cap->spec_qps.qp0_qkey = 0; 688 } 689 690 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); 691 func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF; 692 693 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); 694 func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF; 695 696 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); 697 func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF; 698 699 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); 700 func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF; 701 702 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) 703 MLX4_GET(func_cap->phys_port_id, outbox, 704 QUERY_FUNC_CAP_PHYS_PORT_ID); 705 706 MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 707 708 /* All other resources are allocated by the master, but we still report 709 * 'num' and 'reserved' capabilities as follows: 710 * - num remains the maximum resource index 711 * - 'num - reserved' is the total available objects of a resource, but 712 * resource indices may be less than 'reserved' 713 * TODO: set per-resource quotas */ 714 715 out: 716 mlx4_free_cmd_mailbox(dev, mailbox); 717 718 return err; 719 } 720 721 static void disable_unsupported_roce_caps(void *buf); 722 723 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 724 { 725 struct mlx4_cmd_mailbox *mailbox; 726 u32 *outbox; 727 u8 field; 728 u32 field32, flags, ext_flags; 729 u16 size; 730 u16 stat_rate; 731 int err; 732 int i; 733 734 #define QUERY_DEV_CAP_OUT_SIZE 0x100 735 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 736 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 737 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 738 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 739 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 740 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 741 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 742 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 743 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 744 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a 745 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b 746 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d 747 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e 748 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f 749 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 750 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 751 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 752 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 753 #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 754 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 755 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 756 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b 757 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d 
758 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e 759 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 760 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 761 #define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34 762 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 763 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 764 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 765 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 766 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 767 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 768 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 769 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 770 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 771 #define QUERY_DEV_CAP_WOL_OFFSET 0x43 772 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 773 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 774 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 775 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b 776 #define QUERY_DEV_CAP_BF_OFFSET 0x4c 777 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d 778 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e 779 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f 780 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 781 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 782 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 783 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 784 #define QUERY_DEV_CAP_USER_MAC_EN_OFFSET 0x5C 785 #define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D 786 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 787 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 788 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 789 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 790 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 791 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 792 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 793 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 794 #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 795 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 796 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 797 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 798 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 799 #define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78 800 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a 801 #define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b 802 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 803 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 804 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 805 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 806 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 807 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a 808 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c 809 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e 810 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 811 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 812 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 813 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 814 #define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96 815 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 816 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 817 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c 818 #define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c 819 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d 820 #define QUERY_DEV_CAP_VXLAN 0x9e 821 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 822 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 823 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac 824 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc 825 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 826 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 827 828 829 dev_cap->flags2 = 0; 830 mailbox = mlx4_alloc_cmd_mailbox(dev); 831 if 
(IS_ERR(mailbox)) 832 return PTR_ERR(mailbox); 833 outbox = mailbox->buf; 834 835 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 836 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 837 if (err) 838 goto out; 839 840 if (mlx4_is_mfunc(dev)) 841 disable_unsupported_roce_caps(outbox); 842 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); 843 dev_cap->reserved_qps = 1 << (field & 0xf); 844 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); 845 dev_cap->max_qps = 1 << (field & 0x1f); 846 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); 847 dev_cap->reserved_srqs = 1 << (field >> 4); 848 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); 849 dev_cap->max_srqs = 1 << (field & 0x1f); 850 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); 851 dev_cap->max_cq_sz = 1 << field; 852 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); 853 dev_cap->reserved_cqs = 1 << (field & 0xf); 854 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); 855 dev_cap->max_cqs = 1 << (field & 0x1f); 856 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 857 dev_cap->max_mpts = 1 << (field & 0x3f); 858 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 859 dev_cap->reserved_eqs = 1 << (field & 0xf); 860 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 861 dev_cap->max_eqs = 1 << (field & 0xf); 862 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); 863 dev_cap->reserved_mtts = 1 << (field >> 4); 864 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); 865 dev_cap->reserved_mrws = 1 << (field & 0xf); 866 MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); 867 dev_cap->num_sys_eqs = size & 0xfff; 868 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 869 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 870 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); 871 dev_cap->max_responder_per_qp = 1 << (field & 0x3f); 872 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); 873 field &= 0x1f; 874 if (!field) 875 dev_cap->max_gso_sz = 0; 876 else 877 dev_cap->max_gso_sz = 1 << field; 878 879 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); 880 if (field & 0x20) 881 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; 882 if (field & 0x10) 883 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; 884 field &= 0xf; 885 if (field) { 886 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; 887 dev_cap->max_rss_tbl_sz = 1 << field; 888 } else 889 dev_cap->max_rss_tbl_sz = 0; 890 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); 891 dev_cap->max_rdma_global = 1 << (field & 0x3f); 892 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); 893 dev_cap->local_ca_ack_delay = field & 0x1f; 894 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 895 dev_cap->num_ports = field & 0xf; 896 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 897 dev_cap->max_msg_sz = 1 << (field & 0x1f); 898 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); 899 if (field & 0x10) 900 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; 901 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 902 if (field & 0x80) 903 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; 904 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; 905 if (field & 0x20) 906 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER; 907 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 908 if (field & 0x80) 909 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; 910 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 911 if (field & 0x80) 912 
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; 913 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); 914 dev_cap->fs_max_num_qp_per_entry = field; 915 MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET); 916 if (field & (1 << 5)) 917 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT; 918 MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 919 if (field & 0x1) 920 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; 921 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 922 dev_cap->stat_rate_support = stat_rate; 923 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 924 if (field & 0x80) 925 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; 926 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 927 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 928 dev_cap->flags = flags | (u64)ext_flags << 32; 929 MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); 930 dev_cap->wol_port[1] = !!(field & 0x20); 931 dev_cap->wol_port[2] = !!(field & 0x40); 932 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 933 dev_cap->reserved_uars = field >> 4; 934 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 935 dev_cap->uar_size = 1 << ((field & 0x3f) + 20); 936 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); 937 dev_cap->min_page_sz = 1 << field; 938 939 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); 940 if (field & 0x80) { 941 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 942 dev_cap->bf_reg_size = 1 << (field & 0x1f); 943 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 944 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) 945 field = 3; 946 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 947 } else { 948 dev_cap->bf_reg_size = 0; 949 } 950 951 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); 952 dev_cap->max_sq_sg = field; 953 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); 954 dev_cap->max_sq_desc_sz = size; 955 956 MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET); 957 if (field & (1 << 2)) 958 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN; 959 MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET); 960 if (field & 0x1) 961 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP; 962 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); 963 dev_cap->max_qp_per_mcg = 1 << field; 964 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); 965 dev_cap->reserved_mgms = field & 0xf; 966 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); 967 dev_cap->max_mcgs = 1 << field; 968 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); 969 dev_cap->reserved_pds = field >> 4; 970 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); 971 dev_cap->max_pds = 1 << (field & 0x3f); 972 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); 973 dev_cap->reserved_xrcds = field >> 4; 974 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); 975 dev_cap->max_xrcds = 1 << (field & 0x1f); 976 977 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); 978 dev_cap->rdmarc_entry_sz = size; 979 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); 980 dev_cap->qpc_entry_sz = size; 981 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); 982 dev_cap->aux_entry_sz = size; 983 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); 984 dev_cap->altc_entry_sz = size; 985 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); 986 dev_cap->eqc_entry_sz = size; 987 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); 988 
dev_cap->cqc_entry_sz = size; 989 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); 990 dev_cap->srq_entry_sz = size; 991 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); 992 dev_cap->cmpt_entry_sz = size; 993 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); 994 dev_cap->mtt_entry_sz = size; 995 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); 996 dev_cap->dmpt_entry_sz = size; 997 998 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); 999 dev_cap->max_srq_sz = 1 << field; 1000 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); 1001 dev_cap->max_qp_sz = 1 << field; 1002 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); 1003 dev_cap->resize_srq = field & 1; 1004 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); 1005 dev_cap->max_rq_sg = field; 1006 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); 1007 dev_cap->max_rq_desc_sz = size; 1008 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1009 if (field & (1 << 4)) 1010 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP; 1011 if (field & (1 << 5)) 1012 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; 1013 if (field & (1 << 6)) 1014 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 1015 if (field & (1 << 7)) 1016 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 1017 MLX4_GET(dev_cap->bmme_flags, outbox, 1018 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1019 if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2) 1020 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2; 1021 if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) 1022 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; 1023 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1024 if (field & 0x20) 1025 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; 1026 if (field & (1 << 2)) 1027 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 1028 MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET); 1029 if (field & 0x80) 1030 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN; 1031 if (field & 0x40) 1032 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN; 1033 1034 MLX4_GET(dev_cap->reserved_lkey, outbox, 1035 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 1036 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 1037 if (field32 & (1 << 0)) 1038 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 1039 if (field32 & (1 << 7)) 1040 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; 1041 MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); 1042 if (field32 & (1 << 17)) 1043 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; 1044 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 1045 if (field & 1<<6) 1046 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; 1047 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); 1048 if (field & 1<<3) 1049 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; 1050 if (field & (1 << 5)) 1051 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; 1052 MLX4_GET(dev_cap->max_icm_sz, outbox, 1053 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 1054 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1055 MLX4_GET(dev_cap->max_counters, outbox, 1056 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); 1057 1058 MLX4_GET(field32, outbox, 1059 QUERY_DEV_CAP_MAD_DEMUX_OFFSET); 1060 if (field32 & (1 << 0)) 1061 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; 1062 1063 MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox, 1064 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET); 1065 dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK; 1066 MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox, 1067 
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width 
cap: %d\n", 1148 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, 1149 dev_cap->port_cap[1].max_port_width); 1150 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 1151 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 1152 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 1153 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 1154 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); 1155 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); 1156 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); 1157 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", 1158 dev_cap->dmfs_high_rate_qpn_base); 1159 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", 1160 dev_cap->dmfs_high_rate_qpn_range); 1161 1162 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { 1163 struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; 1164 1165 mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", 1166 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, 1167 rl_caps->min_unit, rl_caps->min_val); 1168 } 1169 1170 dump_dev_cap_flags(dev, dev_cap->flags); 1171 dump_dev_cap_flags2(dev, dev_cap->flags2); 1172 } 1173 1174 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) 1175 { 1176 struct mlx4_cmd_mailbox *mailbox; 1177 u32 *outbox; 1178 u8 field; 1179 u32 field32; 1180 int err; 1181 1182 mailbox = mlx4_alloc_cmd_mailbox(dev); 1183 if (IS_ERR(mailbox)) 1184 return PTR_ERR(mailbox); 1185 outbox = mailbox->buf; 1186 1187 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 1188 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 1189 MLX4_CMD_TIME_CLASS_A, 1190 MLX4_CMD_NATIVE); 1191 1192 if (err) 1193 goto out; 1194 1195 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 1196 port_cap->max_vl = field >> 4; 1197 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 1198 port_cap->ib_mtu = field >> 4; 1199 port_cap->max_port_width = field & 0xf; 1200 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 1201 port_cap->max_gids = 1 << (field & 0xf); 1202 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); 1203 port_cap->max_pkeys = 1 << (field & 0xf); 1204 } else { 1205 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 1206 #define QUERY_PORT_MTU_OFFSET 0x01 1207 #define QUERY_PORT_ETH_MTU_OFFSET 0x02 1208 #define QUERY_PORT_WIDTH_OFFSET 0x06 1209 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 1210 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 1211 #define QUERY_PORT_MAX_VL_OFFSET 0x0b 1212 #define QUERY_PORT_MAC_OFFSET 0x10 1213 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 1214 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c 1215 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 1216 1217 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, 1218 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1219 if (err) 1220 goto out; 1221 1222 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1223 port_cap->link_state = (field & 0x80) >> 7; 1224 port_cap->supported_port_types = field & 3; 1225 port_cap->suggested_type = (field >> 3) & 1; 1226 port_cap->default_sense = (field >> 4) & 1; 1227 port_cap->dmfs_optimized_state = (field >> 5) & 1; 1228 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 1229 port_cap->ib_mtu = field & 0xf; 1230 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 1231 port_cap->max_port_width = field & 0xf; 1232 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 1233 port_cap->max_gids = 1 << (field >> 4); 1234 port_cap->max_pkeys = 1 << (field & 0xf); 1235 MLX4_GET(field, 
outbox, QUERY_PORT_MAX_VL_OFFSET); 1236 port_cap->max_vl = field & 0xf; 1237 port_cap->max_tc_eth = field >> 4; 1238 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); 1239 port_cap->log_max_macs = field & 0xf; 1240 port_cap->log_max_vlans = field >> 4; 1241 MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET); 1242 MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET); 1243 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); 1244 port_cap->trans_type = field32 >> 24; 1245 port_cap->vendor_oui = field32 & 0xffffff; 1246 MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET); 1247 MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET); 1248 } 1249 1250 out: 1251 mlx4_free_cmd_mailbox(dev, mailbox); 1252 return err; 1253 } 1254 1255 #define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28) 1256 #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) 1257 #define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) 1258 #define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) 1259 1260 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, 1261 struct mlx4_vhcr *vhcr, 1262 struct mlx4_cmd_mailbox *inbox, 1263 struct mlx4_cmd_mailbox *outbox, 1264 struct mlx4_cmd_info *cmd) 1265 { 1266 u64 flags; 1267 int err = 0; 1268 u8 field; 1269 u16 field16; 1270 u32 bmme_flags, field32; 1271 int real_port; 1272 int slave_port; 1273 int first_port; 1274 struct mlx4_active_ports actv_ports; 1275 1276 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 1277 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1278 if (err) 1279 return err; 1280 1281 disable_unsupported_roce_caps(outbox->buf); 1282 /* add port mng change event capability and disable mw type 1 1283 * unconditionally to slaves 1284 */ 1285 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1286 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 1287 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 1288 actv_ports = mlx4_get_active_ports(dev, slave); 1289 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); 1290 for (slave_port = 0, real_port = first_port; 1291 real_port < first_port + 1292 bitmap_weight(actv_ports.ports, dev->caps.num_ports); 1293 ++real_port, ++slave_port) { 1294 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) 1295 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; 1296 else 1297 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); 1298 } 1299 for (; slave_port < dev->caps.num_ports; ++slave_port) 1300 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); 1301 1302 /* Not exposing RSS IP fragments to guests */ 1303 flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG; 1304 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1305 1306 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); 1307 field &= ~0x0F; 1308 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; 1309 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); 1310 1311 /* For guests, disable timestamp */ 1312 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 1313 field &= 0x7f; 1314 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 1315 1316 /* For guests, disable vxlan tunneling and QoS support */ 1317 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); 1318 field &= 0xd7; 1319 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); 1320 1321 /* For guests, disable port BEACON */ 1322 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 1323 field &= 0x7f; 1324 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 1325 1326 /* For guests, report 
Blueflame disabled */ 1327 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 1328 field &= 0x7f; 1329 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 1330 1331 /* For guests, disable mw type 2 and port remap*/ 1332 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1333 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 1334 bmme_flags &= ~MLX4_FLAG_PORT_REMAP; 1335 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1336 1337 /* turn off device-managed steering capability if not enabled */ 1338 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1339 MLX4_GET(field, outbox->buf, 1340 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 1341 field &= 0x7f; 1342 MLX4_PUT(outbox->buf, field, 1343 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 1344 } 1345 1346 /* turn off ipoib managed steering for guests */ 1347 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 1348 field &= ~0x80; 1349 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 1350 1351 /* turn off host side virt features (VST, FSM, etc) for guests */ 1352 MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1353 field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | 1354 DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS); 1355 MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1356 1357 /* turn off QCN for guests */ 1358 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 1359 field &= 0xfe; 1360 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 1361 1362 /* turn off QP max-rate limiting for guests */ 1363 field16 = 0; 1364 MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); 1365 1366 /* turn off QoS per VF support for guests */ 1367 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1368 field &= 0xef; 1369 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1370 1371 /* turn off ignore FCS feature for guests */ 1372 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1373 field &= 0xfb; 1374 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1375 1376 return 0; 1377 } 1378 1379 static void disable_unsupported_roce_caps(void *buf) 1380 { 1381 u32 flags; 1382 1383 MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1384 flags &= ~(1UL << 31); 1385 MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1386 MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1387 flags &= ~(1UL << 24); 1388 MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1389 MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1390 flags &= ~(MLX4_FLAG_ROCE_V1_V2); 1391 MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1392 } 1393 1394 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 1395 struct mlx4_vhcr *vhcr, 1396 struct mlx4_cmd_mailbox *inbox, 1397 struct mlx4_cmd_mailbox *outbox, 1398 struct mlx4_cmd_info *cmd) 1399 { 1400 struct mlx4_priv *priv = mlx4_priv(dev); 1401 u64 def_mac; 1402 u8 port_type; 1403 u16 short_field; 1404 int err; 1405 int admin_link_state; 1406 int port = mlx4_slave_convert_port(dev, slave, 1407 vhcr->in_modifier & 0xFF); 1408 1409 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 1410 #define MLX4_PORT_LINK_UP_MASK 0x80 1411 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 1412 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 1413 1414 if (port < 0) 1415 return -EINVAL; 1416 1417 /* Protect against untrusted guests: enforce that this is the 1418 * QUERY_PORT general query. 
1419 */ 1420 if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) 1421 return -EINVAL; 1422 1423 vhcr->in_modifier = port; 1424 1425 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 1426 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 1427 MLX4_CMD_NATIVE); 1428 1429 if (!err && dev->caps.function != slave) { 1430 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; 1431 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 1432 1433 /* get port type - currently only eth is enabled */ 1434 MLX4_GET(port_type, outbox->buf, 1435 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1436 1437 /* No link sensing allowed */ 1438 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; 1439 /* set port type to currently operating port type */ 1440 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); 1441 1442 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; 1443 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) 1444 port_type |= MLX4_PORT_LINK_UP_MASK; 1445 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) 1446 port_type &= ~MLX4_PORT_LINK_UP_MASK; 1447 else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) { 1448 int other_port = (port == 1) ? 2 : 1; 1449 struct mlx4_port_cap port_cap; 1450 1451 err = mlx4_QUERY_PORT(dev, other_port, &port_cap); 1452 if (err) 1453 goto out; 1454 port_type |= (port_cap.link_state << 7); 1455 } 1456 1457 MLX4_PUT(outbox->buf, port_type, 1458 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1459 1460 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) 1461 short_field = mlx4_get_slave_num_gids(dev, slave, port); 1462 else 1463 short_field = 1; /* slave max gids */ 1464 MLX4_PUT(outbox->buf, short_field, 1465 QUERY_PORT_CUR_MAX_GID_OFFSET); 1466 1467 short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; 1468 MLX4_PUT(outbox->buf, short_field, 1469 QUERY_PORT_CUR_MAX_PKEY_OFFSET); 1470 } 1471 out: 1472 return err; 1473 } 1474 1475 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, 1476 int *gid_tbl_len, int *pkey_tbl_len) 1477 { 1478 struct mlx4_cmd_mailbox *mailbox; 1479 u32 *outbox; 1480 u16 field; 1481 int err; 1482 1483 mailbox = mlx4_alloc_cmd_mailbox(dev); 1484 if (IS_ERR(mailbox)) 1485 return PTR_ERR(mailbox); 1486 1487 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, 1488 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 1489 MLX4_CMD_WRAPPED); 1490 if (err) 1491 goto out; 1492 1493 outbox = mailbox->buf; 1494 1495 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); 1496 *gid_tbl_len = field; 1497 1498 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); 1499 *pkey_tbl_len = field; 1500 1501 out: 1502 mlx4_free_cmd_mailbox(dev, mailbox); 1503 return err; 1504 } 1505 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); 1506 1507 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) 1508 { 1509 struct mlx4_cmd_mailbox *mailbox; 1510 struct mlx4_icm_iter iter; 1511 __be64 *pages; 1512 int lg; 1513 int nent = 0; 1514 int i; 1515 int err = 0; 1516 int ts = 0, tc = 0; 1517 1518 mailbox = mlx4_alloc_cmd_mailbox(dev); 1519 if (IS_ERR(mailbox)) 1520 return PTR_ERR(mailbox); 1521 pages = mailbox->buf; 1522 1523 for (mlx4_icm_first(icm, &iter); 1524 !mlx4_icm_last(&iter); 1525 mlx4_icm_next(&iter)) { 1526 /* 1527 * We have to pass pages that are aligned to their 1528 * size, so find the least significant 1 in the 1529 * address or size and use that as our log2 size. 
1530 */ 1531 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1532 if (lg < MLX4_ICM_PAGE_SHIFT) { 1533 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", 1534 MLX4_ICM_PAGE_SIZE, 1535 (unsigned long long) mlx4_icm_addr(&iter), 1536 mlx4_icm_size(&iter)); 1537 err = -EINVAL; 1538 goto out; 1539 } 1540 1541 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { 1542 if (virt != -1) { 1543 pages[nent * 2] = cpu_to_be64(virt); 1544 virt += 1ULL << lg; 1545 } 1546 1547 pages[nent * 2 + 1] = 1548 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | 1549 (lg - MLX4_ICM_PAGE_SHIFT)); 1550 ts += 1 << (lg - 10); 1551 ++tc; 1552 1553 if (++nent == MLX4_MAILBOX_SIZE / 16) { 1554 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1555 MLX4_CMD_TIME_CLASS_B, 1556 MLX4_CMD_NATIVE); 1557 if (err) 1558 goto out; 1559 nent = 0; 1560 } 1561 } 1562 } 1563 1564 if (nent) 1565 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1566 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1567 if (err) 1568 goto out; 1569 1570 switch (op) { 1571 case MLX4_CMD_MAP_FA: 1572 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); 1573 break; 1574 case MLX4_CMD_MAP_ICM_AUX: 1575 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); 1576 break; 1577 case MLX4_CMD_MAP_ICM: 1578 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", 1579 tc, ts, (unsigned long long) virt - (ts << 10)); 1580 break; 1581 } 1582 1583 out: 1584 mlx4_free_cmd_mailbox(dev, mailbox); 1585 return err; 1586 } 1587 1588 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) 1589 { 1590 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); 1591 } 1592 1593 int mlx4_UNMAP_FA(struct mlx4_dev *dev) 1594 { 1595 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, 1596 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1597 } 1598 1599 1600 int mlx4_RUN_FW(struct mlx4_dev *dev) 1601 { 1602 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, 1603 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1604 } 1605 1606 int mlx4_QUERY_FW(struct mlx4_dev *dev) 1607 { 1608 struct mlx4_fw *fw = &mlx4_priv(dev)->fw; 1609 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 1610 struct mlx4_cmd_mailbox *mailbox; 1611 u32 *outbox; 1612 int err = 0; 1613 u64 fw_ver; 1614 u16 cmd_if_rev; 1615 u8 lg; 1616 1617 #define QUERY_FW_OUT_SIZE 0x100 1618 #define QUERY_FW_VER_OFFSET 0x00 1619 #define QUERY_FW_PPF_ID 0x09 1620 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a 1621 #define QUERY_FW_MAX_CMD_OFFSET 0x0f 1622 #define QUERY_FW_ERR_START_OFFSET 0x30 1623 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 1624 #define QUERY_FW_ERR_BAR_OFFSET 0x3c 1625 1626 #define QUERY_FW_SIZE_OFFSET 0x00 1627 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 1628 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 1629 1630 #define QUERY_FW_COMM_BASE_OFFSET 0x40 1631 #define QUERY_FW_COMM_BAR_OFFSET 0x48 1632 1633 #define QUERY_FW_CLOCK_OFFSET 0x50 1634 #define QUERY_FW_CLOCK_BAR 0x58 1635 1636 mailbox = mlx4_alloc_cmd_mailbox(dev); 1637 if (IS_ERR(mailbox)) 1638 return PTR_ERR(mailbox); 1639 outbox = mailbox->buf; 1640 1641 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1642 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1643 if (err) 1644 goto out; 1645 1646 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); 1647 /* 1648 * FW subminor version is at more significant bits than minor 1649 * version, so swap here. 
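 * For example (illustrative value only): a raw QUERY_FW word of
 * 0x0002002a0009 (major 2, subminor 42, minor 9) becomes
 * 0x00020009002a after the swap, which the debug print below
 * reports as 2.9.042.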
1650 */ 1651 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1652 ((fw_ver & 0xffff0000ull) >> 16) | 1653 ((fw_ver & 0x0000ffffull) << 16); 1654 1655 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1656 dev->caps.function = lg; 1657 1658 if (mlx4_is_slave(dev)) 1659 goto out; 1660 1661 1662 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1663 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1664 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1665 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", 1666 cmd_if_rev); 1667 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1668 (int) (dev->caps.fw_ver >> 32), 1669 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1670 (int) dev->caps.fw_ver & 0xffff); 1671 mlx4_err(dev, "This driver version supports only revisions %d to %d\n", 1672 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1673 err = -ENODEV; 1674 goto out; 1675 } 1676 1677 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1678 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1679 1680 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1681 cmd->max_cmds = 1 << lg; 1682 1683 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1684 (int) (dev->caps.fw_ver >> 32), 1685 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1686 (int) dev->caps.fw_ver & 0xffff, 1687 cmd_if_rev, cmd->max_cmds); 1688 1689 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1690 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1691 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1692 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1693 1694 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1695 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1696 1697 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1698 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1699 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1700 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1701 1702 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1703 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1704 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1705 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1706 fw->comm_bar, fw->comm_base); 1707 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1708 1709 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1710 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1711 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1712 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1713 fw->clock_bar, fw->clock_offset); 1714 1715 /* 1716 * Round up number of system pages needed in case 1717 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
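 * For example (illustrative numbers only): with 64 KB system pages
 * and 4 KB ICM pages, fw_pages = 100 is first rounded up to 112 ICM
 * pages and then converted to 112 >> 4 = 7 system pages.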
1718 */ 1719 fw->fw_pages = 1720 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1721 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1722 1723 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1724 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1725 1726 out: 1727 mlx4_free_cmd_mailbox(dev, mailbox); 1728 return err; 1729 } 1730 1731 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1732 struct mlx4_vhcr *vhcr, 1733 struct mlx4_cmd_mailbox *inbox, 1734 struct mlx4_cmd_mailbox *outbox, 1735 struct mlx4_cmd_info *cmd) 1736 { 1737 u8 *outbuf; 1738 int err; 1739 1740 outbuf = outbox->buf; 1741 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1742 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1743 if (err) 1744 return err; 1745 1746 /* for slaves, set pci PPF ID to invalid and zero out everything 1747 * else except FW version */ 1748 outbuf[0] = outbuf[1] = 0; 1749 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1750 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1751 1752 return 0; 1753 } 1754 1755 static void get_board_id(void *vsd, char *board_id) 1756 { 1757 int i; 1758 1759 #define VSD_OFFSET_SIG1 0x00 1760 #define VSD_OFFSET_SIG2 0xde 1761 #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1762 #define VSD_OFFSET_TS_BOARD_ID 0x20 1763 1764 #define VSD_SIGNATURE_TOPSPIN 0x5ad 1765 1766 memset(board_id, 0, MLX4_BOARD_ID_LEN); 1767 1768 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1769 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1770 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); 1771 } else { 1772 /* 1773 * The board ID is a string but the firmware byte 1774 * swaps each 4-byte word before passing it back to 1775 * us. Therefore we need to swab it before printing. 1776 */ 1777 u32 *bid_u32 = (u32 *)board_id; 1778 1779 for (i = 0; i < 4; ++i) { 1780 u32 *addr; 1781 u32 val; 1782 1783 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); 1784 val = get_unaligned(addr); 1785 val = swab32(val); 1786 put_unaligned(val, &bid_u32[i]); 1787 } 1788 } 1789 } 1790 1791 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) 1792 { 1793 struct mlx4_cmd_mailbox *mailbox; 1794 u32 *outbox; 1795 int err; 1796 1797 #define QUERY_ADAPTER_OUT_SIZE 0x100 1798 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1799 #define QUERY_ADAPTER_VSD_OFFSET 0x20 1800 1801 mailbox = mlx4_alloc_cmd_mailbox(dev); 1802 if (IS_ERR(mailbox)) 1803 return PTR_ERR(mailbox); 1804 outbox = mailbox->buf; 1805 1806 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, 1807 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1808 if (err) 1809 goto out; 1810 1811 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1812 1813 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1814 adapter->board_id); 1815 1816 out: 1817 mlx4_free_cmd_mailbox(dev, mailbox); 1818 return err; 1819 } 1820 1821 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) 1822 { 1823 struct mlx4_cmd_mailbox *mailbox; 1824 __be32 *inbox; 1825 int err; 1826 static const u8 a0_dmfs_hw_steering[] = { 1827 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, 1828 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, 1829 [MLX4_STEERING_DMFS_A0_STATIC] = 2, 1830 [MLX4_STEERING_DMFS_A0_DISABLE] = 3 1831 }; 1832 1833 #define INIT_HCA_IN_SIZE 0x200 1834 #define INIT_HCA_VERSION_OFFSET 0x000 1835 #define INIT_HCA_VERSION 2 1836 #define INIT_HCA_VXLAN_OFFSET 0x0c 1837 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1838 #define INIT_HCA_FLAGS_OFFSET 0x014 1839 #define 
INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1840 #define INIT_HCA_QPC_OFFSET 0x020 1841 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1842 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1843 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) 1844 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) 1845 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) 1846 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) 1847 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) 1848 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) 1849 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) 1850 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1851 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1852 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1853 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1854 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1855 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1856 #define INIT_HCA_MCAST_OFFSET 0x0c0 1857 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 1858 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 1859 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1860 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1861 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1862 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 1863 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 1864 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) 1865 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) 1866 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) 1867 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) 1868 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) 1869 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) 1870 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) 1871 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) 1872 #define INIT_HCA_TPT_OFFSET 0x0f0 1873 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1874 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) 1875 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1876 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) 1877 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) 1878 #define INIT_HCA_UAR_OFFSET 0x120 1879 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) 1880 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) 1881 1882 mailbox = mlx4_alloc_cmd_mailbox(dev); 1883 if (IS_ERR(mailbox)) 1884 return PTR_ERR(mailbox); 1885 inbox = mailbox->buf; 1886 1887 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1888 1889 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1890 ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4); 1891 1892 #if defined(__LITTLE_ENDIAN) 1893 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 1894 #elif defined(__BIG_ENDIAN) 1895 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); 1896 #else 1897 #error Host endianness not defined 1898 #endif 1899 /* Check port for UD address vector: */ 1900 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); 1901 1902 /* Enable IPoIB checksumming if we can: */ 1903 if (dev->caps.flags & 
MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 1904 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1905 1906 /* Enable QoS support if module parameter set */ 1907 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) 1908 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1909 1910 /* enable counters */ 1911 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1912 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1913 1914 /* Enable RSS spread to fragmented IP packets when supported */ 1915 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) 1916 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); 1917 1918 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1919 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1920 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1921 dev->caps.eqe_size = 64; 1922 dev->caps.eqe_factor = 1; 1923 } else { 1924 dev->caps.eqe_size = 32; 1925 dev->caps.eqe_factor = 0; 1926 } 1927 1928 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { 1929 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); 1930 dev->caps.cqe_size = 64; 1931 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1932 } else { 1933 dev->caps.cqe_size = 32; 1934 } 1935 1936 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 1937 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && 1938 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { 1939 dev->caps.eqe_size = cache_line_size(); 1940 dev->caps.cqe_size = cache_line_size(); 1941 dev->caps.eqe_factor = 0; 1942 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | 1943 (ilog2(dev->caps.eqe_size) - 5)), 1944 INIT_HCA_EQE_CQE_STRIDE_OFFSET); 1945 1946 /* User still need to know to support CQE > 32B */ 1947 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1948 } 1949 1950 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1951 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); 1952 1953 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1954 1955 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1956 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); 1957 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); 1958 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); 1959 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); 1960 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); 1961 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); 1962 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1963 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1964 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1965 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1966 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1967 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1968 1969 /* steering attributes */ 1970 if (dev->caps.steering_mode == 1971 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1972 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 1973 cpu_to_be32(1 << 1974 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); 1975 1976 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); 1977 MLX4_PUT(inbox, param->log_mc_entry_sz, 1978 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1979 MLX4_PUT(inbox, param->log_mc_table_sz, 1980 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 1981 /* Enable Ethernet flow steering 1982 * with udp unicast and tcp unicast 1983 */ 1984 if (dev->caps.dmfs_high_steer_mode != 1985 
MLX4_STEERING_DMFS_A0_STATIC) 1986 MLX4_PUT(inbox, 1987 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1988 INIT_HCA_FS_ETH_BITS_OFFSET); 1989 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1990 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); 1991 /* Enable IPoIB flow steering 1992 * with udp unicast and tcp unicast 1993 */ 1994 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1995 INIT_HCA_FS_IB_BITS_OFFSET); 1996 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1997 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); 1998 1999 if (dev->caps.dmfs_high_steer_mode != 2000 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2001 MLX4_PUT(inbox, 2002 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] 2003 << 6)), 2004 INIT_HCA_FS_A0_OFFSET); 2005 } else { 2006 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 2007 MLX4_PUT(inbox, param->log_mc_entry_sz, 2008 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2009 MLX4_PUT(inbox, param->log_mc_hash_sz, 2010 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2011 MLX4_PUT(inbox, param->log_mc_table_sz, 2012 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2013 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) 2014 MLX4_PUT(inbox, (u8) (1 << 3), 2015 INIT_HCA_UC_STEERING_OFFSET); 2016 } 2017 2018 /* TPT attributes */ 2019 2020 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); 2021 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); 2022 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); 2023 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); 2024 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); 2025 2026 /* UAR attributes */ 2027 2028 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2029 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 2030 2031 /* set parser VXLAN attributes */ 2032 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { 2033 u8 parser_params = 0; 2034 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 2035 } 2036 2037 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 2038 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2039 2040 if (err) 2041 mlx4_err(dev, "INIT_HCA returns %d\n", err); 2042 2043 mlx4_free_cmd_mailbox(dev, mailbox); 2044 return err; 2045 } 2046 2047 int mlx4_QUERY_HCA(struct mlx4_dev *dev, 2048 struct mlx4_init_hca_param *param) 2049 { 2050 struct mlx4_cmd_mailbox *mailbox; 2051 __be32 *outbox; 2052 u32 dword_field; 2053 int err; 2054 u8 byte_field; 2055 static const u8 a0_dmfs_query_hw_steering[] = { 2056 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2057 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2058 [2] = MLX4_STEERING_DMFS_A0_STATIC, 2059 [3] = MLX4_STEERING_DMFS_A0_DISABLE 2060 }; 2061 2062 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 2063 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c 2064 2065 mailbox = mlx4_alloc_cmd_mailbox(dev); 2066 if (IS_ERR(mailbox)) 2067 return PTR_ERR(mailbox); 2068 outbox = mailbox->buf; 2069 2070 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2071 MLX4_CMD_QUERY_HCA, 2072 MLX4_CMD_TIME_CLASS_B, 2073 !mlx4_is_slave(dev)); 2074 if (err) 2075 goto out; 2076 2077 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 2078 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2079 2080 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2081 2082 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2083 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2084 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2085 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2086 MLX4_GET(param->cqc_base, outbox, 
INIT_HCA_CQC_BASE_OFFSET); 2087 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2088 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2089 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2090 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2091 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2092 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2093 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2094 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2095 2096 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2097 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2098 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2099 } else { 2100 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 2101 if (byte_field & 0x8) 2102 param->steering_mode = MLX4_STEERING_MODE_B0; 2103 else 2104 param->steering_mode = MLX4_STEERING_MODE_A0; 2105 } 2106 2107 if (dword_field & (1 << 13)) 2108 param->rss_ip_frags = 1; 2109 2110 /* steering attributes */ 2111 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2112 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2113 MLX4_GET(param->log_mc_entry_sz, outbox, 2114 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2115 MLX4_GET(param->log_mc_table_sz, outbox, 2116 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2117 MLX4_GET(byte_field, outbox, 2118 INIT_HCA_FS_A0_OFFSET); 2119 param->dmfs_high_steer_mode = 2120 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2121 } else { 2122 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2123 MLX4_GET(param->log_mc_entry_sz, outbox, 2124 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2125 MLX4_GET(param->log_mc_hash_sz, outbox, 2126 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2127 MLX4_GET(param->log_mc_table_sz, outbox, 2128 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2129 } 2130 2131 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2132 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); 2133 if (byte_field & 0x20) /* 64-bytes eqe enabled */ 2134 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; 2135 if (byte_field & 0x40) /* 64-bytes cqe enabled */ 2136 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; 2137 2138 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 2139 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); 2140 if (byte_field) { 2141 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; 2142 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; 2143 param->cqe_size = 1 << ((byte_field & 2144 MLX4_CQE_SIZE_MASK_STRIDE) + 5); 2145 param->eqe_size = 1 << (((byte_field & 2146 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); 2147 } 2148 2149 /* TPT attributes */ 2150 2151 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2152 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2153 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2154 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2155 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2156 2157 /* UAR attributes */ 2158 2159 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2160 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2161 2162 /* phv_check enable */ 2163 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2164 if (byte_field & 0x2) 2165 param->phv_check_en = 1; 2166 out: 2167 mlx4_free_cmd_mailbox(dev, mailbox); 2168 2169 return err; 2170 } 2171 2172 static int 
mlx4_hca_core_clock_update(struct mlx4_dev *dev) 2173 { 2174 struct mlx4_cmd_mailbox *mailbox; 2175 __be32 *outbox; 2176 int err; 2177 2178 mailbox = mlx4_alloc_cmd_mailbox(dev); 2179 if (IS_ERR(mailbox)) { 2180 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 2181 return PTR_ERR(mailbox); 2182 } 2183 outbox = mailbox->buf; 2184 2185 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2186 MLX4_CMD_QUERY_HCA, 2187 MLX4_CMD_TIME_CLASS_B, 2188 !mlx4_is_slave(dev)); 2189 if (err) { 2190 mlx4_warn(dev, "hca_core_clock update failed\n"); 2191 goto out; 2192 } 2193 2194 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2195 2196 out: 2197 mlx4_free_cmd_mailbox(dev, mailbox); 2198 2199 return err; 2200 } 2201 2202 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 2203 * and real QP0 are active, so that the paravirtualized QP0 is ready 2204 * to operate */ 2205 static int check_qp0_state(struct mlx4_dev *dev, int function, int port) 2206 { 2207 struct mlx4_priv *priv = mlx4_priv(dev); 2208 /* irrelevant if not infiniband */ 2209 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && 2210 priv->mfunc.master.qp0_state[port].qp0_active) 2211 return 1; 2212 return 0; 2213 } 2214 2215 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, 2216 struct mlx4_vhcr *vhcr, 2217 struct mlx4_cmd_mailbox *inbox, 2218 struct mlx4_cmd_mailbox *outbox, 2219 struct mlx4_cmd_info *cmd) 2220 { 2221 struct mlx4_priv *priv = mlx4_priv(dev); 2222 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2223 int err; 2224 2225 if (port < 0) 2226 return -EINVAL; 2227 2228 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 2229 return 0; 2230 2231 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2232 /* Enable port only if it was previously disabled */ 2233 if (!priv->mfunc.master.init_port_ref[port]) { 2234 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2235 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2236 if (err) 2237 return err; 2238 } 2239 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2240 } else { 2241 if (slave == mlx4_master_func_num(dev)) { 2242 if (check_qp0_state(dev, slave, port) && 2243 !priv->mfunc.master.qp0_state[port].port_active) { 2244 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2245 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2246 if (err) 2247 return err; 2248 priv->mfunc.master.qp0_state[port].port_active = 1; 2249 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2250 } 2251 } else 2252 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2253 } 2254 ++priv->mfunc.master.init_port_ref[port]; 2255 return 0; 2256 } 2257 2258 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) 2259 { 2260 struct mlx4_cmd_mailbox *mailbox; 2261 u32 *inbox; 2262 int err; 2263 u32 flags; 2264 u16 field; 2265 2266 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 2267 #define INIT_PORT_IN_SIZE 256 2268 #define INIT_PORT_FLAGS_OFFSET 0x00 2269 #define INIT_PORT_FLAG_SIG (1 << 18) 2270 #define INIT_PORT_FLAG_NG (1 << 17) 2271 #define INIT_PORT_FLAG_G0 (1 << 16) 2272 #define INIT_PORT_VL_SHIFT 4 2273 #define INIT_PORT_PORT_WIDTH_SHIFT 8 2274 #define INIT_PORT_MTU_OFFSET 0x04 2275 #define INIT_PORT_MAX_GID_OFFSET 0x06 2276 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a 2277 #define INIT_PORT_GUID0_OFFSET 0x10 2278 #define INIT_PORT_NODE_GUID_OFFSET 0x18 2279 #define INIT_PORT_SI_GUID_OFFSET 0x20 2280 2281 mailbox = mlx4_alloc_cmd_mailbox(dev); 2282 if (IS_ERR(mailbox)) 2283 return 
PTR_ERR(mailbox); 2284 inbox = mailbox->buf; 2285 2286 flags = 0; 2287 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 2288 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 2289 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 2290 2291 field = 128 << dev->caps.ib_mtu_cap[port]; 2292 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 2293 field = dev->caps.gid_table_len[port]; 2294 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 2295 field = dev->caps.pkey_table_len[port]; 2296 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); 2297 2298 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, 2299 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2300 2301 mlx4_free_cmd_mailbox(dev, mailbox); 2302 } else 2303 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2304 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2305 2306 if (!err) 2307 mlx4_hca_core_clock_update(dev); 2308 2309 return err; 2310 } 2311 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2312 2313 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, 2314 struct mlx4_vhcr *vhcr, 2315 struct mlx4_cmd_mailbox *inbox, 2316 struct mlx4_cmd_mailbox *outbox, 2317 struct mlx4_cmd_info *cmd) 2318 { 2319 struct mlx4_priv *priv = mlx4_priv(dev); 2320 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2321 int err; 2322 2323 if (port < 0) 2324 return -EINVAL; 2325 2326 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 2327 (1 << port))) 2328 return 0; 2329 2330 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2331 if (priv->mfunc.master.init_port_ref[port] == 1) { 2332 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2333 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2334 if (err) 2335 return err; 2336 } 2337 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2338 } else { 2339 /* infiniband port */ 2340 if (slave == mlx4_master_func_num(dev)) { 2341 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2342 priv->mfunc.master.qp0_state[port].port_active) { 2343 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2344 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2345 if (err) 2346 return err; 2347 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2348 priv->mfunc.master.qp0_state[port].port_active = 0; 2349 } 2350 } else 2351 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2352 } 2353 --priv->mfunc.master.init_port_ref[port]; 2354 return 0; 2355 } 2356 2357 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2358 { 2359 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2360 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2361 } 2362 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2363 2364 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2365 { 2366 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2367 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2368 } 2369 2370 struct mlx4_config_dev { 2371 __be32 update_flags; 2372 __be32 rsvd1[3]; 2373 __be16 vxlan_udp_dport; 2374 __be16 rsvd2; 2375 __be16 roce_v2_entropy; 2376 __be16 roce_v2_udp_dport; 2377 __be32 roce_flags; 2378 __be32 rsvd4[25]; 2379 __be16 rsvd5; 2380 u8 rsvd6; 2381 u8 rx_checksum_val; 2382 }; 2383 2384 #define MLX4_VXLAN_UDP_DPORT (1 << 0) 2385 #define MLX4_ROCE_V2_UDP_DPORT BIT(3) 2386 #define MLX4_DISABLE_RX_PORT BIT(18) 2387 2388 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2389 { 2390 int err; 2391 struct mlx4_cmd_mailbox *mailbox; 2392 2393 mailbox = mlx4_alloc_cmd_mailbox(dev); 2394 if (IS_ERR(mailbox)) 2395 return 
PTR_ERR(mailbox); 2396 2397 memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); 2398 2399 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, 2400 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2401 2402 mlx4_free_cmd_mailbox(dev, mailbox); 2403 return err; 2404 } 2405 2406 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2407 { 2408 int err; 2409 struct mlx4_cmd_mailbox *mailbox; 2410 2411 mailbox = mlx4_alloc_cmd_mailbox(dev); 2412 if (IS_ERR(mailbox)) 2413 return PTR_ERR(mailbox); 2414 2415 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, 2416 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2417 2418 if (!err) 2419 memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); 2420 2421 mlx4_free_cmd_mailbox(dev, mailbox); 2422 return err; 2423 } 2424 2425 /* Conversion between the HW values and the actual functionality. 2426 * The value represented by the array index, 2427 * and the functionality determined by the flags. 2428 */ 2429 static const u8 config_dev_csum_flags[] = { 2430 [0] = 0, 2431 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, 2432 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | 2433 MLX4_RX_CSUM_MODE_L4, 2434 [3] = MLX4_RX_CSUM_MODE_L4 | 2435 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | 2436 MLX4_RX_CSUM_MODE_MULTI_VLAN 2437 }; 2438 2439 int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2440 struct mlx4_config_dev_params *params) 2441 { 2442 struct mlx4_config_dev config_dev = {0}; 2443 int err; 2444 u8 csum_mask; 2445 2446 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 2447 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 2448 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 2449 2450 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) 2451 return -EOPNOTSUPP; 2452 2453 err = mlx4_CONFIG_DEV_get(dev, &config_dev); 2454 if (err) 2455 return err; 2456 2457 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & 2458 CONFIG_DEV_RX_CSUM_MODE_MASK; 2459 2460 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) 2461 return -EINVAL; 2462 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; 2463 2464 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & 2465 CONFIG_DEV_RX_CSUM_MODE_MASK; 2466 2467 if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags)) 2468 return -EINVAL; 2469 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; 2470 2471 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); 2472 2473 return 0; 2474 } 2475 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); 2476 2477 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) 2478 { 2479 struct mlx4_config_dev config_dev; 2480 2481 memset(&config_dev, 0, sizeof(config_dev)); 2482 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); 2483 config_dev.vxlan_udp_dport = udp_port; 2484 2485 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2486 } 2487 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2488 2489 #define CONFIG_DISABLE_RX_PORT BIT(15) 2490 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) 2491 { 2492 struct mlx4_config_dev config_dev; 2493 2494 memset(&config_dev, 0, sizeof(config_dev)); 2495 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); 2496 if (dis) 2497 config_dev.roce_flags = 2498 cpu_to_be32(CONFIG_DISABLE_RX_PORT); 2499 2500 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2501 } 2502 2503 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) 2504 { 2505 struct mlx4_config_dev config_dev; 2506 2507 memset(&config_dev, 0, 
sizeof(config_dev)); 2508 config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); 2509 config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); 2510 2511 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2512 } 2513 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); 2514 2515 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) 2516 { 2517 struct mlx4_cmd_mailbox *mailbox; 2518 struct { 2519 __be32 v_port1; 2520 __be32 v_port2; 2521 } *v2p; 2522 int err; 2523 2524 mailbox = mlx4_alloc_cmd_mailbox(dev); 2525 if (IS_ERR(mailbox)) 2526 return -ENOMEM; 2527 2528 v2p = mailbox->buf; 2529 v2p->v_port1 = cpu_to_be32(port1); 2530 v2p->v_port2 = cpu_to_be32(port2); 2531 2532 err = mlx4_cmd(dev, mailbox->dma, 0, 2533 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, 2534 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2535 2536 mlx4_free_cmd_mailbox(dev, mailbox); 2537 return err; 2538 } 2539 2540 2541 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2542 { 2543 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 2544 MLX4_CMD_SET_ICM_SIZE, 2545 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2546 if (ret) 2547 return ret; 2548 2549 /* 2550 * Round up number of system pages needed in case 2551 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 2552 */ 2553 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 2554 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 2555 2556 return 0; 2557 } 2558 2559 int mlx4_NOP(struct mlx4_dev *dev) 2560 { 2561 /* Input modifier of 0x1f means "finish as soon as possible." */ 2562 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2563 MLX4_CMD_NATIVE); 2564 } 2565 2566 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 2567 const u32 offset[], 2568 u32 value[], size_t array_len, u8 port) 2569 { 2570 struct mlx4_cmd_mailbox *mailbox; 2571 u32 *outbox; 2572 size_t i; 2573 int ret; 2574 2575 mailbox = mlx4_alloc_cmd_mailbox(dev); 2576 if (IS_ERR(mailbox)) 2577 return PTR_ERR(mailbox); 2578 2579 outbox = mailbox->buf; 2580 2581 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, 2582 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 2583 MLX4_CMD_NATIVE); 2584 if (ret) 2585 goto out; 2586 2587 for (i = 0; i < array_len; i++) { 2588 if (offset[i] > MLX4_MAILBOX_SIZE) { 2589 ret = -EINVAL; 2590 goto out; 2591 } 2592 2593 MLX4_GET(value[i], outbox, offset[i]); 2594 } 2595 2596 out: 2597 mlx4_free_cmd_mailbox(dev, mailbox); 2598 return ret; 2599 } 2600 EXPORT_SYMBOL(mlx4_query_diag_counters); 2601 2602 int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2603 { 2604 u8 port; 2605 u32 *outbox; 2606 struct mlx4_cmd_mailbox *mailbox; 2607 u32 in_mod; 2608 u32 guid_hi, guid_lo; 2609 int err, ret = 0; 2610 #define MOD_STAT_CFG_PORT_OFFSET 8 2611 #define MOD_STAT_CFG_GUID_H 0X14 2612 #define MOD_STAT_CFG_GUID_L 0X1c 2613 2614 mailbox = mlx4_alloc_cmd_mailbox(dev); 2615 if (IS_ERR(mailbox)) 2616 return PTR_ERR(mailbox); 2617 outbox = mailbox->buf; 2618 2619 for (port = 1; port <= dev->caps.num_ports; port++) { 2620 in_mod = port << MOD_STAT_CFG_PORT_OFFSET; 2621 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, 2622 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2623 MLX4_CMD_NATIVE); 2624 if (err) { 2625 mlx4_err(dev, "Fail to get port %d uplink guid\n", 2626 port); 2627 ret = err; 2628 } else { 2629 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); 2630 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); 2631 dev->caps.phys_port_id[port] = (u64)guid_lo | 2632 (u64)guid_hi << 32; 2633 } 2634 } 2635 mlx4_free_cmd_mailbox(dev, mailbox); 2636 return 
ret; 2637 } 2638 2639 #define MLX4_WOL_SETUP_MODE (5 << 28) 2640 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 2641 { 2642 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2643 2644 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 2645 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2646 MLX4_CMD_NATIVE); 2647 } 2648 EXPORT_SYMBOL_GPL(mlx4_wol_read); 2649 2650 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 2651 { 2652 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2653 2654 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 2655 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2656 } 2657 EXPORT_SYMBOL_GPL(mlx4_wol_write); 2658 2659 enum { 2660 ADD_TO_MCG = 0x26, 2661 }; 2662 2663 2664 void mlx4_opreq_action(struct work_struct *work) 2665 { 2666 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, 2667 opreq_task); 2668 struct mlx4_dev *dev = &priv->dev; 2669 int num_tasks = atomic_read(&priv->opreq_count); 2670 struct mlx4_cmd_mailbox *mailbox; 2671 struct mlx4_mgm *mgm; 2672 u32 *outbox; 2673 u32 modifier; 2674 u16 token; 2675 u16 type; 2676 int err; 2677 u32 num_qps; 2678 struct mlx4_qp qp; 2679 int i; 2680 u8 rem_mcg; 2681 u8 prot; 2682 2683 #define GET_OP_REQ_MODIFIER_OFFSET 0x08 2684 #define GET_OP_REQ_TOKEN_OFFSET 0x14 2685 #define GET_OP_REQ_TYPE_OFFSET 0x1a 2686 #define GET_OP_REQ_DATA_OFFSET 0x20 2687 2688 mailbox = mlx4_alloc_cmd_mailbox(dev); 2689 if (IS_ERR(mailbox)) { 2690 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 2691 return; 2692 } 2693 outbox = mailbox->buf; 2694 2695 while (num_tasks) { 2696 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2697 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2698 MLX4_CMD_NATIVE); 2699 if (err) { 2700 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2701 err); 2702 return; 2703 } 2704 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2705 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 2706 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 2707 type &= 0xfff; 2708 2709 switch (type) { 2710 case ADD_TO_MCG: 2711 if (dev->caps.steering_mode == 2712 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2713 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); 2714 err = EPERM; 2715 break; 2716 } 2717 mgm = (struct mlx4_mgm *)((u8 *)(outbox) + 2718 GET_OP_REQ_DATA_OFFSET); 2719 num_qps = be32_to_cpu(mgm->members_count) & 2720 MGM_QPN_MASK; 2721 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; 2722 prot = ((u8 *)(&mgm->members_count))[0] >> 6; 2723 2724 for (i = 0; i < num_qps; i++) { 2725 qp.qpn = be32_to_cpu(mgm->qp[i]); 2726 if (rem_mcg) 2727 err = mlx4_multicast_detach(dev, &qp, 2728 mgm->gid, 2729 prot, 0); 2730 else 2731 err = mlx4_multicast_attach(dev, &qp, 2732 mgm->gid, 2733 mgm->gid[5] 2734 , 0, prot, 2735 NULL); 2736 if (err) 2737 break; 2738 } 2739 break; 2740 default: 2741 mlx4_warn(dev, "Bad type for required operation\n"); 2742 err = EINVAL; 2743 break; 2744 } 2745 err = mlx4_cmd(dev, 0, ((u32) err | 2746 (__force u32)cpu_to_be32(token) << 16), 2747 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2748 MLX4_CMD_NATIVE); 2749 if (err) { 2750 mlx4_err(dev, "Failed to acknowledge required request: %d\n", 2751 err); 2752 goto out; 2753 } 2754 memset(outbox, 0, 0xffc); 2755 num_tasks = atomic_dec_return(&priv->opreq_count); 2756 } 2757 2758 out: 2759 mlx4_free_cmd_mailbox(dev, mailbox); 2760 } 2761 2762 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, 2763 struct mlx4_cmd_mailbox *mailbox) 2764 { 2765 #define 
MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 2766 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 2767 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 2768 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 2769 2770 u32 set_attr_mask, getresp_attr_mask; 2771 u32 trap_attr_mask, traprepress_attr_mask; 2772 2773 MLX4_GET(set_attr_mask, mailbox->buf, 2774 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); 2775 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", 2776 set_attr_mask); 2777 2778 MLX4_GET(getresp_attr_mask, mailbox->buf, 2779 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); 2780 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", 2781 getresp_attr_mask); 2782 2783 MLX4_GET(trap_attr_mask, mailbox->buf, 2784 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); 2785 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", 2786 trap_attr_mask); 2787 2788 MLX4_GET(traprepress_attr_mask, mailbox->buf, 2789 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); 2790 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", 2791 traprepress_attr_mask); 2792 2793 if (set_attr_mask && getresp_attr_mask && trap_attr_mask && 2794 traprepress_attr_mask) 2795 return 1; 2796 2797 return 0; 2798 } 2799 2800 int mlx4_config_mad_demux(struct mlx4_dev *dev) 2801 { 2802 struct mlx4_cmd_mailbox *mailbox; 2803 int err; 2804 2805 /* Check if mad_demux is supported */ 2806 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) 2807 return 0; 2808 2809 mailbox = mlx4_alloc_cmd_mailbox(dev); 2810 if (IS_ERR(mailbox)) { 2811 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); 2812 return -ENOMEM; 2813 } 2814 2815 /* Query mad_demux to find out which MADs are handled by internal sma */ 2816 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, 2817 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, 2818 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2819 if (err) { 2820 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", 2821 err); 2822 goto out; 2823 } 2824 2825 if (mlx4_check_smp_firewall_active(dev, mailbox)) 2826 dev->flags |= MLX4_FLAG_SECURE_HOST; 2827 2828 /* Config mad_demux to handle all MADs returned by the query above */ 2829 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, 2830 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, 2831 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2832 if (err) { 2833 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); 2834 goto out; 2835 } 2836 2837 if (dev->flags & MLX4_FLAG_SECURE_HOST) 2838 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); 2839 out: 2840 mlx4_free_cmd_mailbox(dev, mailbox); 2841 return err; 2842 } 2843 2844 /* Access Reg commands */ 2845 enum mlx4_access_reg_masks { 2846 MLX4_ACCESS_REG_STATUS_MASK = 0x7f, 2847 MLX4_ACCESS_REG_METHOD_MASK = 0x7f, 2848 MLX4_ACCESS_REG_LEN_MASK = 0x7ff 2849 }; 2850 2851 struct mlx4_access_reg { 2852 __be16 constant1; 2853 u8 status; 2854 u8 resrvd1; 2855 __be16 reg_id; 2856 u8 method; 2857 u8 constant2; 2858 __be32 resrvd2[2]; 2859 __be16 len_const; 2860 __be16 resrvd3; 2861 #define MLX4_ACCESS_REG_HEADER_SIZE (20) 2862 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; 2863 } __attribute__((__packed__)); 2864 2865 /** 2866 * mlx4_ACCESS_REG - Generic access reg command. 2867 * @dev: mlx4_dev. 2868 * @reg_id: register ID to access. 2869 * @method: Access method Read/Write. 2870 * @reg_len: register length to Read/Write in bytes. 2871 * @reg_data: reg_data pointer to Read/Write From/To. 
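 *	Lengths larger than the mailbox data area (MLX4_MAILBOX_SIZE -
 *	MLX4_ACCESS_REG_HEADER_SIZE bytes) are silently clamped before
 *	the command is posted.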
2872 * 2873 * Access ConnectX registers FW command. 2874 * Returns 0 on success and copies outbox mlx4_access_reg data 2875 * field into reg_data or a negative error code. 2876 */ 2877 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, 2878 enum mlx4_access_reg_method method, 2879 u16 reg_len, void *reg_data) 2880 { 2881 struct mlx4_cmd_mailbox *inbox, *outbox; 2882 struct mlx4_access_reg *inbuf, *outbuf; 2883 int err; 2884 2885 inbox = mlx4_alloc_cmd_mailbox(dev); 2886 if (IS_ERR(inbox)) 2887 return PTR_ERR(inbox); 2888 2889 outbox = mlx4_alloc_cmd_mailbox(dev); 2890 if (IS_ERR(outbox)) { 2891 mlx4_free_cmd_mailbox(dev, inbox); 2892 return PTR_ERR(outbox); 2893 } 2894 2895 inbuf = inbox->buf; 2896 outbuf = outbox->buf; 2897 2898 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); 2899 inbuf->constant2 = 0x1; 2900 inbuf->reg_id = cpu_to_be16(reg_id); 2901 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; 2902 2903 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); 2904 inbuf->len_const = 2905 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | 2906 ((0x3) << 12)); 2907 2908 memcpy(inbuf->reg_data, reg_data, reg_len); 2909 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, 2910 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2911 MLX4_CMD_WRAPPED); 2912 if (err) 2913 goto out; 2914 2915 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { 2916 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; 2917 mlx4_err(dev, 2918 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", 2919 reg_id, err); 2920 goto out; 2921 } 2922 2923 memcpy(reg_data, outbuf->reg_data, reg_len); 2924 out: 2925 mlx4_free_cmd_mailbox(dev, inbox); 2926 mlx4_free_cmd_mailbox(dev, outbox); 2927 return err; 2928 } 2929 2930 /* ConnectX registers IDs */ 2931 enum mlx4_reg_id { 2932 MLX4_REG_ID_PTYS = 0x5004, 2933 }; 2934 2935 /** 2936 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) 2937 * register 2938 * @dev: mlx4_dev. 2939 * @method: Access method Read/Write. 2940 * @ptys_reg: PTYS register data pointer. 2941 * 2942 * Access ConnectX PTYS register, to Read/Write Port Type/Speed 2943 * configuration 2944 * Returns 0 on success or a negative error code. 
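 *
 * A minimal query sketch (local variables are hypothetical, error
 * handling elided; MLX4_PTYS_EN, MLX4_ACCESS_REG_QUERY and the
 * eth_proto_cap field are assumed as declared in mlx4/device.h):
 *
 *	struct mlx4_ptys_reg ptys_reg = {
 *		.local_port = port,
 *		.proto_mask = MLX4_PTYS_EN,
 *	};
 *
 *	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
 *	if (!err)
 *		eth_cap = be32_to_cpu(ptys_reg.eth_proto_cap);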
2945 */ 2946 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, 2947 enum mlx4_access_reg_method method, 2948 struct mlx4_ptys_reg *ptys_reg) 2949 { 2950 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, 2951 method, sizeof(*ptys_reg), ptys_reg); 2952 } 2953 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); 2954 2955 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 2956 struct mlx4_vhcr *vhcr, 2957 struct mlx4_cmd_mailbox *inbox, 2958 struct mlx4_cmd_mailbox *outbox, 2959 struct mlx4_cmd_info *cmd) 2960 { 2961 struct mlx4_access_reg *inbuf = inbox->buf; 2962 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; 2963 u16 reg_id = be16_to_cpu(inbuf->reg_id); 2964 2965 if (slave != mlx4_master_func_num(dev) && 2966 method == MLX4_ACCESS_REG_WRITE) 2967 return -EPERM; 2968 2969 if (reg_id == MLX4_REG_ID_PTYS) { 2970 struct mlx4_ptys_reg *ptys_reg = 2971 (struct mlx4_ptys_reg *)inbuf->reg_data; 2972 2973 ptys_reg->local_port = 2974 mlx4_slave_convert_port(dev, slave, 2975 ptys_reg->local_port); 2976 } 2977 2978 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, 2979 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2980 MLX4_CMD_NATIVE); 2981 } 2982 2983 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) 2984 { 2985 #define SET_PORT_GEN_PHV_VALID 0x10 2986 #define SET_PORT_GEN_PHV_EN 0x80 2987 2988 struct mlx4_cmd_mailbox *mailbox; 2989 struct mlx4_set_port_general_context *context; 2990 u32 in_mod; 2991 int err; 2992 2993 mailbox = mlx4_alloc_cmd_mailbox(dev); 2994 if (IS_ERR(mailbox)) 2995 return PTR_ERR(mailbox); 2996 context = mailbox->buf; 2997 2998 context->flags2 |= SET_PORT_GEN_PHV_VALID; 2999 if (phv_bit) 3000 context->phv_en |= SET_PORT_GEN_PHV_EN; 3001 3002 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 3003 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 3004 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 3005 MLX4_CMD_NATIVE); 3006 3007 mlx4_free_cmd_mailbox(dev, mailbox); 3008 return err; 3009 } 3010 3011 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) 3012 { 3013 int err; 3014 struct mlx4_func_cap func_cap; 3015 3016 memset(&func_cap, 0, sizeof(func_cap)); 3017 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3018 if (!err) 3019 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; 3020 return err; 3021 } 3022 EXPORT_SYMBOL(get_phv_bit); 3023 3024 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) 3025 { 3026 int ret; 3027 3028 if (mlx4_is_slave(dev)) 3029 return -EPERM; 3030 3031 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3032 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3033 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); 3034 if (!ret) 3035 dev->caps.phv_bit[port] = new_val; 3036 return ret; 3037 } 3038 3039 return -EOPNOTSUPP; 3040 } 3041 EXPORT_SYMBOL(set_phv_bit); 3042 3043 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, 3044 bool *vlan_offload_disabled) 3045 { 3046 struct mlx4_func_cap func_cap; 3047 int err; 3048 3049 memset(&func_cap, 0, sizeof(func_cap)); 3050 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3051 if (!err) 3052 *vlan_offload_disabled = 3053 !!(func_cap.flags0 & 3054 QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE); 3055 return err; 3056 } 3057 EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled); 3058 3059 void mlx4_replace_zero_macs(struct mlx4_dev *dev) 3060 { 3061 int i; 3062 u8 mac_addr[ETH_ALEN]; 3063 3064 dev->port_random_macs = 0; 3065 for (i = 1; i <= dev->caps.num_ports; ++i) 3066 if (!dev->caps.def_mac[i] && 3067 dev->caps.port_type[i] == 
MLX4_PORT_TYPE_ETH) { 3068 eth_random_addr(mac_addr); 3069 dev->port_random_macs |= 1 << i; 3070 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); 3071 } 3072 } 3073 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); 3074