1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/etherdevice.h>
36 #include <linux/mlx4/cmd.h>
37 #include <linux/module.h>
38 #include <linux/cache.h>
39 
40 #include "fw.h"
41 #include "icm.h"
42 
43 enum {
44 	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
45 	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
46 	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
47 };
48 
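/*
 * These helpers are intentionally never defined: referencing them from the
 * default case of MLX4_GET()/MLX4_PUT() turns an unsupported sizeof() into
 * a link-time error instead of silently corrupting mailbox data.
 */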
49 extern void __buggy_use_of_MLX4_GET(void);
50 extern void __buggy_use_of_MLX4_PUT(void);
51 
52 static bool enable_qos;
53 module_param(enable_qos, bool, 0444);
54 MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
55 
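/*
 * MLX4_GET()/MLX4_PUT() copy one field between a command mailbox buffer and
 * a CPU variable at the given byte offset, converting between the mailbox's
 * big-endian layout and host order based on sizeof() the variable.
 * Illustrative use, as seen throughout this file:
 *
 *	u8 field;
 *
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 *	dev_cap->max_qps = 1 << (field & 0x1f);
 */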
56 #define MLX4_GET(dest, source, offset)				      \
57 	do {							      \
58 		void *__p = (char *) (source) + (offset);	      \
59 		u64 val;                                              \
60 		switch (sizeof (dest)) {			      \
61 		case 1: (dest) = *(u8 *) __p;	    break;	      \
62 		case 2: (dest) = be16_to_cpup(__p); break;	      \
63 		case 4: (dest) = be32_to_cpup(__p); break;	      \
64 		case 8: val = get_unaligned((u64 *)__p);              \
65 			(dest) = be64_to_cpu(val);  break;            \
66 		default: __buggy_use_of_MLX4_GET();		      \
67 		}						      \
68 	} while (0)
69 
70 #define MLX4_PUT(dest, source, offset)				      \
71 	do {							      \
72 		void *__d = ((char *) (dest) + (offset));	      \
73 		switch (sizeof(source)) {			      \
74 		case 1: *(u8 *) __d = (source);		       break; \
75 		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
76 		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
77 		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
78 		default: __buggy_use_of_MLX4_PUT();		      \
79 		}						      \
80 	} while (0)
81 
82 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
83 {
84 	static const char *fname[] = {
85 		[ 0] = "RC transport",
86 		[ 1] = "UC transport",
87 		[ 2] = "UD transport",
88 		[ 3] = "XRC transport",
89 		[ 6] = "SRQ support",
90 		[ 7] = "IPoIB checksum offload",
91 		[ 8] = "P_Key violation counter",
92 		[ 9] = "Q_Key violation counter",
93 		[12] = "Dual Port Different Protocol (DPDP) support",
94 		[15] = "Big LSO headers",
95 		[16] = "MW support",
96 		[17] = "APM support",
97 		[18] = "Atomic ops support",
98 		[19] = "Raw multicast support",
99 		[20] = "Address vector port checking support",
100 		[21] = "UD multicast support",
101 		[30] = "IBoE support",
102 		[32] = "Unicast loopback support",
103 		[34] = "FCS header control",
104 		[37] = "Wake On LAN (port1) support",
105 		[38] = "Wake On LAN (port2) support",
106 		[40] = "UDP RSS support",
107 		[41] = "Unicast VEP steering support",
108 		[42] = "Multicast VEP steering support",
109 		[48] = "Counters support",
110 		[52] = "RSS IP fragments support",
111 		[53] = "Port ETS Scheduler support",
112 		[55] = "Port link type sensing support",
113 		[59] = "Port management change event support",
114 		[61] = "64 byte EQE support",
115 		[62] = "64 byte CQE support",
116 	};
117 	int i;
118 
119 	mlx4_dbg(dev, "DEV_CAP flags:\n");
120 	for (i = 0; i < ARRAY_SIZE(fname); ++i)
121 		if (fname[i] && (flags & (1LL << i)))
122 			mlx4_dbg(dev, "    %s\n", fname[i]);
123 }
124 
125 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
126 {
127 	static const char * const fname[] = {
128 		[0] = "RSS support",
129 		[1] = "RSS Toeplitz Hash Function support",
130 		[2] = "RSS XOR Hash Function support",
131 		[3] = "Device managed flow steering support",
132 		[4] = "Automatic MAC reassignment support",
133 		[5] = "Time stamping support",
134 		[6] = "VST (control vlan insertion/stripping) support",
135 		[7] = "FSM (MAC anti-spoofing) support",
136 		[8] = "Dynamic QP updates support",
137 		[9] = "Device managed flow steering IPoIB support",
138 		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
139 		[11] = "MAD DEMUX (Secure-Host) support",
140 		[12] = "Large cache line (>64B) CQE stride support",
141 		[13] = "Large cache line (>64B) EQE stride support",
142 		[14] = "Ethernet protocol control support",
143 		[15] = "Ethernet Backplane autoneg support",
144 		[16] = "CONFIG DEV support",
145 		[17] = "Asymmetric EQs support",
146 		[18] = "More than 80 VFs support",
147 		[19] = "Performance optimized for limited rule configuration flow steering support",
148 		[20] = "Recoverable error events support",
149 		[21] = "Port Remap support",
150 		[22] = "QCN support",
151 		[23] = "QP rate limiting support",
152 		[24] = "Ethernet Flow control statistics support",
153 		[25] = "Granular QoS per VF support",
154 		[26] = "Port ETS Scheduler support",
155 		[27] = "Port beacon support",
156 		[28] = "RX-ALL support",
157 		[29] = "802.1ad offload support",
158 		[31] = "Modifying loopback source checks using UPDATE_QP support",
159 		[32] = "Loopback source checks support",
160 		[33] = "RoCEv2 support",
161 		[34] = "DMFS Sniffer support (UC & MC)",
162 		[35] = "QinQ VST mode support",
163 		[36] = "sl to vl mapping table change event support"
164 	};
165 	int i;
166 
167 	for (i = 0; i < ARRAY_SIZE(fname); ++i)
168 		if (fname[i] && (flags & (1LL << i)))
169 			mlx4_dbg(dev, "    %s\n", fname[i]);
170 }
171 
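/*
 * MOD_STAT_CFG: hand the log_pg_sz/log_pg_sz_m settings from *cfg to the
 * firmware through a command mailbox.
 */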
172 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
173 {
174 	struct mlx4_cmd_mailbox *mailbox;
175 	u32 *inbox;
176 	int err = 0;
177 
178 #define MOD_STAT_CFG_IN_SIZE		0x100
179 
180 #define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
181 #define MOD_STAT_CFG_PG_SZ_OFFSET	0x003
182 
183 	mailbox = mlx4_alloc_cmd_mailbox(dev);
184 	if (IS_ERR(mailbox))
185 		return PTR_ERR(mailbox);
186 	inbox = mailbox->buf;
187 
188 	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
189 	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
190 
191 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
192 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
193 
194 	mlx4_free_cmd_mailbox(dev, mailbox);
195 	return err;
196 }
197 
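/*
 * QUERY_FUNC: read the PCI bus/device/function identity of the given slave
 * along with its EQ and UAR reservations from the firmware.
 */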
198 int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
199 {
200 	struct mlx4_cmd_mailbox *mailbox;
201 	u32 *outbox;
202 	u8 in_modifier;
203 	u8 field;
204 	u16 field16;
205 	int err;
206 
207 #define QUERY_FUNC_BUS_OFFSET			0x00
208 #define QUERY_FUNC_DEVICE_OFFSET		0x01
209 #define QUERY_FUNC_FUNCTION_OFFSET		0x01
210 #define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
211 #define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
212 #define QUERY_FUNC_MAX_EQ_OFFSET		0x06
213 #define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b
214 
215 	mailbox = mlx4_alloc_cmd_mailbox(dev);
216 	if (IS_ERR(mailbox))
217 		return PTR_ERR(mailbox);
218 	outbox = mailbox->buf;
219 
220 	in_modifier = slave;
221 
222 	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
223 			   MLX4_CMD_QUERY_FUNC,
224 			   MLX4_CMD_TIME_CLASS_A,
225 			   MLX4_CMD_NATIVE);
226 	if (err)
227 		goto out;
228 
229 	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
230 	func->bus = field & 0xf;
231 	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
232 	func->device = field & 0xf1;
233 	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
234 	func->function = field & 0x7;
235 	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
236 	func->physical_function = field & 0xf;
237 	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
238 	func->rsvd_eqs = field16 & 0xffff;
239 	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
240 	func->max_eq = field16 & 0xffff;
241 	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
242 	func->rsvd_uars = field & 0x0f;
243 
244 	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
245 		 func->bus, func->device, func->function, func->physical_function,
246 		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);
247 
248 out:
249 	mlx4_free_cmd_mailbox(dev, mailbox);
250 	return err;
251 }
252 
253 static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
254 {
255 	struct mlx4_vport_oper_state *vp_oper;
256 	struct mlx4_vport_state *vp_admin;
257 	int err;
258 
259 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
260 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
261 
262 	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
263 		err = __mlx4_register_vlan(&priv->dev, port,
264 					   vp_admin->default_vlan,
265 					   &vp_oper->vlan_idx);
266 		if (err) {
267 			vp_oper->vlan_idx = NO_INDX;
268 			mlx4_warn(&priv->dev,
269 				  "No vlan resources slave %d, port %d\n",
270 				  slave, port);
271 			return err;
272 		}
273 		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
274 			 (int)(vp_admin->default_vlan),
275 			 vp_oper->vlan_idx, slave, port);
276 	}
277 	vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
278 	vp_oper->state.default_vlan = vp_admin->default_vlan;
279 	vp_oper->state.default_qos  = vp_admin->default_qos;
280 
281 	return 0;
282 }
283 
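/*
 * If the admin state requests 802.1ad (QinQ) VST for an active slave that
 * advertises VST QinQ support, activate it; otherwise revert the admin
 * request back to the operational state and warn.
 */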
284 static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
285 {
286 	struct mlx4_vport_oper_state *vp_oper;
287 	struct mlx4_slave_state *slave_state;
288 	struct mlx4_vport_state *vp_admin;
289 	int err;
290 
291 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
292 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
293 	slave_state = &priv->mfunc.master.slave_state[slave];
294 
295 	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
296 	    (!slave_state->active))
297 		return 0;
298 
299 	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
300 	    vp_oper->state.default_vlan == vp_admin->default_vlan &&
301 	    vp_oper->state.default_qos == vp_admin->default_qos)
302 		return 0;
303 
304 	if (!slave_state->vst_qinq_supported) {
305 		/* Warn and revert the request to set vst QinQ mode */
306 		vp_admin->vlan_proto   = vp_oper->state.vlan_proto;
307 		vp_admin->default_vlan = vp_oper->state.default_vlan;
308 		vp_admin->default_qos  = vp_oper->state.default_qos;
309 
310 		mlx4_warn(&priv->dev,
311 			  "Slave %d does not support VST QinQ mode\n", slave);
312 		return 0;
313 	}
314 
315 	err = mlx4_activate_vst_qinq(priv, slave, port);
316 	return err;
317 }
318 
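/*
 * PF-side handler for a slave's QUERY_FUNC_CAP: fill the outbox with the
 * capabilities this function is allowed to see.  op_modifier == 0 returns
 * the general per-function capabilities and quotas, op_modifier == 1
 * returns the information for the port given in the in_modifier.
 */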
319 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
320 				struct mlx4_vhcr *vhcr,
321 				struct mlx4_cmd_mailbox *inbox,
322 				struct mlx4_cmd_mailbox *outbox,
323 				struct mlx4_cmd_info *cmd)
324 {
325 	struct mlx4_priv *priv = mlx4_priv(dev);
326 	u8	field, port;
327 	u32	size, proxy_qp, qkey;
328 	int	err = 0;
329 	struct mlx4_func func;
330 
331 #define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
332 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
333 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
334 #define QUERY_FUNC_CAP_FMR_OFFSET		0x8
335 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
336 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
337 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
338 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
339 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
340 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
341 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
342 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
343 #define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48
344 
345 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
346 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
347 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
348 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
349 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
350 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68
351 
352 #define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c
353 
354 #define QUERY_FUNC_CAP_FMR_FLAG			0x80
355 #define QUERY_FUNC_CAP_FLAG_RDMA		0x40
356 #define QUERY_FUNC_CAP_FLAG_ETH			0x80
357 #define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
358 #define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
359 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04
360 
361 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
362 #define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)
363 
364 /* when opcode modifier = 1 */
365 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
366 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
367 #define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
368 #define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc
369 
370 #define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
371 #define QUERY_FUNC_CAP_QP0_PROXY		0x14
372 #define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
373 #define QUERY_FUNC_CAP_QP1_PROXY		0x1c
374 #define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28
375 
376 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
377 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
378 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO			0x10
379 #define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08
380 
381 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
382 #define QUERY_FUNC_CAP_PHV_BIT			0x40
383 #define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE	0x20
384 
385 #define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ	BIT(30)
386 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
387 
388 	if (vhcr->op_modifier == 1) {
389 		struct mlx4_active_ports actv_ports =
390 			mlx4_get_active_ports(dev, slave);
391 		int converted_port = mlx4_slave_convert_port(
392 				dev, slave, vhcr->in_modifier);
393 		struct mlx4_vport_oper_state *vp_oper;
394 
395 		if (converted_port < 0)
396 			return -EINVAL;
397 
398 		vhcr->in_modifier = converted_port;
399 		/* phys-port = logical-port */
400 		field = vhcr->in_modifier -
401 			find_first_bit(actv_ports.ports, dev->caps.num_ports);
402 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
403 
404 		port = vhcr->in_modifier;
405 		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
406 
407 		/* Set nic_info bit to mark new fields support */
408 		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
409 
410 		if (mlx4_vf_smi_enabled(dev, slave, port) &&
411 		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
412 			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
413 			MLX4_PUT(outbox->buf, qkey,
414 				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
415 		}
416 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
417 
418 		/* size is now the QP number */
419 		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
420 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
421 
422 		size += 2;
423 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
424 
425 		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
426 		proxy_qp += 2;
427 		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
428 
429 		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
430 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
431 
432 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
433 		err = mlx4_handle_vst_qinq(priv, slave, port);
434 		if (err)
435 			return err;
436 
437 		field = 0;
438 		if (dev->caps.phv_bit[port])
439 			field |= QUERY_FUNC_CAP_PHV_BIT;
440 		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
441 			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
442 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
443 
444 	} else if (vhcr->op_modifier == 0) {
445 		struct mlx4_active_ports actv_ports =
446 			mlx4_get_active_ports(dev, slave);
447 		struct mlx4_slave_state *slave_state =
448 			&priv->mfunc.master.slave_state[slave];
449 
450 		/* enable rdma and ethernet interfaces, new quota locations,
451 		 * and reserved lkey
452 		 */
453 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
454 			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
455 			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
456 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
457 
458 		field = min(
459 			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
460 			dev->caps.num_ports);
461 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
462 
463 		size = dev->caps.function_caps; /* set PF behaviours */
464 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
465 
466 		field = 0; /* protected FMR support not available as yet */
467 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
468 
469 		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
470 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
471 		size = dev->caps.num_qps;
472 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
473 
474 		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
475 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
476 		size = dev->caps.num_srqs;
477 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
478 
479 		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
480 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
481 		size = dev->caps.num_cqs;
482 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
483 
484 		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
485 		    mlx4_QUERY_FUNC(dev, &func, slave)) {
486 			size = vhcr->in_modifier &
487 				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
488 				dev->caps.num_eqs :
489 				rounddown_pow_of_two(dev->caps.num_eqs);
490 			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
491 			size = dev->caps.reserved_eqs;
492 			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
493 		} else {
494 			size = vhcr->in_modifier &
495 				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
496 				func.max_eq :
497 				rounddown_pow_of_two(func.max_eq);
498 			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
499 			size = func.rsvd_eqs;
500 			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
501 		}
502 
503 		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
504 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
505 		size = dev->caps.num_mpts;
506 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
507 
508 		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
509 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
510 		size = dev->caps.num_mtts;
511 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
512 
513 		size = dev->caps.num_mgms + dev->caps.num_amgms;
514 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
515 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
516 
517 		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
518 			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
519 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
520 
521 		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
522 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
523 
524 		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
525 			slave_state->vst_qinq_supported = true;
526 
527 	} else
528 		err = -EINVAL;
529 
530 	return err;
531 }
532 
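/*
 * Slave side of QUERY_FUNC_CAP: gen_or_port == 0 requests the general
 * capabilities, a non-zero value queries that logical port.  The reply is
 * parsed into *func_cap.
 */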
533 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
534 			struct mlx4_func_cap *func_cap)
535 {
536 	struct mlx4_cmd_mailbox *mailbox;
537 	u32			*outbox;
538 	u8			field, op_modifier;
539 	u32			size, qkey;
540 	int			err = 0, quotas = 0;
541 	u32                     in_modifier;
542 	u32			slave_caps;
543 
544 	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
545 	slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
546 		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
547 	in_modifier = op_modifier ? gen_or_port : slave_caps;
548 
549 	mailbox = mlx4_alloc_cmd_mailbox(dev);
550 	if (IS_ERR(mailbox))
551 		return PTR_ERR(mailbox);
552 
553 	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
554 			   MLX4_CMD_QUERY_FUNC_CAP,
555 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
556 	if (err)
557 		goto out;
558 
559 	outbox = mailbox->buf;
560 
561 	if (!op_modifier) {
562 		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
563 		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
564 			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
565 			err = -EPROTONOSUPPORT;
566 			goto out;
567 		}
568 		func_cap->flags = field;
569 		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
570 
571 		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
572 		func_cap->num_ports = field;
573 
574 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
575 		func_cap->pf_context_behaviour = size;
576 
577 		if (quotas) {
578 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
579 			func_cap->qp_quota = size & 0xFFFFFF;
580 
581 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
582 			func_cap->srq_quota = size & 0xFFFFFF;
583 
584 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
585 			func_cap->cq_quota = size & 0xFFFFFF;
586 
587 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
588 			func_cap->mpt_quota = size & 0xFFFFFF;
589 
590 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
591 			func_cap->mtt_quota = size & 0xFFFFFF;
592 
593 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
594 			func_cap->mcg_quota = size & 0xFFFFFF;
595 
596 		} else {
597 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
598 			func_cap->qp_quota = size & 0xFFFFFF;
599 
600 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
601 			func_cap->srq_quota = size & 0xFFFFFF;
602 
603 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
604 			func_cap->cq_quota = size & 0xFFFFFF;
605 
606 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
607 			func_cap->mpt_quota = size & 0xFFFFFF;
608 
609 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
610 			func_cap->mtt_quota = size & 0xFFFFFF;
611 
612 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
613 			func_cap->mcg_quota = size & 0xFFFFFF;
614 		}
615 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
616 		func_cap->max_eq = size & 0xFFFFFF;
617 
618 		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
619 		func_cap->reserved_eq = size & 0xFFFFFF;
620 
621 		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
622 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
623 			func_cap->reserved_lkey = size;
624 		} else {
625 			func_cap->reserved_lkey = 0;
626 		}
627 
628 		func_cap->extra_flags = 0;
629 
630 		/* Mailbox data from 0x6c and onward should only be treated if
631 		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
632 		 */
633 		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
634 			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
635 			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
636 				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
637 			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
638 				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
639 		}
640 
641 		goto out;
642 	}
643 
644 	/* logical port query */
645 	if (gen_or_port > dev->caps.num_ports) {
646 		err = -EINVAL;
647 		goto out;
648 	}
649 
650 	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
651 	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
652 		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
653 			mlx4_err(dev, "VLAN is enforced on this port\n");
654 			err = -EPROTONOSUPPORT;
655 			goto out;
656 		}
657 
658 		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
659 			mlx4_err(dev, "Force mac is enabled on this port\n");
660 			err = -EPROTONOSUPPORT;
661 			goto out;
662 		}
663 	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
664 		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
665 		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
666 			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
667 			err = -EPROTONOSUPPORT;
668 			goto out;
669 		}
670 	}
671 
672 	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
673 	func_cap->physical_port = field;
674 	if (func_cap->physical_port != gen_or_port) {
675 		err = -EINVAL;
676 		goto out;
677 	}
678 
679 	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
680 		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
681 		func_cap->qp0_qkey = qkey;
682 	} else {
683 		func_cap->qp0_qkey = 0;
684 	}
685 
686 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
687 	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
688 
689 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
690 	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
691 
692 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
693 	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
694 
695 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
696 	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
697 
698 	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
699 		MLX4_GET(func_cap->phys_port_id, outbox,
700 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
701 
702 	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
703 
704 	/* All other resources are allocated by the master, but we still report
705 	 * 'num' and 'reserved' capabilities as follows:
706 	 * - num remains the maximum resource index
707 	 * - 'num - reserved' is the total available objects of a resource, but
708 	 *   resource indices may be less than 'reserved'
709 	 * TODO: set per-resource quotas */
710 
711 out:
712 	mlx4_free_cmd_mailbox(dev, mailbox);
713 
714 	return err;
715 }
716 
717 static void disable_unsupported_roce_caps(void *buf);
718 
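/*
 * QUERY_DEV_CAP: read the device capability page from the firmware and
 * translate its raw fields and flag bits into struct mlx4_dev_cap.
 */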
719 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
720 {
721 	struct mlx4_cmd_mailbox *mailbox;
722 	u32 *outbox;
723 	u8 field;
724 	u32 field32, flags, ext_flags;
725 	u16 size;
726 	u16 stat_rate;
727 	int err;
728 	int i;
729 
730 #define QUERY_DEV_CAP_OUT_SIZE		       0x100
731 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
732 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
733 #define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
734 #define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
735 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
736 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
737 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
738 #define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
739 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
740 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
741 #define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
742 #define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
743 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
744 #define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
745 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
746 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
747 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
748 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
749 #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
750 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
751 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
752 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
753 #define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
754 #define QUERY_DEV_CAP_RSS_OFFSET		0x2e
755 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
756 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
757 #define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
758 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
759 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
760 #define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
761 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
762 #define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
763 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
764 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
765 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
766 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
767 #define QUERY_DEV_CAP_WOL_OFFSET		0x43
768 #define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
769 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
770 #define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
771 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
772 #define QUERY_DEV_CAP_BF_OFFSET			0x4c
773 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
774 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
775 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
776 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
777 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
778 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
779 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
780 #define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET	0x5D
781 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
782 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
783 #define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
784 #define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
785 #define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
786 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
787 #define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
788 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
789 #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
790 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
791 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
792 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
793 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
794 #define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
795 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
796 #define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
797 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
798 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
799 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
800 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
801 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
802 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
803 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
804 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
805 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
806 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
807 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
808 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
809 #define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
810 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
811 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
812 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
813 #define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT	0x9c
814 #define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
815 #define QUERY_DEV_CAP_VXLAN			0x9e
816 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
817 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
818 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
819 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
820 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
821 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
822 
823 
824 	dev_cap->flags2 = 0;
825 	mailbox = mlx4_alloc_cmd_mailbox(dev);
826 	if (IS_ERR(mailbox))
827 		return PTR_ERR(mailbox);
828 	outbox = mailbox->buf;
829 
830 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
831 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
832 	if (err)
833 		goto out;
834 
835 	if (mlx4_is_mfunc(dev))
836 		disable_unsupported_roce_caps(outbox);
837 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
838 	dev_cap->reserved_qps = 1 << (field & 0xf);
839 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
840 	dev_cap->max_qps = 1 << (field & 0x1f);
841 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
842 	dev_cap->reserved_srqs = 1 << (field >> 4);
843 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
844 	dev_cap->max_srqs = 1 << (field & 0x1f);
845 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
846 	dev_cap->max_cq_sz = 1 << field;
847 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
848 	dev_cap->reserved_cqs = 1 << (field & 0xf);
849 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
850 	dev_cap->max_cqs = 1 << (field & 0x1f);
851 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
852 	dev_cap->max_mpts = 1 << (field & 0x3f);
853 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
854 	dev_cap->reserved_eqs = 1 << (field & 0xf);
855 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
856 	dev_cap->max_eqs = 1 << (field & 0xf);
857 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
858 	dev_cap->reserved_mtts = 1 << (field >> 4);
859 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
860 	dev_cap->reserved_mrws = 1 << (field & 0xf);
861 	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
862 	dev_cap->num_sys_eqs = size & 0xfff;
863 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
864 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
865 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
866 	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
867 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
868 	field &= 0x1f;
869 	if (!field)
870 		dev_cap->max_gso_sz = 0;
871 	else
872 		dev_cap->max_gso_sz = 1 << field;
873 
874 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
875 	if (field & 0x20)
876 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
877 	if (field & 0x10)
878 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
879 	field &= 0xf;
880 	if (field) {
881 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
882 		dev_cap->max_rss_tbl_sz = 1 << field;
883 	} else
884 		dev_cap->max_rss_tbl_sz = 0;
885 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
886 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
887 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
888 	dev_cap->local_ca_ack_delay = field & 0x1f;
889 	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
890 	dev_cap->num_ports = field & 0xf;
891 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
892 	dev_cap->max_msg_sz = 1 << (field & 0x1f);
893 	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
894 	if (field & 0x10)
895 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
896 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
897 	if (field & 0x80)
898 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
899 	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
900 	if (field & 0x20)
901 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
902 	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
903 	if (field & 0x80)
904 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
905 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
906 	if (field & 0x80)
907 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
908 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
909 	dev_cap->fs_max_num_qp_per_entry = field;
910 	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
911 	if (field & (1 << 5))
912 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
913 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
914 	if (field & 0x1)
915 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
916 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
917 	dev_cap->stat_rate_support = stat_rate;
918 	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
919 	if (field & 0x80)
920 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
921 	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
922 	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
923 	dev_cap->flags = flags | (u64)ext_flags << 32;
924 	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
925 	dev_cap->wol_port[1] = !!(field & 0x20);
926 	dev_cap->wol_port[2] = !!(field & 0x40);
927 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
928 	dev_cap->reserved_uars = field >> 4;
929 	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
930 	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
931 	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
932 	dev_cap->min_page_sz = 1 << field;
933 
934 	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
935 	if (field & 0x80) {
936 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
937 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
938 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
939 		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
940 			field = 3;
941 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
942 	} else {
943 		dev_cap->bf_reg_size = 0;
944 	}
945 
946 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
947 	dev_cap->max_sq_sg = field;
948 	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
949 	dev_cap->max_sq_desc_sz = size;
950 
951 	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
952 	if (field & 0x1)
953 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
954 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
955 	dev_cap->max_qp_per_mcg = 1 << field;
956 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
957 	dev_cap->reserved_mgms = field & 0xf;
958 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
959 	dev_cap->max_mcgs = 1 << field;
960 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
961 	dev_cap->reserved_pds = field >> 4;
962 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
963 	dev_cap->max_pds = 1 << (field & 0x3f);
964 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
965 	dev_cap->reserved_xrcds = field >> 4;
966 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
967 	dev_cap->max_xrcds = 1 << (field & 0x1f);
968 
969 	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
970 	dev_cap->rdmarc_entry_sz = size;
971 	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
972 	dev_cap->qpc_entry_sz = size;
973 	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
974 	dev_cap->aux_entry_sz = size;
975 	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
976 	dev_cap->altc_entry_sz = size;
977 	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
978 	dev_cap->eqc_entry_sz = size;
979 	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
980 	dev_cap->cqc_entry_sz = size;
981 	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
982 	dev_cap->srq_entry_sz = size;
983 	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
984 	dev_cap->cmpt_entry_sz = size;
985 	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
986 	dev_cap->mtt_entry_sz = size;
987 	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
988 	dev_cap->dmpt_entry_sz = size;
989 
990 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
991 	dev_cap->max_srq_sz = 1 << field;
992 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
993 	dev_cap->max_qp_sz = 1 << field;
994 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
995 	dev_cap->resize_srq = field & 1;
996 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
997 	dev_cap->max_rq_sg = field;
998 	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
999 	dev_cap->max_rq_desc_sz = size;
1000 	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1001 	if (field & (1 << 4))
1002 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
1003 	if (field & (1 << 5))
1004 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
1005 	if (field & (1 << 6))
1006 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
1007 	if (field & (1 << 7))
1008 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
1009 	MLX4_GET(dev_cap->bmme_flags, outbox,
1010 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1011 	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
1012 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
1013 	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
1014 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
1015 	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
1016 	if (field & 0x20)
1017 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
1018 	if (field & (1 << 2))
1019 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
1020 	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
1021 	if (field & 0x80)
1022 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
1023 	if (field & 0x40)
1024 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
1025 
1026 	MLX4_GET(dev_cap->reserved_lkey, outbox,
1027 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
1028 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
1029 	if (field32 & (1 << 0))
1030 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
1031 	if (field32 & (1 << 7))
1032 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
1033 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
1034 	if (field32 & (1 << 17))
1035 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
1036 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
1037 	if (field & 1<<6)
1038 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
1039 	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
1040 	if (field & 1<<3)
1041 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
1042 	if (field & (1 << 5))
1043 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
1044 	MLX4_GET(dev_cap->max_icm_sz, outbox,
1045 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
1046 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1047 		MLX4_GET(dev_cap->max_counters, outbox,
1048 			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
1049 
1050 	MLX4_GET(field32, outbox,
1051 		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
1052 	if (field32 & (1 << 0))
1053 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
1054 
1055 	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
1056 		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
1057 	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
1058 	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
1059 		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
1060 	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
1061 
1062 	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
1063 	dev_cap->rl_caps.num_rates = size;
1064 	if (dev_cap->rl_caps.num_rates) {
1065 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
1066 		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
1067 		dev_cap->rl_caps.max_val  = size & 0xfff;
1068 		dev_cap->rl_caps.max_unit = size >> 14;
1069 		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
1070 		dev_cap->rl_caps.min_val  = size & 0xfff;
1071 		dev_cap->rl_caps.min_unit = size >> 14;
1072 	}
1073 
1074 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1075 	if (field32 & (1 << 16))
1076 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
1077 	if (field32 & (1 << 18))
1078 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
1079 	if (field32 & (1 << 19))
1080 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
1081 	if (field32 & (1 << 26))
1082 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
1083 	if (field32 & (1 << 20))
1084 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
1085 	if (field32 & (1 << 21))
1086 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
1087 
1088 	for (i = 1; i <= dev_cap->num_ports; i++) {
1089 		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
1090 		if (err)
1091 			goto out;
1092 	}
1093 
1094 	/*
1095 	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
1096 	 * we can't use any EQs whose doorbell falls on that page,
1097 	 * even if the EQ itself isn't reserved.
1098 	 */
1099 	if (dev_cap->num_sys_eqs == 0)
1100 		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
1101 					    dev_cap->reserved_eqs);
1102 	else
1103 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
1104 
1105 out:
1106 	mlx4_free_cmd_mailbox(dev, mailbox);
1107 	return err;
1108 }
1109 
1110 void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
1111 {
1112 	if (dev_cap->bf_reg_size > 0)
1113 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
1114 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
1115 	else
1116 		mlx4_dbg(dev, "BlueFlame not available\n");
1117 
1118 	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
1119 		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
1120 	mlx4_dbg(dev, "Max ICM size %lld MB\n",
1121 		 (unsigned long long) dev_cap->max_icm_sz >> 20);
1122 	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1123 		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
1124 	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1125 		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
1126 	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1127 		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
1128 	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
1129 		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
1130 		 dev_cap->eqc_entry_sz);
1131 	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1132 		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
1133 	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1134 		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
1135 	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1136 		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
1137 	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1138 		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
1139 	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
1140 		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
1141 		 dev_cap->port_cap[1].max_port_width);
1142 	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
1143 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
1144 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
1145 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
1146 	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
1147 	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
1148 	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
1149 	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
1150 		 dev_cap->dmfs_high_rate_qpn_base);
1151 	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
1152 		 dev_cap->dmfs_high_rate_qpn_range);
1153 
1154 	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
1155 		struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
1156 
1157 		mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
1158 			 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
1159 			 rl_caps->min_unit, rl_caps->min_val);
1160 	}
1161 
1162 	dump_dev_cap_flags(dev, dev_cap->flags);
1163 	dump_dev_cap_flags2(dev, dev_cap->flags2);
1164 }
1165 
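/*
 * QUERY_PORT: read per-port capabilities.  Firmware using the old command
 * interface (revisions before the dedicated port commands) only exposes
 * this information through QUERY_DEV_CAP, so both paths are handled here.
 */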
1166 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
1167 {
1168 	struct mlx4_cmd_mailbox *mailbox;
1169 	u32 *outbox;
1170 	u8 field;
1171 	u32 field32;
1172 	int err;
1173 
1174 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1175 	if (IS_ERR(mailbox))
1176 		return PTR_ERR(mailbox);
1177 	outbox = mailbox->buf;
1178 
1179 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1180 		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1181 				   MLX4_CMD_TIME_CLASS_A,
1182 				   MLX4_CMD_NATIVE);
1183 
1184 		if (err)
1185 			goto out;
1186 
1187 		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
1188 		port_cap->max_vl	   = field >> 4;
1189 		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
1190 		port_cap->ib_mtu	   = field >> 4;
1191 		port_cap->max_port_width = field & 0xf;
1192 		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
1193 		port_cap->max_gids	   = 1 << (field & 0xf);
1194 		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
1195 		port_cap->max_pkeys	   = 1 << (field & 0xf);
1196 	} else {
1197 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
1198 #define QUERY_PORT_MTU_OFFSET			0x01
1199 #define QUERY_PORT_ETH_MTU_OFFSET		0x02
1200 #define QUERY_PORT_WIDTH_OFFSET			0x06
1201 #define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
1202 #define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
1203 #define QUERY_PORT_MAX_VL_OFFSET		0x0b
1204 #define QUERY_PORT_MAC_OFFSET			0x10
1205 #define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
1206 #define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
1207 #define QUERY_PORT_TRANS_CODE_OFFSET		0x20
1208 
1209 		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
1210 				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1211 		if (err)
1212 			goto out;
1213 
1214 		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1215 		port_cap->link_state = (field & 0x80) >> 7;
1216 		port_cap->supported_port_types = field & 3;
1217 		port_cap->suggested_type = (field >> 3) & 1;
1218 		port_cap->default_sense = (field >> 4) & 1;
1219 		port_cap->dmfs_optimized_state = (field >> 5) & 1;
1220 		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
1221 		port_cap->ib_mtu	   = field & 0xf;
1222 		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
1223 		port_cap->max_port_width = field & 0xf;
1224 		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
1225 		port_cap->max_gids	   = 1 << (field >> 4);
1226 		port_cap->max_pkeys	   = 1 << (field & 0xf);
1227 		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
1228 		port_cap->max_vl	   = field & 0xf;
1229 		port_cap->max_tc_eth	   = field >> 4;
1230 		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
1231 		port_cap->log_max_macs  = field & 0xf;
1232 		port_cap->log_max_vlans = field >> 4;
1233 		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
1234 		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
1235 		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
1236 		port_cap->trans_type = field32 >> 24;
1237 		port_cap->vendor_oui = field32 & 0xffffff;
1238 		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
1239 		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
1240 	}
1241 
1242 out:
1243 	mlx4_free_cmd_mailbox(dev, mailbox);
1244 	return err;
1245 }
1246 
1247 #define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS	(1 << 28)
1248 #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
1249 #define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
1250 #define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)
1251 
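/*
 * PF-side wrapper for a slave's QUERY_DEV_CAP: run the real query, then
 * remap the per-port WoL flags to the slave's port numbering and hide
 * capabilities a guest must not use (timestamping, VXLAN, port beacon,
 * BlueFlame, MW type 2 and port remap, device-managed flow steering when
 * it is not enabled, IPoIB flow steering, QCN, QP rate limiting, QoS per
 * VF, ignore-FCS and the host-side virtualization features).
 */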
1252 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1253 			       struct mlx4_vhcr *vhcr,
1254 			       struct mlx4_cmd_mailbox *inbox,
1255 			       struct mlx4_cmd_mailbox *outbox,
1256 			       struct mlx4_cmd_info *cmd)
1257 {
1258 	u64	flags;
1259 	int	err = 0;
1260 	u8	field;
1261 	u16	field16;
1262 	u32	bmme_flags, field32;
1263 	int	real_port;
1264 	int	slave_port;
1265 	int	first_port;
1266 	struct mlx4_active_ports actv_ports;
1267 
1268 	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
1269 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1270 	if (err)
1271 		return err;
1272 
1273 	disable_unsupported_roce_caps(outbox->buf);
1274 	/* Unconditionally add the port management change event capability
1275 	 * and disable MW type 1 for slaves.
1276 	 */
1277 	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1278 	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
1279 	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
1280 	actv_ports = mlx4_get_active_ports(dev, slave);
1281 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
1282 	for (slave_port = 0, real_port = first_port;
1283 	     real_port < first_port +
1284 	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
1285 	     ++real_port, ++slave_port) {
1286 		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
1287 			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
1288 		else
1289 			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1290 	}
1291 	for (; slave_port < dev->caps.num_ports; ++slave_port)
1292 		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1293 
1294 	/* Not exposing RSS IP fragments to guests */
1295 	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
1296 	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1297 
1298 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
1299 	field &= ~0x0F;
1300 	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
1301 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
1302 
1303 	/* For guests, disable timestamp */
1304 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1305 	field &= 0x7f;
1306 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1307 
1308 	/* For guests, disable vxlan tunneling and QoS support */
1309 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
1310 	field &= 0xd7;
1311 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
1312 
1313 	/* For guests, disable port BEACON */
1314 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
1315 	field &= 0x7f;
1316 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
1317 
1318 	/* For guests, report Blueflame disabled */
1319 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
1320 	field &= 0x7f;
1321 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
1322 
1323 	/* For guests, disable mw type 2 and port remap */
1324 	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1325 	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
1326 	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
1327 	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1328 
1329 	/* turn off device-managed steering capability if not enabled */
1330 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1331 		MLX4_GET(field, outbox->buf,
1332 			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1333 		field &= 0x7f;
1334 		MLX4_PUT(outbox->buf, field,
1335 			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1336 	}
1337 
1338 	/* turn off ipoib managed steering for guests */
1339 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1340 	field &= ~0x80;
1341 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1342 
1343 	/* turn off host side virt features (VST, FSM, etc) for guests */
1344 	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1345 	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
1346 		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
1347 	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1348 
1349 	/* turn off QCN for guests */
1350 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1351 	field &= 0xfe;
1352 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
1353 
1354 	/* turn off QP max-rate limiting for guests */
1355 	field16 = 0;
1356 	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
1357 
1358 	/* turn off QoS per VF support for guests */
1359 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1360 	field &= 0xef;
1361 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1362 
1363 	/* turn off ignore FCS feature for guests */
1364 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
1365 	field &= 0xfb;
1366 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
1367 
1368 	return 0;
1369 }
1370 
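/*
 * Clear the RoCE-related capability bits (extended flags bit 31, extended
 * flags-2 bit 24 and the BMME RoCE v1/v2 flag) in a raw QUERY_DEV_CAP
 * mailbox so that unsupported RoCE modes are not advertised.
 */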
1371 static void disable_unsupported_roce_caps(void *buf)
1372 {
1373 	u32 flags;
1374 
1375 	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1376 	flags &= ~(1UL << 31);
1377 	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1378 	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1379 	flags &= ~(1UL << 24);
1380 	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
1381 	MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1382 	flags &= ~(MLX4_FLAG_ROCE_V1_V2);
1383 	MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1384 }
1385 
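/*
 * PF-side wrapper for a slave's QUERY_PORT: only the general query is
 * allowed, and for VFs the reply is rewritten with the administered MAC,
 * link state and the GID/P_Key table lengths the slave may use.
 */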
1386 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1387 			    struct mlx4_vhcr *vhcr,
1388 			    struct mlx4_cmd_mailbox *inbox,
1389 			    struct mlx4_cmd_mailbox *outbox,
1390 			    struct mlx4_cmd_info *cmd)
1391 {
1392 	struct mlx4_priv *priv = mlx4_priv(dev);
1393 	u64 def_mac;
1394 	u8 port_type;
1395 	u16 short_field;
1396 	int err;
1397 	int admin_link_state;
1398 	int port = mlx4_slave_convert_port(dev, slave,
1399 					   vhcr->in_modifier & 0xFF);
1400 
1401 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
1402 #define MLX4_PORT_LINK_UP_MASK		0x80
1403 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
1404 #define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e
1405 
1406 	if (port < 0)
1407 		return -EINVAL;
1408 
1409 	/* Protect against untrusted guests: enforce that this is the
1410 	 * QUERY_PORT general query.
1411 	 */
1412 	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
1413 		return -EINVAL;
1414 
1415 	vhcr->in_modifier = port;
1416 
1417 	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
1418 			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1419 			   MLX4_CMD_NATIVE);
1420 
1421 	if (!err && dev->caps.function != slave) {
1422 		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
1423 		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
1424 
1425 		/* get port type - currently only eth is enabled */
1426 		MLX4_GET(port_type, outbox->buf,
1427 			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1428 
1429 		/* No link sensing allowed */
1430 		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
1431 		/* set port type to currently operating port type */
1432 		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
1433 
1434 		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
1435 		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
1436 			port_type |= MLX4_PORT_LINK_UP_MASK;
1437 		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
1438 			port_type &= ~MLX4_PORT_LINK_UP_MASK;
1439 		else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
1440 			int other_port = (port == 1) ? 2 : 1;
1441 			struct mlx4_port_cap port_cap;
1442 
1443 			err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
1444 			if (err)
1445 				goto out;
1446 			port_type |= (port_cap.link_state << 7);
1447 		}
1448 
1449 		MLX4_PUT(outbox->buf, port_type,
1450 			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1451 
1452 		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
1453 			short_field = mlx4_get_slave_num_gids(dev, slave, port);
1454 		else
1455 			short_field = 1; /* slave max gids */
1456 		MLX4_PUT(outbox->buf, short_field,
1457 			 QUERY_PORT_CUR_MAX_GID_OFFSET);
1458 
1459 		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
1460 		MLX4_PUT(outbox->buf, short_field,
1461 			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1462 	}
1463 out:
1464 	return err;
1465 }
1466 
1467 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1468 				    int *gid_tbl_len, int *pkey_tbl_len)
1469 {
1470 	struct mlx4_cmd_mailbox *mailbox;
1471 	u32			*outbox;
1472 	u16			field;
1473 	int			err;
1474 
1475 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1476 	if (IS_ERR(mailbox))
1477 		return PTR_ERR(mailbox);
1478 
1479 	err =  mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1480 			    MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1481 			    MLX4_CMD_WRAPPED);
1482 	if (err)
1483 		goto out;
1484 
1485 	outbox = mailbox->buf;
1486 
1487 	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
1488 	*gid_tbl_len = field;
1489 
1490 	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1491 	*pkey_tbl_len = field;
1492 
1493 out:
1494 	mlx4_free_cmd_mailbox(dev, mailbox);
1495 	return err;
1496 }
1497 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
1498 
1499 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1500 {
1501 	struct mlx4_cmd_mailbox *mailbox;
1502 	struct mlx4_icm_iter iter;
1503 	__be64 *pages;
1504 	int lg;
1505 	int nent = 0;
1506 	int i;
1507 	int err = 0;
1508 	int ts = 0, tc = 0;
1509 
1510 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1511 	if (IS_ERR(mailbox))
1512 		return PTR_ERR(mailbox);
1513 	pages = mailbox->buf;
1514 
1515 	for (mlx4_icm_first(icm, &iter);
1516 	     !mlx4_icm_last(&iter);
1517 	     mlx4_icm_next(&iter)) {
1518 		/*
1519 		 * We have to pass pages that are aligned to their
1520 		 * size, so find the least significant 1 in the
1521 		 * address or size and use that as our log2 size.
1522 		 */
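		/*
		 * Worked example with hypothetical values: a chunk at address
		 * 0x12340000 with size 0x00200000 gives (addr | size) =
		 * 0x12540000, whose lowest set bit is bit 18, so lg = 18 and
		 * the chunk is passed to FW as 256 KB pages.
		 */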
1523 		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1524 		if (lg < MLX4_ICM_PAGE_SHIFT) {
1525 			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1526 				  MLX4_ICM_PAGE_SIZE,
1527 				  (unsigned long long) mlx4_icm_addr(&iter),
1528 				  mlx4_icm_size(&iter));
1529 			err = -EINVAL;
1530 			goto out;
1531 		}
1532 
1533 		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
1534 			if (virt != -1) {
1535 				pages[nent * 2] = cpu_to_be64(virt);
1536 				virt += 1 << lg;
1537 			}
1538 
1539 			pages[nent * 2 + 1] =
1540 				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
1541 					    (lg - MLX4_ICM_PAGE_SHIFT));
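			/* ts tracks the total mapped size in KB and tc the
			 * number of chunks; both feed the debug messages
			 * printed after the loop.
			 */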
1542 			ts += 1 << (lg - 10);
1543 			++tc;
1544 
1545 			if (++nent == MLX4_MAILBOX_SIZE / 16) {
1546 				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1547 						MLX4_CMD_TIME_CLASS_B,
1548 						MLX4_CMD_NATIVE);
1549 				if (err)
1550 					goto out;
1551 				nent = 0;
1552 			}
1553 		}
1554 	}
1555 
1556 	if (nent)
1557 		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1558 			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1559 	if (err)
1560 		goto out;
1561 
1562 	switch (op) {
1563 	case MLX4_CMD_MAP_FA:
1564 		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1565 		break;
1566 	case MLX4_CMD_MAP_ICM_AUX:
1567 		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1568 		break;
1569 	case MLX4_CMD_MAP_ICM:
1570 		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1571 			 tc, ts, (unsigned long long) virt - (ts << 10));
1572 		break;
1573 	}
1574 
1575 out:
1576 	mlx4_free_cmd_mailbox(dev, mailbox);
1577 	return err;
1578 }
1579 
1580 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1581 {
1582 	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1583 }
1584 
1585 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1586 {
1587 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1588 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1589 }
1590 
1591 
1592 int mlx4_RUN_FW(struct mlx4_dev *dev)
1593 {
1594 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1595 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1596 }
1597 
1598 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1599 {
1600 	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
1601 	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1602 	struct mlx4_cmd_mailbox *mailbox;
1603 	u32 *outbox;
1604 	int err = 0;
1605 	u64 fw_ver;
1606 	u16 cmd_if_rev;
1607 	u8 lg;
1608 
1609 #define QUERY_FW_OUT_SIZE             0x100
1610 #define QUERY_FW_VER_OFFSET            0x00
1611 #define QUERY_FW_PPF_ID		       0x09
1612 #define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
1613 #define QUERY_FW_MAX_CMD_OFFSET        0x0f
1614 #define QUERY_FW_ERR_START_OFFSET      0x30
1615 #define QUERY_FW_ERR_SIZE_OFFSET       0x38
1616 #define QUERY_FW_ERR_BAR_OFFSET        0x3c
1617 
1618 #define QUERY_FW_SIZE_OFFSET           0x00
1619 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
1620 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
1621 
1622 #define QUERY_FW_COMM_BASE_OFFSET      0x40
1623 #define QUERY_FW_COMM_BAR_OFFSET       0x48
1624 
1625 #define QUERY_FW_CLOCK_OFFSET	       0x50
1626 #define QUERY_FW_CLOCK_BAR	       0x58
1627 
1628 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1629 	if (IS_ERR(mailbox))
1630 		return PTR_ERR(mailbox);
1631 	outbox = mailbox->buf;
1632 
1633 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1634 			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1635 	if (err)
1636 		goto out;
1637 
1638 	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1639 	/*
1640 	 * The FW subminor version is stored in more significant bits than
1641 	 * the minor version, so swap them here.
1642 	 */
1643 	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1644 		((fw_ver & 0xffff0000ull) >> 16) |
1645 		((fw_ver & 0x0000ffffull) << 16);
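	/*
	 * Example with a hypothetical raw value of 0x00020190000b (major 2,
	 * subminor 0x190, minor 0xb): after the swap it becomes
	 * 0x0002000b0190 and is later printed as "2.11.400".
	 */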
1646 
1647 	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1648 	dev->caps.function = lg;
1649 
1650 	if (mlx4_is_slave(dev))
1651 		goto out;
1652 
1653 
1654 	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1655 	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1656 	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1657 		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1658 			 cmd_if_rev);
1659 		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1660 			 (int) (dev->caps.fw_ver >> 32),
1661 			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1662 			 (int) dev->caps.fw_ver & 0xffff);
1663 		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1664 			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1665 		err = -ENODEV;
1666 		goto out;
1667 	}
1668 
1669 	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1670 		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1671 
1672 	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1673 	cmd->max_cmds = 1 << lg;
1674 
1675 	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1676 		 (int) (dev->caps.fw_ver >> 32),
1677 		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1678 		 (int) dev->caps.fw_ver & 0xffff,
1679 		 cmd_if_rev, cmd->max_cmds);
1680 
1681 	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1682 	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
1683 	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
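	/* The BAR number appears to be encoded in the upper bits of this
	 * field; shifting it down and doubling it presumably yields the
	 * 32-bit PCI BAR index, since each 64-bit BAR spans two registers.
	 */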
1684 	fw->catas_bar = (fw->catas_bar >> 6) * 2;
1685 
1686 	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1687 		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1688 
1689 	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
1690 	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1691 	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1692 	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1693 
1694 	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1695 	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
1696 	fw->comm_bar = (fw->comm_bar >> 6) * 2;
1697 	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1698 		 fw->comm_bar, fw->comm_base);
1699 	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1700 
1701 	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1702 	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
1703 	fw->clock_bar = (fw->clock_bar >> 6) * 2;
1704 	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1705 		 fw->clock_bar, fw->clock_offset);
1706 
1707 	/*
1708 	 * Round up number of system pages needed in case
1709 	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1710 	 */
1711 	fw->fw_pages =
1712 		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1713 		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
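	/*
	 * Example, assuming 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT == 12) and a
	 * 64 KB PAGE_SIZE: fw_pages = 100 ICM pages is aligned up to 112 and
	 * shifted right by 4, giving 7 system pages.
	 */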
1714 
1715 	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1716 		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1717 
1718 out:
1719 	mlx4_free_cmd_mailbox(dev, mailbox);
1720 	return err;
1721 }
1722 
1723 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1724 			  struct mlx4_vhcr *vhcr,
1725 			  struct mlx4_cmd_mailbox *inbox,
1726 			  struct mlx4_cmd_mailbox *outbox,
1727 			  struct mlx4_cmd_info *cmd)
1728 {
1729 	u8 *outbuf;
1730 	int err;
1731 
1732 	outbuf = outbox->buf;
1733 	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1734 			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1735 	if (err)
1736 		return err;
1737 
1738 	/* for slaves, set pci PPF ID to invalid and zero out everything
1739 	 * else except FW version */
1740 	outbuf[0] = outbuf[1] = 0;
1741 	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1742 	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1743 
1744 	return 0;
1745 }
1746 
1747 static void get_board_id(void *vsd, char *board_id)
1748 {
1749 	int i;
1750 
1751 #define VSD_OFFSET_SIG1		0x00
1752 #define VSD_OFFSET_SIG2		0xde
1753 #define VSD_OFFSET_MLX_BOARD_ID	0xd0
1754 #define VSD_OFFSET_TS_BOARD_ID	0x20
1755 
1756 #define VSD_SIGNATURE_TOPSPIN	0x5ad
1757 
1758 	memset(board_id, 0, MLX4_BOARD_ID_LEN);
1759 
1760 	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1761 	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1762 		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1763 	} else {
1764 		/*
1765 		 * The board ID is a string but the firmware byte
1766 		 * swaps each 4-byte word before passing it back to
1767 		 * us.  Therefore we need to swab it before printing.
1768 		 */
1769 		u32 *bid_u32 = (u32 *)board_id;
1770 
1771 		for (i = 0; i < 4; ++i) {
1772 			u32 *addr;
1773 			u32 val;
1774 
1775 			addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
1776 			val = get_unaligned(addr);
1777 			val = swab32(val);
1778 			put_unaligned(val, &bid_u32[i]);
1779 		}
1780 	}
1781 }
1782 
1783 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1784 {
1785 	struct mlx4_cmd_mailbox *mailbox;
1786 	u32 *outbox;
1787 	int err;
1788 
1789 #define QUERY_ADAPTER_OUT_SIZE             0x100
1790 #define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1791 #define QUERY_ADAPTER_VSD_OFFSET           0x20
1792 
1793 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1794 	if (IS_ERR(mailbox))
1795 		return PTR_ERR(mailbox);
1796 	outbox = mailbox->buf;
1797 
1798 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1799 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1800 	if (err)
1801 		goto out;
1802 
1803 	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1804 
1805 	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1806 		     adapter->board_id);
1807 
1808 out:
1809 	mlx4_free_cmd_mailbox(dev, mailbox);
1810 	return err;
1811 }
1812 
1813 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1814 {
1815 	struct mlx4_cmd_mailbox *mailbox;
1816 	__be32 *inbox;
1817 	int err;
1818 	static const u8 a0_dmfs_hw_steering[] =  {
1819 		[MLX4_STEERING_DMFS_A0_DEFAULT]		= 0,
1820 		[MLX4_STEERING_DMFS_A0_DYNAMIC]		= 1,
1821 		[MLX4_STEERING_DMFS_A0_STATIC]		= 2,
1822 		[MLX4_STEERING_DMFS_A0_DISABLE]		= 3
1823 	};
1824 
1825 #define INIT_HCA_IN_SIZE		 0x200
1826 #define INIT_HCA_VERSION_OFFSET		 0x000
1827 #define	 INIT_HCA_VERSION		 2
1828 #define INIT_HCA_VXLAN_OFFSET		 0x0c
1829 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
1830 #define INIT_HCA_FLAGS_OFFSET		 0x014
1831 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1832 #define INIT_HCA_QPC_OFFSET		 0x020
1833 #define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
1834 #define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
1835 #define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
1836 #define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
1837 #define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
1838 #define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
1839 #define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
1840 #define	 INIT_HCA_EQE_CQE_STRIDE_OFFSET  (INIT_HCA_QPC_OFFSET + 0x3b)
1841 #define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
1842 #define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
1843 #define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
1844 #define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
1845 #define	INIT_HCA_NUM_SYS_EQS_OFFSET	(INIT_HCA_QPC_OFFSET + 0x6a)
1846 #define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
1847 #define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
1848 #define INIT_HCA_MCAST_OFFSET		 0x0c0
1849 #define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
1850 #define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1851 #define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
1852 #define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
1853 #define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1854 #define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
1855 #define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
1856 #define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1857 #define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1858 #define  INIT_HCA_FS_A0_OFFSET		  (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1859 #define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1860 #define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1861 #define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1862 #define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1863 #define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1864 #define INIT_HCA_TPT_OFFSET		 0x0f0
1865 #define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
1866 #define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
1867 #define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
1868 #define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
1869 #define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
1870 #define INIT_HCA_UAR_OFFSET		 0x120
1871 #define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
1872 #define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
1873 
1874 	mailbox = mlx4_alloc_cmd_mailbox(dev);
1875 	if (IS_ERR(mailbox))
1876 		return PTR_ERR(mailbox);
1877 	inbox = mailbox->buf;
1878 
1879 	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1880 
1881 	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1882 		((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
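	/* For example, a 64-byte cache line gives ilog2() == 6, so the byte
	 * written above is ((6 - 4) << 5) | (1 << 4) = 0x50; the (1 << 4)
	 * bit is set unconditionally here.
	 */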
1883 
1884 #if defined(__LITTLE_ENDIAN)
1885 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1886 #elif defined(__BIG_ENDIAN)
1887 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1888 #else
1889 #error Host endianness not defined
1890 #endif
1891 	/* Check port for UD address vector: */
1892 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1893 
1894 	/* Enable IPoIB checksumming if we can: */
1895 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1896 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1897 
1898 	/* Enable QoS support if module parameter set */
1899 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1900 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1901 
1902 	/* enable counters */
1903 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1904 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1905 
1906 	/* Enable RSS spread to fragmented IP packets when supported */
1907 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1908 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1909 
1910 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1911 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1912 		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1913 		dev->caps.eqe_size   = 64;
1914 		dev->caps.eqe_factor = 1;
1915 	} else {
1916 		dev->caps.eqe_size   = 32;
1917 		dev->caps.eqe_factor = 0;
1918 	}
1919 
1920 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1921 		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1922 		dev->caps.cqe_size   = 64;
1923 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1924 	} else {
1925 		dev->caps.cqe_size   = 32;
1926 	}
1927 
1928 	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1929 	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1930 	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1931 		dev->caps.eqe_size = cache_line_size();
1932 		dev->caps.cqe_size = cache_line_size();
1933 		dev->caps.eqe_factor = 0;
1934 		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1935 				      (ilog2(dev->caps.eqe_size) - 5)),
1936 			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);
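		/* E.g. assuming a 128-byte cache line: ilog2(128) - 5 = 2, so
		 * the byte written above is (2 << 4) | 2 = 0x22, i.e. a
		 * 128-byte stride for both EQEs and CQEs.
		 */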
1937 
1938 		/* Userspace still needs to know so it can support CQE > 32B */
1939 		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1940 	}
1941 
1942 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1943 		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1944 
1945 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
1946 
1947 	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
1948 	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
1949 	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
1950 	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
1951 	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
1952 	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
1953 	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
1954 	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
1955 	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
1956 	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
1957 	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
1958 	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
1959 	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1960 
1961 	/* steering attributes */
1962 	if (dev->caps.steering_mode ==
1963 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1964 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1965 			cpu_to_be32(1 <<
1966 				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1967 
1968 		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1969 		MLX4_PUT(inbox, param->log_mc_entry_sz,
1970 			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1971 		MLX4_PUT(inbox, param->log_mc_table_sz,
1972 			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1973 		/* Enable Ethernet flow steering
1974 		 * with udp unicast and tcp unicast
1975 		 */
1976 		if (dev->caps.dmfs_high_steer_mode !=
1977 		    MLX4_STEERING_DMFS_A0_STATIC)
1978 			MLX4_PUT(inbox,
1979 				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1980 				 INIT_HCA_FS_ETH_BITS_OFFSET);
1981 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1982 			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1983 		/* Enable IPoIB flow steering
1984 		 * with udp unicast and tcp unicast
1985 		 */
1986 		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1987 			 INIT_HCA_FS_IB_BITS_OFFSET);
1988 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1989 			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1990 
1991 		if (dev->caps.dmfs_high_steer_mode !=
1992 		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1993 			MLX4_PUT(inbox,
1994 				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
1995 				       << 6)),
1996 				 INIT_HCA_FS_A0_OFFSET);
1997 	} else {
1998 		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
1999 		MLX4_PUT(inbox, param->log_mc_entry_sz,
2000 			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
2001 		MLX4_PUT(inbox, param->log_mc_hash_sz,
2002 			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
2003 		MLX4_PUT(inbox, param->log_mc_table_sz,
2004 			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
2005 		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
2006 			MLX4_PUT(inbox, (u8) (1 << 3),
2007 				 INIT_HCA_UC_STEERING_OFFSET);
2008 	}
2009 
2010 	/* TPT attributes */
2011 
2012 	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
2013 	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
2014 	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
2015 	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
2016 	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
2017 
2018 	/* UAR attributes */
2019 
2020 	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
2021 	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
2022 
2023 	/* set parser VXLAN attributes */
2024 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
2025 		u8 parser_params = 0;
2026 		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
2027 	}
2028 
2029 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
2030 		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2031 
2032 	if (err)
2033 		mlx4_err(dev, "INIT_HCA returns %d\n", err);
2034 
2035 	mlx4_free_cmd_mailbox(dev, mailbox);
2036 	return err;
2037 }
2038 
2039 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2040 		   struct mlx4_init_hca_param *param)
2041 {
2042 	struct mlx4_cmd_mailbox *mailbox;
2043 	__be32 *outbox;
2044 	u32 dword_field;
2045 	int err;
2046 	u8 byte_field;
2047 	static const u8 a0_dmfs_query_hw_steering[] =  {
2048 		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
2049 		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
2050 		[2] = MLX4_STEERING_DMFS_A0_STATIC,
2051 		[3] = MLX4_STEERING_DMFS_A0_DISABLE
2052 	};
2053 
2054 #define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
2055 #define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c
2056 
2057 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2058 	if (IS_ERR(mailbox))
2059 		return PTR_ERR(mailbox);
2060 	outbox = mailbox->buf;
2061 
2062 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2063 			   MLX4_CMD_QUERY_HCA,
2064 			   MLX4_CMD_TIME_CLASS_B,
2065 			   !mlx4_is_slave(dev));
2066 	if (err)
2067 		goto out;
2068 
2069 	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
2070 	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
2071 
2072 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
2073 
2074 	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
2075 	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
2076 	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
2077 	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
2078 	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
2079 	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
2080 	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
2081 	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
2082 	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
2083 	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
2084 	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
2085 	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
2086 	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
2087 
2088 	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
2089 	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
2090 		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2091 	} else {
2092 		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
2093 		if (byte_field & 0x8)
2094 			param->steering_mode = MLX4_STEERING_MODE_B0;
2095 		else
2096 			param->steering_mode = MLX4_STEERING_MODE_A0;
2097 	}
2098 
2099 	if (dword_field & (1 << 13))
2100 		param->rss_ip_frags = 1;
2101 
2102 	/* steering attributes */
2103 	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2104 		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
2105 		MLX4_GET(param->log_mc_entry_sz, outbox,
2106 			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
2107 		MLX4_GET(param->log_mc_table_sz, outbox,
2108 			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
2109 		MLX4_GET(byte_field, outbox,
2110 			 INIT_HCA_FS_A0_OFFSET);
2111 		param->dmfs_high_steer_mode =
2112 			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
2113 	} else {
2114 		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
2115 		MLX4_GET(param->log_mc_entry_sz, outbox,
2116 			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
2117 		MLX4_GET(param->log_mc_hash_sz,  outbox,
2118 			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
2119 		MLX4_GET(param->log_mc_table_sz, outbox,
2120 			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
2121 	}
2122 
2123 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
2124 	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
2125 	if (byte_field & 0x20) /* 64-bytes eqe enabled */
2126 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
2127 	if (byte_field & 0x40) /* 64-bytes cqe enabled */
2128 		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
2129 
2130 	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
2131 	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
2132 	if (byte_field) {
2133 		param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
2134 		param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
2135 		param->cqe_size = 1 << ((byte_field &
2136 					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
2137 		param->eqe_size = 1 << (((byte_field &
2138 					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
2139 	}
2140 
2141 	/* TPT attributes */
2142 
2143 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
2144 	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
2145 	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
2146 	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
2147 	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
2148 
2149 	/* UAR attributes */
2150 
2151 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
2152 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
2153 
2154 	/* phv_check enable */
2155 	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
2156 	if (byte_field & 0x2)
2157 		param->phv_check_en = 1;
2158 out:
2159 	mlx4_free_cmd_mailbox(dev, mailbox);
2160 
2161 	return err;
2162 }
2163 
2164 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
2165 {
2166 	struct mlx4_cmd_mailbox *mailbox;
2167 	__be32 *outbox;
2168 	int err;
2169 
2170 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2171 	if (IS_ERR(mailbox)) {
2172 		mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
2173 		return PTR_ERR(mailbox);
2174 	}
2175 	outbox = mailbox->buf;
2176 
2177 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2178 			   MLX4_CMD_QUERY_HCA,
2179 			   MLX4_CMD_TIME_CLASS_B,
2180 			   !mlx4_is_slave(dev));
2181 	if (err) {
2182 		mlx4_warn(dev, "hca_core_clock update failed\n");
2183 		goto out;
2184 	}
2185 
2186 	MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
2187 
2188 out:
2189 	mlx4_free_cmd_mailbox(dev, mailbox);
2190 
2191 	return err;
2192 }
2193 
2194 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
2195  * and real QP0 are active, so that the paravirtualized QP0 is ready
2196  * to operate */
2197 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
2198 {
2199 	struct mlx4_priv *priv = mlx4_priv(dev);
2200 	/* irrelevant if not infiniband */
2201 	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
2202 	    priv->mfunc.master.qp0_state[port].qp0_active)
2203 		return 1;
2204 	return 0;
2205 }
2206 
2207 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
2208 			   struct mlx4_vhcr *vhcr,
2209 			   struct mlx4_cmd_mailbox *inbox,
2210 			   struct mlx4_cmd_mailbox *outbox,
2211 			   struct mlx4_cmd_info *cmd)
2212 {
2213 	struct mlx4_priv *priv = mlx4_priv(dev);
2214 	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2215 	int err;
2216 
2217 	if (port < 0)
2218 		return -EINVAL;
2219 
2220 	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
2221 		return 0;
2222 
2223 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2224 		/* Enable port only if it was previously disabled */
2225 		if (!priv->mfunc.master.init_port_ref[port]) {
2226 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2227 				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2228 			if (err)
2229 				return err;
2230 		}
2231 		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2232 	} else {
2233 		if (slave == mlx4_master_func_num(dev)) {
2234 			if (check_qp0_state(dev, slave, port) &&
2235 			    !priv->mfunc.master.qp0_state[port].port_active) {
2236 				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2237 					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2238 				if (err)
2239 					return err;
2240 				priv->mfunc.master.qp0_state[port].port_active = 1;
2241 				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2242 			}
2243 		} else
2244 			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2245 	}
2246 	++priv->mfunc.master.init_port_ref[port];
2247 	return 0;
2248 }
2249 
2250 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
2251 {
2252 	struct mlx4_cmd_mailbox *mailbox;
2253 	u32 *inbox;
2254 	int err;
2255 	u32 flags;
2256 	u16 field;
2257 
2258 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
2259 #define INIT_PORT_IN_SIZE          256
2260 #define INIT_PORT_FLAGS_OFFSET     0x00
2261 #define INIT_PORT_FLAG_SIG         (1 << 18)
2262 #define INIT_PORT_FLAG_NG          (1 << 17)
2263 #define INIT_PORT_FLAG_G0          (1 << 16)
2264 #define INIT_PORT_VL_SHIFT         4
2265 #define INIT_PORT_PORT_WIDTH_SHIFT 8
2266 #define INIT_PORT_MTU_OFFSET       0x04
2267 #define INIT_PORT_MAX_GID_OFFSET   0x06
2268 #define INIT_PORT_MAX_PKEY_OFFSET  0x0a
2269 #define INIT_PORT_GUID0_OFFSET     0x10
2270 #define INIT_PORT_NODE_GUID_OFFSET 0x18
2271 #define INIT_PORT_SI_GUID_OFFSET   0x20
2272 
2273 		mailbox = mlx4_alloc_cmd_mailbox(dev);
2274 		if (IS_ERR(mailbox))
2275 			return PTR_ERR(mailbox);
2276 		inbox = mailbox->buf;
2277 
2278 		flags = 0;
2279 		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
2280 		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
2281 		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);
2282 
2283 		field = 128 << dev->caps.ib_mtu_cap[port];
2284 		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
2285 		field = dev->caps.gid_table_len[port];
2286 		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
2287 		field = dev->caps.pkey_table_len[port];
2288 		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
2289 
2290 		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
2291 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2292 
2293 		mlx4_free_cmd_mailbox(dev, mailbox);
2294 	} else
2295 		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2296 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2297 
2298 	if (!err)
2299 		mlx4_hca_core_clock_update(dev);
2300 
2301 	return err;
2302 }
2303 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
2304 
2305 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2306 			    struct mlx4_vhcr *vhcr,
2307 			    struct mlx4_cmd_mailbox *inbox,
2308 			    struct mlx4_cmd_mailbox *outbox,
2309 			    struct mlx4_cmd_info *cmd)
2310 {
2311 	struct mlx4_priv *priv = mlx4_priv(dev);
2312 	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2313 	int err;
2314 
2315 	if (port < 0)
2316 		return -EINVAL;
2317 
2318 	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
2319 	    (1 << port)))
2320 		return 0;
2321 
2322 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2323 		if (priv->mfunc.master.init_port_ref[port] == 1) {
2324 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2325 				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2326 			if (err)
2327 				return err;
2328 		}
2329 		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2330 	} else {
2331 		/* infiniband port */
2332 		if (slave == mlx4_master_func_num(dev)) {
2333 			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2334 			    priv->mfunc.master.qp0_state[port].port_active) {
2335 				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2336 					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2337 				if (err)
2338 					return err;
2339 				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2340 				priv->mfunc.master.qp0_state[port].port_active = 0;
2341 			}
2342 		} else
2343 			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2344 	}
2345 	--priv->mfunc.master.init_port_ref[port];
2346 	return 0;
2347 }
2348 
2349 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2350 {
2351 	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2352 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2353 }
2354 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2355 
2356 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2357 {
2358 	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2359 			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2360 }
2361 
2362 struct mlx4_config_dev {
2363 	__be32	update_flags;
2364 	__be32	rsvd1[3];
2365 	__be16	vxlan_udp_dport;
2366 	__be16	rsvd2;
2367 	__be16  roce_v2_entropy;
2368 	__be16  roce_v2_udp_dport;
2369 	__be32	roce_flags;
2370 	__be32	rsvd4[25];
2371 	__be16	rsvd5;
2372 	u8	rsvd6;
2373 	u8	rx_checksum_val;
2374 };
2375 
2376 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
2377 #define MLX4_ROCE_V2_UDP_DPORT BIT(3)
2378 #define MLX4_DISABLE_RX_PORT BIT(18)
2379 
2380 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2381 {
2382 	int err;
2383 	struct mlx4_cmd_mailbox *mailbox;
2384 
2385 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2386 	if (IS_ERR(mailbox))
2387 		return PTR_ERR(mailbox);
2388 
2389 	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
2390 
2391 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2392 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2393 
2394 	mlx4_free_cmd_mailbox(dev, mailbox);
2395 	return err;
2396 }
2397 
2398 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2399 {
2400 	int err;
2401 	struct mlx4_cmd_mailbox *mailbox;
2402 
2403 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2404 	if (IS_ERR(mailbox))
2405 		return PTR_ERR(mailbox);
2406 
2407 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2408 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2409 
2410 	if (!err)
2411 		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
2412 
2413 	mlx4_free_cmd_mailbox(dev, mailbox);
2414 	return err;
2415 }
2416 
2417 /* Conversion between the HW values and the actual functionality.
2418  * The HW value is the array index, and the functionality is
2419  * determined by the flags stored at that index.
2420  */
2421 static const u8 config_dev_csum_flags[] = {
2422 	[0] =	0,
2423 	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
2424 	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
2425 		MLX4_RX_CSUM_MODE_L4,
2426 	[3] =	MLX4_RX_CSUM_MODE_L4			|
2427 		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
2428 		MLX4_RX_CSUM_MODE_MULTI_VLAN
2429 };
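/* Worked example with a hypothetical rx_checksum_val of 0x23: the port 1
 * nibble is 3 and the port 2 nibble is 2, selecting entries [3] and [2]
 * above, as decoded in mlx4_config_dev_retrieval() below.
 */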
2430 
2431 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2432 			      struct mlx4_config_dev_params *params)
2433 {
2434 	struct mlx4_config_dev config_dev = {0};
2435 	int err;
2436 	u8 csum_mask;
2437 
2438 #define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
2439 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
2440 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4
2441 
2442 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2443 		return -EOPNOTSUPP;
2444 
2445 	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2446 	if (err)
2447 		return err;
2448 
2449 	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
2450 			CONFIG_DEV_RX_CSUM_MODE_MASK;
2451 
2452 	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2453 		return -EINVAL;
2454 	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
2455 
2456 	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
2457 			CONFIG_DEV_RX_CSUM_MODE_MASK;
2458 
2459 	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
2460 		return -EINVAL;
2461 	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
2462 
2463 	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
2464 
2465 	return 0;
2466 }
2467 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
2468 
2469 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2470 {
2471 	struct mlx4_config_dev config_dev;
2472 
2473 	memset(&config_dev, 0, sizeof(config_dev));
2474 	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2475 	config_dev.vxlan_udp_dport = udp_port;
2476 
2477 	return mlx4_CONFIG_DEV_set(dev, &config_dev);
2478 }
2479 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
2480 
2481 #define CONFIG_DISABLE_RX_PORT BIT(15)
2482 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2483 {
2484 	struct mlx4_config_dev config_dev;
2485 
2486 	memset(&config_dev, 0, sizeof(config_dev));
2487 	config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2488 	if (dis)
2489 		config_dev.roce_flags =
2490 			cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2491 
2492 	return mlx4_CONFIG_DEV_set(dev, &config_dev);
2493 }
2494 
2495 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port)
2496 {
2497 	struct mlx4_config_dev config_dev;
2498 
2499 	memset(&config_dev, 0, sizeof(config_dev));
2500 	config_dev.update_flags    = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT);
2501 	config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port);
2502 
2503 	return mlx4_CONFIG_DEV_set(dev, &config_dev);
2504 }
2505 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port);
2506 
2507 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2508 {
2509 	struct mlx4_cmd_mailbox *mailbox;
2510 	struct {
2511 		__be32 v_port1;
2512 		__be32 v_port2;
2513 	} *v2p;
2514 	int err;
2515 
2516 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2517 	if (IS_ERR(mailbox))
2518 		return -ENOMEM;
2519 
2520 	v2p = mailbox->buf;
2521 	v2p->v_port1 = cpu_to_be32(port1);
2522 	v2p->v_port2 = cpu_to_be32(port2);
2523 
2524 	err = mlx4_cmd(dev, mailbox->dma, 0,
2525 		       MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2526 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2527 
2528 	mlx4_free_cmd_mailbox(dev, mailbox);
2529 	return err;
2530 }
2531 
2532 
2533 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2534 {
2535 	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2536 			       MLX4_CMD_SET_ICM_SIZE,
2537 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2538 	if (ret)
2539 		return ret;
2540 
2541 	/*
2542 	 * Round up number of system pages needed in case
2543 	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
2544 	 */
2545 	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
2546 		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
2547 
2548 	return 0;
2549 }
2550 
2551 int mlx4_NOP(struct mlx4_dev *dev)
2552 {
2553 	/* Input modifier of 0x1f means "finish as soon as possible." */
2554 	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2555 			MLX4_CMD_NATIVE);
2556 }
2557 
2558 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
2559 			     const u32 offset[],
2560 			     u32 value[], size_t array_len, u8 port)
2561 {
2562 	struct mlx4_cmd_mailbox *mailbox;
2563 	u32 *outbox;
2564 	size_t i;
2565 	int ret;
2566 
2567 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2568 	if (IS_ERR(mailbox))
2569 		return PTR_ERR(mailbox);
2570 
2571 	outbox = mailbox->buf;
2572 
2573 	ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
2574 			   MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
2575 			   MLX4_CMD_NATIVE);
2576 	if (ret)
2577 		goto out;
2578 
2579 	for (i = 0; i < array_len; i++) {
2580 		if (offset[i] > MLX4_MAILBOX_SIZE) {
2581 			ret = -EINVAL;
2582 			goto out;
2583 		}
2584 
2585 		MLX4_GET(value[i], outbox, offset[i]);
2586 	}
2587 
2588 out:
2589 	mlx4_free_cmd_mailbox(dev, mailbox);
2590 	return ret;
2591 }
2592 EXPORT_SYMBOL(mlx4_query_diag_counters);
2593 
2594 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2595 {
2596 	u8 port;
2597 	u32 *outbox;
2598 	struct mlx4_cmd_mailbox *mailbox;
2599 	u32 in_mod;
2600 	u32 guid_hi, guid_lo;
2601 	int err, ret = 0;
2602 #define MOD_STAT_CFG_PORT_OFFSET 8
2603 #define MOD_STAT_CFG_GUID_H	 0x14
2604 #define MOD_STAT_CFG_GUID_L	 0x1c
2605 
2606 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2607 	if (IS_ERR(mailbox))
2608 		return PTR_ERR(mailbox);
2609 	outbox = mailbox->buf;
2610 
2611 	for (port = 1; port <= dev->caps.num_ports; port++) {
2612 		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
2613 		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2614 				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2615 				   MLX4_CMD_NATIVE);
2616 		if (err) {
2617 			mlx4_err(dev, "Failed to get port %d uplink GUID\n",
2618 				 port);
2619 			ret = err;
2620 		} else {
2621 			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
2622 			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
2623 			dev->caps.phys_port_id[port] = (u64)guid_lo |
2624 						       (u64)guid_hi << 32;
2625 		}
2626 	}
2627 	mlx4_free_cmd_mailbox(dev, mailbox);
2628 	return ret;
2629 }
2630 
2631 #define MLX4_WOL_SETUP_MODE (5 << 28)
2632 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2633 {
2634 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2635 
2636 	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2637 			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2638 			    MLX4_CMD_NATIVE);
2639 }
2640 EXPORT_SYMBOL_GPL(mlx4_wol_read);
2641 
2642 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2643 {
2644 	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2645 
2646 	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2647 			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2648 }
2649 EXPORT_SYMBOL_GPL(mlx4_wol_write);
2650 
2651 enum {
2652 	ADD_TO_MCG = 0x26,
2653 };
2654 
2655 
2656 void mlx4_opreq_action(struct work_struct *work)
2657 {
2658 	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2659 					      opreq_task);
2660 	struct mlx4_dev *dev = &priv->dev;
2661 	int num_tasks = atomic_read(&priv->opreq_count);
2662 	struct mlx4_cmd_mailbox *mailbox;
2663 	struct mlx4_mgm *mgm;
2664 	u32 *outbox;
2665 	u32 modifier;
2666 	u16 token;
2667 	u16 type;
2668 	int err;
2669 	u32 num_qps;
2670 	struct mlx4_qp qp;
2671 	int i;
2672 	u8 rem_mcg;
2673 	u8 prot;
2674 
2675 #define GET_OP_REQ_MODIFIER_OFFSET	0x08
2676 #define GET_OP_REQ_TOKEN_OFFSET		0x14
2677 #define GET_OP_REQ_TYPE_OFFSET		0x1a
2678 #define GET_OP_REQ_DATA_OFFSET		0x20
2679 
2680 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2681 	if (IS_ERR(mailbox)) {
2682 		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2683 		return;
2684 	}
2685 	outbox = mailbox->buf;
2686 
2687 	while (num_tasks) {
2688 		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2689 				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2690 				   MLX4_CMD_NATIVE);
2691 		if (err) {
2692 			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2693 				 err);
2694 			return;
2695 		}
2696 		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2697 		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2698 		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2699 		type &= 0xfff;
2700 
2701 		switch (type) {
2702 		case ADD_TO_MCG:
2703 			if (dev->caps.steering_mode ==
2704 			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
2705 				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2706 				err = EPERM;
2707 				break;
2708 			}
2709 			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2710 						  GET_OP_REQ_DATA_OFFSET);
2711 			num_qps = be32_to_cpu(mgm->members_count) &
2712 				  MGM_QPN_MASK;
2713 			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2714 			prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2715 
2716 			for (i = 0; i < num_qps; i++) {
2717 				qp.qpn = be32_to_cpu(mgm->qp[i]);
2718 				if (rem_mcg)
2719 					err = mlx4_multicast_detach(dev, &qp,
2720 								    mgm->gid,
2721 								    prot, 0);
2722 				else
2723 					err = mlx4_multicast_attach(dev, &qp,
2724 								    mgm->gid,
2725 								    mgm->gid[5],
2726 								    0, prot,
2727 								    NULL);
2728 				if (err)
2729 					break;
2730 			}
2731 			break;
2732 		default:
2733 			mlx4_warn(dev, "Bad type for required operation\n");
2734 			err = EINVAL;
2735 			break;
2736 		}
2737 		err = mlx4_cmd(dev, 0, ((u32) err |
2738 					(__force u32)cpu_to_be32(token) << 16),
2739 			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2740 			       MLX4_CMD_NATIVE);
2741 		if (err) {
2742 			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2743 				 err);
2744 			goto out;
2745 		}
2746 		memset(outbox, 0, 0xffc);
2747 		num_tasks = atomic_dec_return(&priv->opreq_count);
2748 	}
2749 
2750 out:
2751 	mlx4_free_cmd_mailbox(dev, mailbox);
2752 }
2753 
2754 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2755 					  struct mlx4_cmd_mailbox *mailbox)
2756 {
2757 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
2758 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
2759 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
2760 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70
2761 
2762 	u32 set_attr_mask, getresp_attr_mask;
2763 	u32 trap_attr_mask, traprepress_attr_mask;
2764 
2765 	MLX4_GET(set_attr_mask, mailbox->buf,
2766 		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2767 	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2768 		 set_attr_mask);
2769 
2770 	MLX4_GET(getresp_attr_mask, mailbox->buf,
2771 		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2772 	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2773 		 getresp_attr_mask);
2774 
2775 	MLX4_GET(trap_attr_mask, mailbox->buf,
2776 		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2777 	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2778 		 trap_attr_mask);
2779 
2780 	MLX4_GET(traprepress_attr_mask, mailbox->buf,
2781 		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2782 	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2783 		 traprepress_attr_mask);
2784 
2785 	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2786 	    traprepress_attr_mask)
2787 		return 1;
2788 
2789 	return 0;
2790 }
2791 
2792 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2793 {
2794 	struct mlx4_cmd_mailbox *mailbox;
2795 	int err;
2796 
2797 	/* Check if mad_demux is supported */
2798 	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2799 		return 0;
2800 
2801 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2802 	if (IS_ERR(mailbox)) {
2803 		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2804 		return -ENOMEM;
2805 	}
2806 
2807 	/* Query mad_demux to find out which MADs are handled by the internal SMA */
2808 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2809 			   MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2810 			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2811 	if (err) {
2812 		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2813 			  err);
2814 		goto out;
2815 	}
2816 
2817 	if (mlx4_check_smp_firewall_active(dev, mailbox))
2818 		dev->flags |= MLX4_FLAG_SECURE_HOST;
2819 
2820 	/* Config mad_demux to handle all MADs returned by the query above */
2821 	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2822 		       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2823 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2824 	if (err) {
2825 		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2826 		goto out;
2827 	}
2828 
2829 	if (dev->flags & MLX4_FLAG_SECURE_HOST)
2830 		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2831 out:
2832 	mlx4_free_cmd_mailbox(dev, mailbox);
2833 	return err;
2834 }
2835 
2836 /* Access Reg commands */
2837 enum mlx4_access_reg_masks {
2838 	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
2839 	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
2840 	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
2841 };
2842 
2843 struct mlx4_access_reg {
2844 	__be16 constant1;
2845 	u8 status;
2846 	u8 resrvd1;
2847 	__be16 reg_id;
2848 	u8 method;
2849 	u8 constant2;
2850 	__be32 resrvd2[2];
2851 	__be16 len_const;
2852 	__be16 resrvd3;
2853 #define MLX4_ACCESS_REG_HEADER_SIZE (20)
2854 	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
2855 } __attribute__((__packed__));
2856 
2857 /**
2858  * mlx4_ACCESS_REG - Generic access reg command.
2859  * @dev: mlx4_dev.
2860  * @reg_id: register ID to access.
2861  * @method: Access method Read/Write.
2862  * @reg_len: register length to Read/Write in bytes.
2863  * @reg_data: reg_data pointer to Read/Write From/To.
2864  *
2865  * Access ConnectX registers FW command.
2866  * Returns 0 on success and copies outbox mlx4_access_reg data
2867  * field into reg_data or a negative error code.
2868  */
2869 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2870 			   enum mlx4_access_reg_method method,
2871 			   u16 reg_len, void *reg_data)
2872 {
2873 	struct mlx4_cmd_mailbox *inbox, *outbox;
2874 	struct mlx4_access_reg *inbuf, *outbuf;
2875 	int err;
2876 
2877 	inbox = mlx4_alloc_cmd_mailbox(dev);
2878 	if (IS_ERR(inbox))
2879 		return PTR_ERR(inbox);
2880 
2881 	outbox = mlx4_alloc_cmd_mailbox(dev);
2882 	if (IS_ERR(outbox)) {
2883 		mlx4_free_cmd_mailbox(dev, inbox);
2884 		return PTR_ERR(outbox);
2885 	}
2886 
2887 	inbuf = inbox->buf;
2888 	outbuf = outbox->buf;
2889 
2890 	inbuf->constant1 = cpu_to_be16(0x1 << 11 | 0x4);
2891 	inbuf->constant2 = 0x1;
2892 	inbuf->reg_id = cpu_to_be16(reg_id);
2893 	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2894 
2895 	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2896 	inbuf->len_const =
2897 		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2898 			    ((0x3) << 12));
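	/* Example with a hypothetical reg_len of 0x40 bytes: 0x40 / 4 + 1 =
	 * 17 dwords, so len_const = 0x3011; the 0x3 in the upper nibble
	 * appears to be a constant expected by the firmware.
	 */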
2899 
2900 	memcpy(inbuf->reg_data, reg_data, reg_len);
2901 	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2902 			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2903 			   MLX4_CMD_WRAPPED);
2904 	if (err)
2905 		goto out;
2906 
2907 	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2908 		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2909 		mlx4_err(dev,
2910 			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2911 			 reg_id, err);
2912 		goto out;
2913 	}
2914 
2915 	memcpy(reg_data, outbuf->reg_data, reg_len);
2916 out:
2917 	mlx4_free_cmd_mailbox(dev, inbox);
2918 	mlx4_free_cmd_mailbox(dev, outbox);
2919 	return err;
2920 }
2921 
2922 /* ConnectX registers IDs */
2923 enum mlx4_reg_id {
2924 	MLX4_REG_ID_PTYS = 0x5004,
2925 };
2926 
2927 /**
2928  * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
2929  * register
2930  * @dev: mlx4_dev.
2931  * @method: Access method Read/Write.
2932  * @ptys_reg: PTYS register data pointer.
2933  *
2934  * Access ConnectX PTYS register, to Read/Write Port Type/Speed
2935  * configuration
2936  * Returns 0 on success or a negative error code.
2937  */
2938 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2939 			 enum mlx4_access_reg_method method,
2940 			 struct mlx4_ptys_reg *ptys_reg)
2941 {
2942 	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2943 			       method, sizeof(*ptys_reg), ptys_reg);
2944 }
2945 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
2946 
2947 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2948 			    struct mlx4_vhcr *vhcr,
2949 			    struct mlx4_cmd_mailbox *inbox,
2950 			    struct mlx4_cmd_mailbox *outbox,
2951 			    struct mlx4_cmd_info *cmd)
2952 {
2953 	struct mlx4_access_reg *inbuf = inbox->buf;
2954 	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2955 	u16 reg_id = be16_to_cpu(inbuf->reg_id);
2956 
2957 	if (slave != mlx4_master_func_num(dev) &&
2958 	    method == MLX4_ACCESS_REG_WRITE)
2959 		return -EPERM;
2960 
2961 	if (reg_id == MLX4_REG_ID_PTYS) {
2962 		struct mlx4_ptys_reg *ptys_reg =
2963 			(struct mlx4_ptys_reg *)inbuf->reg_data;
2964 
2965 		ptys_reg->local_port =
2966 			mlx4_slave_convert_port(dev, slave,
2967 						ptys_reg->local_port);
2968 	}
2969 
2970 	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
2971 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2972 			    MLX4_CMD_NATIVE);
2973 }
2974 
2975 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
2976 {
2977 #define SET_PORT_GEN_PHV_VALID	0x10
2978 #define SET_PORT_GEN_PHV_EN	0x80
2979 
2980 	struct mlx4_cmd_mailbox *mailbox;
2981 	struct mlx4_set_port_general_context *context;
2982 	u32 in_mod;
2983 	int err;
2984 
2985 	mailbox = mlx4_alloc_cmd_mailbox(dev);
2986 	if (IS_ERR(mailbox))
2987 		return PTR_ERR(mailbox);
2988 	context = mailbox->buf;
2989 
2990 	context->flags2 |=  SET_PORT_GEN_PHV_VALID;
2991 	if (phv_bit)
2992 		context->phv_en |=  SET_PORT_GEN_PHV_EN;
2993 
2994 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
2995 	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
2996 		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
2997 		       MLX4_CMD_NATIVE);
2998 
2999 	mlx4_free_cmd_mailbox(dev, mailbox);
3000 	return err;
3001 }
3002 
3003 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
3004 {
3005 	int err;
3006 	struct mlx4_func_cap func_cap;
3007 
3008 	memset(&func_cap, 0, sizeof(func_cap));
3009 	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3010 	if (!err)
3011 		*phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
3012 	return err;
3013 }
3014 EXPORT_SYMBOL(get_phv_bit);
3015 
3016 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
3017 {
3018 	int ret;
3019 
3020 	if (mlx4_is_slave(dev))
3021 		return -EPERM;
3022 
3023 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3024 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3025 		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
3026 		if (!ret)
3027 			dev->caps.phv_bit[port] = new_val;
3028 		return ret;
3029 	}
3030 
3031 	return -EOPNOTSUPP;
3032 }
3033 EXPORT_SYMBOL(set_phv_bit);
3034 
3035 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
3036 				      bool *vlan_offload_disabled)
3037 {
3038 	struct mlx4_func_cap func_cap;
3039 	int err;
3040 
3041 	memset(&func_cap, 0, sizeof(func_cap));
3042 	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
3043 	if (!err)
3044 		*vlan_offload_disabled =
3045 			!!(func_cap.flags0 &
3046 			   QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
3047 	return err;
3048 }
3049 EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
3050 
3051 void mlx4_replace_zero_macs(struct mlx4_dev *dev)
3052 {
3053 	int i;
3054 	u8 mac_addr[ETH_ALEN];
3055 
3056 	dev->port_random_macs = 0;
3057 	for (i = 1; i <= dev->caps.num_ports; ++i)
3058 		if (!dev->caps.def_mac[i] &&
3059 		    dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
3060 			eth_random_addr(mac_addr);
3061 			dev->port_random_macs |= 1 << i;
3062 			dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
3063 		}
3064 }
3065 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
3066