1 /*
2  * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3  * driver for Linux.
4  *
5  * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/pci.h>
37 
38 #include "t4vf_common.h"
39 #include "t4vf_defs.h"
40 
41 #include "../cxgb4/t4_regs.h"
42 #include "../cxgb4/t4_values.h"
43 #include "../cxgb4/t4fw_api.h"
44 
45 /*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than the 0xffffffff/0xeeeeeeee "not ready" values).
 * Return an error if it doesn't become ready ...
49  */
50 int t4vf_wait_dev_ready(struct adapter *adapter)
51 {
52 	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
53 	const u32 notready1 = 0xffffffff;
54 	const u32 notready2 = 0xeeeeeeee;
55 	u32 val;
56 
57 	val = t4_read_reg(adapter, whoami);
58 	if (val != notready1 && val != notready2)
59 		return 0;
60 	msleep(500);
61 	val = t4_read_reg(adapter, whoami);
62 	if (val != notready1 && val != notready2)
63 		return 0;
64 	else
65 		return -EIO;
66 }
67 
68 /*
69  * Get the reply to a mailbox command and store it in @rpl in big-endian order
70  * (since the firmware data structures are specified in a big-endian layout).
71  */
72 static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
73 			 u32 mbox_data)
74 {
75 	for ( ; size; size -= 8, mbox_data += 8)
76 		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
77 }
78 
79 /**
80  *	t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
81  *	@adapter: the adapter
82  *	@cmd: the Firmware Mailbox Command or Reply
83  *	@size: command length in bytes
84  *	@access: the time (ms) needed to access the Firmware Mailbox
85  *	@execute: the time (ms) the command spent being executed
86  */
87 static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
88 			     int size, int access, int execute)
89 {
90 	struct mbox_cmd_log *log = adapter->mbox_log;
91 	struct mbox_cmd *entry;
92 	int i;
93 
94 	entry = mbox_cmd_log_entry(log, log->cursor++);
95 	if (log->cursor == log->size)
96 		log->cursor = 0;
97 
98 	for (i = 0; i < size / 8; i++)
99 		entry->cmd[i] = be64_to_cpu(cmd[i]);
100 	while (i < MBOX_LEN / 8)
101 		entry->cmd[i++] = 0;
102 	entry->timestamp = jiffies;
103 	entry->seqno = log->seqno++;
104 	entry->access = access;
105 	entry->execute = execute;
106 }
107 
108 /**
109  *	t4vf_wr_mbox_core - send a command to FW through the mailbox
110  *	@adapter: the adapter
111  *	@cmd: the command to write
112  *	@size: command length in bytes
113  *	@rpl: where to optionally store the reply
114  *	@sleep_ok: if true we may sleep while awaiting command completion
115  *
116  *	Sends the given command to FW through the mailbox and waits for the
117  *	FW to execute the command.  If @rpl is not %NULL it is used to store
118  *	the FW's reply to the command.  The command and its optional reply
119  *	are of the same length.  FW can take up to 500 ms to respond.
120  *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
122  *
123  *	The return value is 0 on success or a negative errno on failure.  A
124  *	failure can happen either because we are not able to execute the
125  *	command or FW executes it but signals an error.  In the latter case
126  *	the return value is the error code indicated by FW (negated).
127  */
128 int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
129 		      void *rpl, bool sleep_ok)
130 {
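	/* Progressive backoff schedule (in milliseconds) used when polling
	 * for mailbox ownership and for the firmware's reply below; once
	 * delay_idx reaches the final entry, the 100ms delay simply repeats.
	 */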
131 	static const int delay[] = {
132 		1, 1, 3, 5, 10, 10, 20, 50, 100
133 	};
134 
135 	u16 access = 0, execute = 0;
136 	u32 v, mbox_data;
137 	int i, ms, delay_idx, ret;
138 	const __be64 *p;
139 	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
140 	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
141 	__be64 cmd_rpl[MBOX_LEN / 8];
142 	struct mbox_list entry;
143 
144 	/* In T6, mailbox size is changed to 128 bytes to avoid
145 	 * invalidating the entire prefetch buffer.
146 	 */
147 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
148 		mbox_data = T4VF_MBDATA_BASE_ADDR;
149 	else
150 		mbox_data = T6VF_MBDATA_BASE_ADDR;
151 
152 	/*
153 	 * Commands must be multiples of 16 bytes in length and may not be
154 	 * larger than the size of the Mailbox Data register array.
155 	 */
156 	if ((size % 16) != 0 ||
157 	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
158 		return -EINVAL;
159 
160 	/* Queue ourselves onto the mailbox access list.  When our entry is at
161 	 * the front of the list, we have rights to access the mailbox.  So we
162 	 * wait [for a while] till we're at the front [or bail out with an
163 	 * EBUSY] ...
164 	 */
165 	spin_lock(&adapter->mbox_lock);
166 	list_add_tail(&entry.list, &adapter->mlist.list);
167 	spin_unlock(&adapter->mbox_lock);
168 
169 	delay_idx = 0;
170 	ms = delay[0];
171 
172 	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list, but this is a start.  We very rarely
		 * contend for access to the mailbox ...
177 		 */
178 		if (i > FW_CMD_MAX_TIMEOUT) {
179 			spin_lock(&adapter->mbox_lock);
180 			list_del(&entry.list);
181 			spin_unlock(&adapter->mbox_lock);
182 			ret = -EBUSY;
183 			t4vf_record_mbox(adapter, cmd, size, access, ret);
184 			return ret;
185 		}
186 
187 		/* If we're at the head, break out and start the mailbox
188 		 * protocol.
189 		 */
190 		if (list_first_entry(&adapter->mlist.list, struct mbox_list,
191 				     list) == &entry)
192 			break;
193 
194 		/* Delay for a bit before checking again ... */
195 		if (sleep_ok) {
196 			ms = delay[delay_idx];  /* last element may repeat */
197 			if (delay_idx < ARRAY_SIZE(delay) - 1)
198 				delay_idx++;
199 			msleep(ms);
200 		} else {
201 			mdelay(ms);
202 		}
203 	}
204 
205 	/*
206 	 * Loop trying to get ownership of the mailbox.  Return an error
207 	 * if we can't gain ownership.
208 	 */
209 	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
210 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
211 		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
212 	if (v != MBOX_OWNER_DRV) {
213 		spin_lock(&adapter->mbox_lock);
214 		list_del(&entry.list);
215 		spin_unlock(&adapter->mbox_lock);
216 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
217 		t4vf_record_mbox(adapter, cmd, size, access, ret);
218 		return ret;
219 	}
220 
221 	/*
222 	 * Write the command array into the Mailbox Data register array and
223 	 * transfer ownership of the mailbox to the firmware.
224 	 *
225 	 * For the VFs, the Mailbox Data "registers" are actually backed by
226 	 * T4's "MA" interface rather than PL Registers (as is the case for
227 	 * the PFs).  Because these are in different coherency domains, the
228 	 * write to the VF's PL-register-backed Mailbox Control can race in
229 	 * front of the writes to the MA-backed VF Mailbox Data "registers".
230 	 * So we need to do a read-back on at least one byte of the VF Mailbox
231 	 * Data registers before doing the write to the VF Mailbox Control
232 	 * register.
233 	 */
234 	if (cmd_op != FW_VI_STATS_CMD)
235 		t4vf_record_mbox(adapter, cmd, size, access, 0);
236 	for (i = 0, p = cmd; i < size; i += 8)
237 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
238 	t4_read_reg(adapter, mbox_data);         /* flush write */
239 
240 	t4_write_reg(adapter, mbox_ctl,
241 		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
242 	t4_read_reg(adapter, mbox_ctl);          /* flush write */
243 
244 	/*
245 	 * Spin waiting for firmware to acknowledge processing our command.
246 	 */
247 	delay_idx = 0;
248 	ms = delay[0];
249 
250 	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
251 		if (sleep_ok) {
252 			ms = delay[delay_idx];
253 			if (delay_idx < ARRAY_SIZE(delay) - 1)
254 				delay_idx++;
255 			msleep(ms);
256 		} else
257 			mdelay(ms);
258 
259 		/*
260 		 * If we're the owner, see if this is the reply we wanted.
261 		 */
262 		v = t4_read_reg(adapter, mbox_ctl);
263 		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
264 			/*
265 			 * If the Message Valid bit isn't on, revoke ownership
266 			 * of the mailbox and continue waiting for our reply.
267 			 */
268 			if ((v & MBMSGVALID_F) == 0) {
269 				t4_write_reg(adapter, mbox_ctl,
270 					     MBOWNER_V(MBOX_OWNER_NONE));
271 				continue;
272 			}
273 
274 			/*
275 			 * We now have our reply.  Extract the command return
276 			 * value, copy the reply back to our caller's buffer
277 			 * (if specified) and revoke ownership of the mailbox.
278 			 * We return the (negated) firmware command return
279 			 * code (this depends on FW_SUCCESS == 0).
280 			 */
281 			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
282 
283 			/* return value in low-order little-endian word */
284 			v = be64_to_cpu(cmd_rpl[0]);
285 
286 			if (rpl) {
287 				/* request bit in high-order BE word */
288 				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
289 					 & FW_CMD_REQUEST_F) == 0);
290 				memcpy(rpl, cmd_rpl, size);
291 				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
292 					 & FW_CMD_REQUEST_F) != 0);
293 			}
294 			t4_write_reg(adapter, mbox_ctl,
295 				     MBOWNER_V(MBOX_OWNER_NONE));
296 			execute = i + ms;
297 			if (cmd_op != FW_VI_STATS_CMD)
298 				t4vf_record_mbox(adapter, cmd_rpl, size, access,
299 						 execute);
300 			spin_lock(&adapter->mbox_lock);
301 			list_del(&entry.list);
302 			spin_unlock(&adapter->mbox_lock);
303 			return -FW_CMD_RETVAL_G(v);
304 		}
305 	}
306 
307 	/* We timed out.  Return the error ... */
308 	ret = -ETIMEDOUT;
309 	t4vf_record_mbox(adapter, cmd, size, access, ret);
310 	spin_lock(&adapter->mbox_lock);
311 	list_del(&entry.list);
312 	spin_unlock(&adapter->mbox_lock);
313 	return ret;
314 }
315 
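/* Link capabilities we may advertise to a link partner: every supported
 * speed plus Autonegotiation itself.
 */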
316 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
317 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
318 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
319 
320 /**
321  *	init_link_config - initialize a link's SW state
322  *	@lc: structure holding the link state
323  *	@caps: link capabilities
324  *
325  *	Initializes the SW state maintained for each link, including the link's
326  *	capabilities and default speed/flow-control/autonegotiation settings.
327  */
328 static void init_link_config(struct link_config *lc, unsigned int caps)
329 {
330 	lc->supported = caps;
331 	lc->lp_advertising = 0;
332 	lc->requested_speed = 0;
333 	lc->speed = 0;
334 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
335 	if (lc->supported & FW_PORT_CAP_ANEG) {
336 		lc->advertising = lc->supported & ADVERT_MASK;
337 		lc->autoneg = AUTONEG_ENABLE;
338 		lc->requested_fc |= PAUSE_AUTONEG;
339 	} else {
340 		lc->advertising = 0;
341 		lc->autoneg = AUTONEG_DISABLE;
342 	}
343 }
344 
345 /**
346  *	t4vf_port_init - initialize port hardware/software state
347  *	@adapter: the adapter
348  *	@pidx: the adapter port index
349  */
350 int t4vf_port_init(struct adapter *adapter, int pidx)
351 {
352 	struct port_info *pi = adap2pinfo(adapter, pidx);
353 	struct fw_vi_cmd vi_cmd, vi_rpl;
354 	struct fw_port_cmd port_cmd, port_rpl;
355 	int v;
356 
357 	/*
358 	 * Execute a VI Read command to get our Virtual Interface information
359 	 * like MAC address, etc.
360 	 */
361 	memset(&vi_cmd, 0, sizeof(vi_cmd));
362 	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
363 				       FW_CMD_REQUEST_F |
364 				       FW_CMD_READ_F);
365 	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
366 	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
367 	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
368 	if (v)
369 		return v;
370 
371 	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
372 	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
373 	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
374 
375 	/*
376 	 * If we don't have read access to our port information, we're done
377 	 * now.  Otherwise, execute a PORT Read command to get it ...
378 	 */
379 	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
380 		return 0;
381 
382 	memset(&port_cmd, 0, sizeof(port_cmd));
383 	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
384 					    FW_CMD_REQUEST_F |
385 					    FW_CMD_READ_F |
386 					    FW_PORT_CMD_PORTID_V(pi->port_id));
387 	port_cmd.action_to_len16 =
388 		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
389 			    FW_LEN16(port_cmd));
390 	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
391 	if (v)
392 		return v;
393 
394 	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
395 	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
396 			FW_PORT_CMD_MDIOADDR_G(v) : -1;
397 	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
398 	pi->mod_type = FW_PORT_MOD_TYPE_NA;
399 
400 	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
401 
402 	return 0;
403 }
404 
405 /**
 *	t4vf_fw_reset - issue a reset to FW
 *	@adapter: the adapter
408  *
409  *	Issues a reset command to FW.  For a Physical Function this would
410  *	result in the Firmware resetting all of its state.  For a Virtual
411  *	Function this just resets the state associated with the VF.
412  */
413 int t4vf_fw_reset(struct adapter *adapter)
414 {
415 	struct fw_reset_cmd cmd;
416 
417 	memset(&cmd, 0, sizeof(cmd));
418 	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
419 				      FW_CMD_WRITE_F);
420 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
421 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
422 }
423 
424 /**
425  *	t4vf_query_params - query FW or device parameters
426  *	@adapter: the adapter
427  *	@nparams: the number of parameters
428  *	@params: the parameter names
429  *	@vals: the parameter values
430  *
431  *	Reads the values of firmware or device parameters.  Up to 7 parameters
432  *	can be queried at once.
433  */
434 static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
435 			     const u32 *params, u32 *vals)
436 {
437 	int i, ret;
438 	struct fw_params_cmd cmd, rpl;
439 	struct fw_params_param *p;
440 	size_t len16;
441 
442 	if (nparams > 7)
443 		return -EINVAL;
444 
445 	memset(&cmd, 0, sizeof(cmd));
446 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
447 				    FW_CMD_REQUEST_F |
448 				    FW_CMD_READ_F);
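	/* The command length (in 16-byte units) covers the command header
	 * plus one parameter slot for each requested parameter.
	 */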
449 	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
450 				      param[nparams].mnem), 16);
451 	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
452 	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
		p->mnem = cpu_to_be32(*params++);
454 
455 	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
456 	if (ret == 0)
457 		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
458 			*vals++ = be32_to_cpu(p->val);
459 	return ret;
460 }
461 
462 /**
463  *	t4vf_set_params - sets FW or device parameters
464  *	@adapter: the adapter
465  *	@nparams: the number of parameters
466  *	@params: the parameter names
467  *	@vals: the parameter values
468  *
469  *	Sets the values of firmware or device parameters.  Up to 7 parameters
470  *	can be specified at once.
471  */
472 int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
473 		    const u32 *params, const u32 *vals)
474 {
475 	int i;
476 	struct fw_params_cmd cmd;
477 	struct fw_params_param *p;
478 	size_t len16;
479 
480 	if (nparams > 7)
481 		return -EINVAL;
482 
483 	memset(&cmd, 0, sizeof(cmd));
484 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
485 				    FW_CMD_REQUEST_F |
486 				    FW_CMD_WRITE_F);
487 	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
488 				      param[nparams]), 16);
489 	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
490 	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
491 		p->mnem = cpu_to_be32(*params++);
492 		p->val = cpu_to_be32(*vals++);
493 	}
494 
495 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
496 }
497 
498 /**
499  *	t4vf_fl_pkt_align - return the fl packet alignment
500  *	@adapter: the adapter
501  *
 *	T4 has a single field to specify both the packing and padding boundary.
 *	T5 onwards has separate fields for these, and hence the alignment for
 *	the next packet offset is the maximum of the two.  T6 additionally
 *	changes the Ingress Padding Boundary Shift, so it's all a mess and
 *	it's best if we put this in low-level Common Code ...
507  *
508  */
509 int t4vf_fl_pkt_align(struct adapter *adapter)
510 {
511 	u32 sge_control, sge_control2;
512 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
513 
514 	sge_control = adapter->params.sge.sge_control;
515 
516 	/* T4 uses a single control field to specify both the PCIe Padding and
517 	 * Packing Boundary.  T5 introduced the ability to specify these
518 	 * separately.  The actual Ingress Packet Data alignment boundary
519 	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary, but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values in T6 start at 8B, whereas they
	 * start at 32B for T4 and T5.
527 	 */
528 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
529 		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
530 	else
531 		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
532 
533 	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
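	/* E.g. an INGPADBOUNDARY field value of 0 yields the minimum padding
	 * boundary: 32 bytes on T4/T5 (shift of 5) versus 8 bytes on T6
	 * (shift of 3).
	 */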
534 
535 	fl_align = ingpadboundary;
536 	if (!is_t4(adapter->params.chip)) {
537 		/* T5 has a different interpretation of one of the PCIe Packing
538 		 * Boundary values.
539 		 */
540 		sge_control2 = adapter->params.sge.sge_control2;
541 		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
542 		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
543 			ingpackboundary = 16;
544 		else
545 			ingpackboundary = 1 << (ingpackboundary +
546 						INGPACKBOUNDARY_SHIFT_X);
547 
548 		fl_align = max(ingpadboundary, ingpackboundary);
549 	}
550 	return fl_align;
551 }
552 
553 /**
554  *	t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
555  *	@adapter: the adapter
556  *	@qid: the Queue ID
557  *	@qtype: the Ingress or Egress type for @qid
558  *	@pbar2_qoffset: BAR2 Queue Offset
559  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
560  *
561  *	Returns the BAR2 SGE Queue Registers information associated with the
562  *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
565  *
566  *	This may return an error which indicates that BAR2 SGE Queue
567  *	registers aren't available.  If an error is not returned, then the
568  *	following values are returned:
569  *
570  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID of @qid, or 0
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used, e.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
577  */
578 int t4vf_bar2_sge_qregs(struct adapter *adapter,
579 			unsigned int qid,
580 			enum t4_bar2_qtype qtype,
581 			u64 *pbar2_qoffset,
582 			unsigned int *pbar2_qid)
583 {
584 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
585 	u64 bar2_page_offset, bar2_qoffset;
586 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
587 
588 	/* T4 doesn't support BAR2 SGE Queue registers.
589 	 */
590 	if (is_t4(adapter->params.chip))
591 		return -EINVAL;
592 
593 	/* Get our SGE Page Size parameters.
594 	 */
595 	page_shift = adapter->params.sge.sge_vf_hps + 10;
596 	page_size = 1 << page_shift;
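	/* E.g. a VF Host Page Size field value of 2 gives page_shift = 12,
	 * i.e. a 4KB BAR2 page.
	 */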
597 
598 	/* Get the right Queues per Page parameters for our Queue.
599 	 */
600 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
601 		     ? adapter->params.sge.sge_vf_eq_qpp
602 		     : adapter->params.sge.sge_vf_iq_qpp);
603 	qpp_mask = (1 << qpp_shift) - 1;
604 
605 	/* Calculate the basics of the BAR2 SGE Queue register area:
606 	 *  o The BAR2 page the Queue registers will be in.
607 	 *  o The BAR2 Queue ID.
608 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
609 	 */
610 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
611 	bar2_qid = qid & qpp_mask;
612 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
613 
614 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
615 	 * hardware will infer the Absolute Queue ID simply from the writes to
616 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
617 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
618 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
619 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
620 	 * from the BAR2 Page and BAR2 Queue ID.
621 	 *
	 * One important consequence of this is that some BAR2 SGE registers
623 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
624 	 * there.  But other registers synthesize the SGE Queue ID purely
625 	 * from the writes to the registers -- the Write Combined Doorbell
626 	 * Buffer is a good example.  These BAR2 SGE Registers are only
627 	 * available for those BAR2 SGE Register areas where the SGE Absolute
628 	 * Queue ID can be inferred from simple writes.
629 	 */
630 	bar2_qoffset = bar2_page_offset;
631 	bar2_qinferred = (bar2_qid_offset < page_size);
632 	if (bar2_qinferred) {
633 		bar2_qoffset += bar2_qid_offset;
634 		bar2_qid = 0;
635 	}
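	/* Example (assuming a 4KB BAR2 page): queues whose register area
	 * starts below the page boundary return a non-zero bar2_qoffset and
	 * a bar2_qid of 0 (inferred Queue ID); queues beyond that keep their
	 * relative bar2_qid and use the start of the page as their offset.
	 */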
636 
637 	*pbar2_qoffset = bar2_qoffset;
638 	*pbar2_qid = bar2_qid;
639 	return 0;
640 }
641 
642 /**
 *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
644  *	@adapter: the adapter
645  *
646  *	Retrieves various core SGE parameters in the form of hardware SGE
647  *	register values.  The caller is responsible for decoding these as
648  *	needed.  The SGE parameters are stored in @adapter->params.sge.
649  */
650 int t4vf_get_sge_params(struct adapter *adapter)
651 {
652 	struct sge_params *sge_params = &adapter->params.sge;
653 	u32 params[7], vals[7];
654 	int v;
655 
656 	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
657 		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
658 	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
659 		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
660 	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
661 		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
662 	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
663 		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
664 	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
665 		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
666 	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
667 		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
668 	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
669 		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
670 	v = t4vf_query_params(adapter, 7, params, vals);
671 	if (v)
672 		return v;
673 	sge_params->sge_control = vals[0];
674 	sge_params->sge_host_page_size = vals[1];
675 	sge_params->sge_fl_buffer_size[0] = vals[2];
676 	sge_params->sge_fl_buffer_size[1] = vals[3];
677 	sge_params->sge_timer_value_0_and_1 = vals[4];
678 	sge_params->sge_timer_value_2_and_3 = vals[5];
679 	sge_params->sge_timer_value_4_and_5 = vals[6];
680 
681 	/* T4 uses a single control field to specify both the PCIe Padding and
682 	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we also need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2, so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
690 	 */
691 	if (!is_t4(adapter->params.chip)) {
692 		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
693 			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
694 		v = t4vf_query_params(adapter, 1, params, vals);
695 		if (v != FW_SUCCESS) {
696 			dev_err(adapter->pdev_dev,
697 				"Unable to get SGE Control2; "
698 				"probably old firmware.\n");
699 			return v;
700 		}
701 		sge_params->sge_control2 = vals[0];
702 	}
703 
704 	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
705 		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
706 	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
707 		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
708 	v = t4vf_query_params(adapter, 2, params, vals);
709 	if (v)
710 		return v;
711 	sge_params->sge_ingress_rx_threshold = vals[0];
712 	sge_params->sge_congestion_control = vals[1];
713 
	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow these registers to be
	 * read.
717 	 */
718 	if (!is_t4(adapter->params.chip)) {
719 		u32 whoami;
720 		unsigned int pf, s_hps, s_qpp;
721 
722 		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
723 			     FW_PARAMS_PARAM_XYZ_V(
724 				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
725 		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
726 			     FW_PARAMS_PARAM_XYZ_V(
727 				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
728 		v = t4vf_query_params(adapter, 2, params, vals);
729 		if (v != FW_SUCCESS) {
730 			dev_warn(adapter->pdev_dev,
731 				 "Unable to get VF SGE Queues/Page; "
732 				 "probably old firmware.\n");
733 			return v;
734 		}
735 		sge_params->sge_egress_queues_per_page = vals[0];
736 		sge_params->sge_ingress_queues_per_page = vals[1];
737 
738 		/* We need the Queues/Page for our VF.  This is based on the
739 		 * PF from which we're instantiated and is indexed in the
740 		 * register we just read. Do it once here so other code in
741 		 * the driver can just use it.
742 		 */
743 		whoami = t4_read_reg(adapter,
744 				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
745 		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
746 			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
747 
748 		s_hps = (HOSTPAGESIZEPF0_S +
749 			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
750 		sge_params->sge_vf_hps =
751 			((sge_params->sge_host_page_size >> s_hps)
752 			 & HOSTPAGESIZEPF0_M);
753 
754 		s_qpp = (QUEUESPERPAGEPF0_S +
755 			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
756 		sge_params->sge_vf_eq_qpp =
757 			((sge_params->sge_egress_queues_per_page >> s_qpp)
758 			 & QUEUESPERPAGEPF0_M);
759 		sge_params->sge_vf_iq_qpp =
760 			((sge_params->sge_ingress_queues_per_page >> s_qpp)
761 			 & QUEUESPERPAGEPF0_M);
762 	}
763 
764 	return 0;
765 }
766 
767 /**
 *	t4vf_get_vpd_params - retrieve device VPD parameters
769  *	@adapter: the adapter
770  *
 *	Retrieves various device Vital Product Data parameters.  The parameters
772  *	are stored in @adapter->params.vpd.
773  */
774 int t4vf_get_vpd_params(struct adapter *adapter)
775 {
776 	struct vpd_params *vpd_params = &adapter->params.vpd;
777 	u32 params[7], vals[7];
778 	int v;
779 
780 	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
781 		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
782 	v = t4vf_query_params(adapter, 1, params, vals);
783 	if (v)
784 		return v;
785 	vpd_params->cclk = vals[0];
786 
787 	return 0;
788 }
789 
790 /**
 *	t4vf_get_dev_params - retrieve device parameters
792  *	@adapter: the adapter
793  *
 *	Retrieves various device parameters.  The parameters are stored in
795  *	@adapter->params.dev.
796  */
797 int t4vf_get_dev_params(struct adapter *adapter)
798 {
799 	struct dev_params *dev_params = &adapter->params.dev;
800 	u32 params[7], vals[7];
801 	int v;
802 
803 	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
804 		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
805 	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
806 		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
807 	v = t4vf_query_params(adapter, 2, params, vals);
808 	if (v)
809 		return v;
810 	dev_params->fwrev = vals[0];
811 	dev_params->tprev = vals[1];
812 
813 	return 0;
814 }
815 
816 /**
817  *	t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
818  *	@adapter: the adapter
819  *
 *	Retrieves the global RSS mode and parameters that we have to live with
 *	and stores them in the @adapter's RSS parameters.
822  */
823 int t4vf_get_rss_glb_config(struct adapter *adapter)
824 {
825 	struct rss_params *rss = &adapter->params.rss;
826 	struct fw_rss_glb_config_cmd cmd, rpl;
827 	int v;
828 
829 	/*
830 	 * Execute an RSS Global Configuration read command to retrieve
831 	 * our RSS configuration.
832 	 */
833 	memset(&cmd, 0, sizeof(cmd));
834 	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
835 				      FW_CMD_REQUEST_F |
836 				      FW_CMD_READ_F);
837 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
838 	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
839 	if (v)
840 		return v;
841 
842 	/*
	 * Translate the big-endian RSS Global Configuration into our
844 	 * cpu-endian format based on the RSS mode.  We also do first level
845 	 * filtering at this point to weed out modes which don't support
846 	 * VF Drivers ...
847 	 */
848 	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
849 			be32_to_cpu(rpl.u.manual.mode_pkd));
850 	switch (rss->mode) {
851 	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
852 		u32 word = be32_to_cpu(
853 				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
854 
855 		rss->u.basicvirtual.synmapen =
856 			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
857 		rss->u.basicvirtual.syn4tupenipv6 =
858 			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
859 		rss->u.basicvirtual.syn2tupenipv6 =
860 			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
861 		rss->u.basicvirtual.syn4tupenipv4 =
862 			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
863 		rss->u.basicvirtual.syn2tupenipv4 =
864 			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
865 
866 		rss->u.basicvirtual.ofdmapen =
867 			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
868 
869 		rss->u.basicvirtual.tnlmapen =
870 			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
871 		rss->u.basicvirtual.tnlalllookup =
872 			((word  & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
873 
874 		rss->u.basicvirtual.hashtoeplitz =
875 			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
876 
877 		/* we need at least Tunnel Map Enable to be set */
878 		if (!rss->u.basicvirtual.tnlmapen)
879 			return -EINVAL;
880 		break;
881 	}
882 
883 	default:
884 		/* all unknown/unsupported RSS modes result in an error */
885 		return -EINVAL;
886 	}
887 
888 	return 0;
889 }
890 
891 /**
892  *	t4vf_get_vfres - retrieve VF resource limits
893  *	@adapter: the adapter
894  *
895  *	Retrieves configured resource limits and capabilities for a virtual
 *	function.  The results are stored in @adapter->params.vfres.
897  */
898 int t4vf_get_vfres(struct adapter *adapter)
899 {
900 	struct vf_resources *vfres = &adapter->params.vfres;
901 	struct fw_pfvf_cmd cmd, rpl;
902 	int v;
903 	u32 word;
904 
905 	/*
906 	 * Execute PFVF Read command to get VF resource limits; bail out early
907 	 * with error on command failure.
908 	 */
909 	memset(&cmd, 0, sizeof(cmd));
910 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
911 				    FW_CMD_REQUEST_F |
912 				    FW_CMD_READ_F);
913 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
914 	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
915 	if (v)
916 		return v;
917 
918 	/*
919 	 * Extract VF resource limits and return success.
920 	 */
921 	word = be32_to_cpu(rpl.niqflint_niq);
922 	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
923 	vfres->niq = FW_PFVF_CMD_NIQ_G(word);
924 
925 	word = be32_to_cpu(rpl.type_to_neq);
926 	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
927 	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
928 
929 	word = be32_to_cpu(rpl.tc_to_nexactf);
930 	vfres->tc = FW_PFVF_CMD_TC_G(word);
931 	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
932 	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
933 
934 	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
935 	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
936 	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
937 	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
938 
939 	return 0;
940 }
941 
942 /**
943  *	t4vf_read_rss_vi_config - read a VI's RSS configuration
944  *	@adapter: the adapter
945  *	@viid: Virtual Interface ID
946  *	@config: pointer to host-native VI RSS Configuration buffer
947  *
948  *	Reads the Virtual Interface's RSS configuration information and
949  *	translates it into CPU-native format.
950  */
951 int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
952 			    union rss_vi_config *config)
953 {
954 	struct fw_rss_vi_config_cmd cmd, rpl;
955 	int v;
956 
957 	memset(&cmd, 0, sizeof(cmd));
958 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
959 				     FW_CMD_REQUEST_F |
960 				     FW_CMD_READ_F |
961 				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
962 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
963 	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
964 	if (v)
965 		return v;
966 
967 	switch (adapter->params.rss.mode) {
968 	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
969 		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
970 
971 		config->basicvirtual.ip6fourtupen =
972 			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
973 		config->basicvirtual.ip6twotupen =
974 			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
975 		config->basicvirtual.ip4fourtupen =
976 			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
977 		config->basicvirtual.ip4twotupen =
978 			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
979 		config->basicvirtual.udpen =
980 			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
981 		config->basicvirtual.defaultq =
982 			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
983 		break;
984 	}
985 
986 	default:
987 		return -EINVAL;
988 	}
989 
990 	return 0;
991 }
992 
993 /**
994  *	t4vf_write_rss_vi_config - write a VI's RSS configuration
995  *	@adapter: the adapter
996  *	@viid: Virtual Interface ID
997  *	@config: pointer to host-native VI RSS Configuration buffer
998  *
999  *	Write the Virtual Interface's RSS configuration information
1000  *	(translating it into firmware-native format before writing).
1001  */
1002 int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
1003 			     union rss_vi_config *config)
1004 {
1005 	struct fw_rss_vi_config_cmd cmd, rpl;
1006 
1007 	memset(&cmd, 0, sizeof(cmd));
1008 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
1009 				     FW_CMD_REQUEST_F |
1010 				     FW_CMD_WRITE_F |
1011 				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
1012 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1013 	switch (adapter->params.rss.mode) {
1014 	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
1015 		u32 word = 0;
1016 
1017 		if (config->basicvirtual.ip6fourtupen)
1018 			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
1019 		if (config->basicvirtual.ip6twotupen)
1020 			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
1021 		if (config->basicvirtual.ip4fourtupen)
1022 			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
1023 		if (config->basicvirtual.ip4twotupen)
1024 			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
1025 		if (config->basicvirtual.udpen)
1026 			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
1027 		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
1028 				config->basicvirtual.defaultq);
1029 		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
1030 		break;
1031 	}
1032 
1033 	default:
1034 		return -EINVAL;
1035 	}
1036 
1037 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1038 }
1039 
1040 /**
1041  *	t4vf_config_rss_range - configure a portion of the RSS mapping table
1042  *	@adapter: the adapter
1043  *	@viid: Virtual Interface of RSS Table Slice
1044  *	@start: starting entry in the table to write
1045  *	@n: how many table entries to write
1046  *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
1047  *	@nrspq: number of values in @rspq
1048  *
1049  *	Programs the selected part of the VI's RSS mapping table with the
1050  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
1051  *	until the full table range is populated.
1052  *
1053  *	The caller must ensure the values in @rspq are in the range 0..1023.
1054  */
1055 int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
1056 			  int start, int n, const u16 *rspq, int nrspq)
1057 {
1058 	const u16 *rsp = rspq;
1059 	const u16 *rsp_end = rspq+nrspq;
1060 	struct fw_rss_ind_tbl_cmd cmd;
1061 
1062 	/*
1063 	 * Initialize firmware command template to write the RSS table.
1064 	 */
1065 	memset(&cmd, 0, sizeof(cmd));
1066 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
1067 				     FW_CMD_REQUEST_F |
1068 				     FW_CMD_WRITE_F |
1069 				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
1070 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1071 
1072 	/*
1073 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
1074 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
1075 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
1076 	 * reserved.
1077 	 */
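	/* E.g. a full command of 32 Queue IDs occupies DIV_ROUND_UP(32, 3) =
	 * 11 32-bit words, with only two valid IDs in the final word.
	 */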
1078 	while (n > 0) {
1079 		__be32 *qp = &cmd.iq0_to_iq2;
1080 		int nq = min(n, 32);
1081 		int ret;
1082 
1083 		/*
1084 		 * Set up the firmware RSS command header to send the next
1085 		 * "nq" Ingress Queue IDs to the firmware.
1086 		 */
1087 		cmd.niqid = cpu_to_be16(nq);
1088 		cmd.startidx = cpu_to_be16(start);
1089 
		/*
		 * Account for the "nq" entries we're about to send so the
		 * next loop iteration starts at the right place.
		 */
1093 		start += nq;
1094 		n -= nq;
1095 
1096 		/*
1097 		 * While there are still Ingress Queue IDs to stuff into the
1098 		 * current firmware RSS command, retrieve them from the
1099 		 * Ingress Queue ID array and insert them into the command.
1100 		 */
1101 		while (nq > 0) {
1102 			/*
1103 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
1104 			 * around the Ingress Queue ID array if necessary) and
1105 			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
1107 			 */
1108 			u16 qbuf[3];
1109 			u16 *qbp = qbuf;
1110 			int nqbuf = min(3, nq);
1111 
1112 			nq -= nqbuf;
1113 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
1114 			while (nqbuf) {
1115 				nqbuf--;
1116 				*qbp++ = *rsp++;
1117 				if (rsp >= rsp_end)
1118 					rsp = rspq;
1119 			}
1120 			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
1121 					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
1122 					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
1123 		}
1124 
1125 		/*
		 * Send this portion of the RSS table update to the firmware;
1127 		 * bail out on any errors.
1128 		 */
1129 		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1130 		if (ret)
1131 			return ret;
1132 	}
1133 	return 0;
1134 }
1135 
1136 /**
1137  *	t4vf_alloc_vi - allocate a virtual interface on a port
1138  *	@adapter: the adapter
1139  *	@port_id: physical port associated with the VI
1140  *
1141  *	Allocate a new Virtual Interface and bind it to the indicated
1142  *	physical port.  Return the new Virtual Interface Identifier on
1143  *	success, or a [negative] error number on failure.
1144  */
1145 int t4vf_alloc_vi(struct adapter *adapter, int port_id)
1146 {
1147 	struct fw_vi_cmd cmd, rpl;
1148 	int v;
1149 
1150 	/*
1151 	 * Execute a VI command to allocate Virtual Interface and return its
1152 	 * VIID.
1153 	 */
1154 	memset(&cmd, 0, sizeof(cmd));
1155 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1156 				    FW_CMD_REQUEST_F |
1157 				    FW_CMD_WRITE_F |
1158 				    FW_CMD_EXEC_F);
1159 	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1160 					 FW_VI_CMD_ALLOC_F);
1161 	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
1162 	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1163 	if (v)
1164 		return v;
1165 
1166 	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
1167 }
1168 
1169 /**
1170  *	t4vf_free_vi -- free a virtual interface
1171  *	@adapter: the adapter
1172  *	@viid: the virtual interface identifier
1173  *
1174  *	Free a previously allocated Virtual Interface.  Return an error on
1175  *	failure.
1176  */
1177 int t4vf_free_vi(struct adapter *adapter, int viid)
1178 {
1179 	struct fw_vi_cmd cmd;
1180 
1181 	/*
1182 	 * Execute a VI command to free the Virtual Interface.
1183 	 */
1184 	memset(&cmd, 0, sizeof(cmd));
1185 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1186 				    FW_CMD_REQUEST_F |
1187 				    FW_CMD_EXEC_F);
1188 	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1189 					 FW_VI_CMD_FREE_F);
1190 	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
1191 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1192 }
1193 
1194 /**
1195  *	t4vf_enable_vi - enable/disable a virtual interface
1196  *	@adapter: the adapter
1197  *	@viid: the Virtual Interface ID
1198  *	@rx_en: 1=enable Rx, 0=disable Rx
1199  *	@tx_en: 1=enable Tx, 0=disable Tx
1200  *
1201  *	Enables/disables a virtual interface.
1202  */
1203 int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
1204 		   bool rx_en, bool tx_en)
1205 {
1206 	struct fw_vi_enable_cmd cmd;
1207 
1208 	memset(&cmd, 0, sizeof(cmd));
1209 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1210 				     FW_CMD_REQUEST_F |
1211 				     FW_CMD_EXEC_F |
1212 				     FW_VI_ENABLE_CMD_VIID_V(viid));
1213 	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
1214 				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
1215 				       FW_LEN16(cmd));
1216 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1217 }
1218 
1219 /**
1220  *	t4vf_identify_port - identify a VI's port by blinking its LED
1221  *	@adapter: the adapter
1222  *	@viid: the Virtual Interface ID
1223  *	@nblinks: how many times to blink LED at 2.5 Hz
1224  *
1225  *	Identifies a VI's port by blinking its LED.
1226  */
1227 int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
1228 		       unsigned int nblinks)
1229 {
1230 	struct fw_vi_enable_cmd cmd;
1231 
1232 	memset(&cmd, 0, sizeof(cmd));
1233 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1234 				     FW_CMD_REQUEST_F |
1235 				     FW_CMD_EXEC_F |
1236 				     FW_VI_ENABLE_CMD_VIID_V(viid));
1237 	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
1238 				       FW_LEN16(cmd));
1239 	cmd.blinkdur = cpu_to_be16(nblinks);
1240 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1241 }
1242 
1243 /**
1244  *	t4vf_set_rxmode - set Rx properties of a virtual interface
1245  *	@adapter: the adapter
1246  *	@viid: the VI id
1247  *	@mtu: the new MTU or -1 for no change
1248  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
1249  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
1250  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
1251  *	@vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
1252  *		-1 no change
1253  *
1254  *	Sets Rx properties of a virtual interface.
1255  */
1256 int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
1257 		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
1258 		    bool sleep_ok)
1259 {
1260 	struct fw_vi_rxmode_cmd cmd;
1261 
1262 	/* convert to FW values */
1263 	if (mtu < 0)
1264 		mtu = FW_VI_RXMODE_CMD_MTU_M;
1265 	if (promisc < 0)
1266 		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
1267 	if (all_multi < 0)
1268 		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
1269 	if (bcast < 0)
1270 		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
1271 	if (vlanex < 0)
1272 		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
1273 
1274 	memset(&cmd, 0, sizeof(cmd));
1275 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
1276 				     FW_CMD_REQUEST_F |
1277 				     FW_CMD_WRITE_F |
1278 				     FW_VI_RXMODE_CMD_VIID_V(viid));
1279 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1280 	cmd.mtu_to_vlanexen =
1281 		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
1282 			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
1283 			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
1284 			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
1285 			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
1286 	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1287 }
1288 
1289 /**
1290  *	t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
1291  *	@adapter: the adapter
1292  *	@viid: the Virtual Interface Identifier
1293  *	@free: if true any existing filters for this VI id are first removed
1294  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
1295  *	@addr: the MAC address(es)
1296  *	@idx: where to store the index of each allocated filter
1297  *	@hash: pointer to hash address filter bitmap
1298  *	@sleep_ok: call is allowed to sleep
1299  *
1300  *	Allocates an exact-match filter for each of the supplied addresses and
1301  *	sets it to the corresponding address.  If @idx is not %NULL it should
1302  *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address, its index is set to 0xffff.
 *	If @hash is not %NULL, addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
1307  *
1308  *	Returns a negative error number or the number of filters allocated.
1309  */
1310 int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1311 			unsigned int naddr, const u8 **addr, u16 *idx,
1312 			u64 *hash, bool sleep_ok)
1313 {
1314 	int offset, ret = 0;
	unsigned int nfilters = 0;
1316 	unsigned int rem = naddr;
1317 	struct fw_vi_mac_cmd cmd, rpl;
1318 	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1319 
1320 	if (naddr > max_naddr)
1321 		return -EINVAL;
1322 
1323 	for (offset = 0; offset < naddr; /**/) {
1324 		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1325 					 ? rem
1326 					 : ARRAY_SIZE(cmd.u.exact));
1327 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1328 						     u.exact[fw_naddr]), 16);
1329 		struct fw_vi_mac_exact *p;
1330 		int i;
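		/* Each pass sends at most ARRAY_SIZE(cmd.u.exact) exact-match
		 * entries, so larger requests are split across multiple
		 * mailbox commands.
		 */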
1331 
1332 		memset(&cmd, 0, sizeof(cmd));
1333 		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1334 					     FW_CMD_REQUEST_F |
1335 					     FW_CMD_WRITE_F |
1336 					     (free ? FW_CMD_EXEC_F : 0) |
1337 					     FW_VI_MAC_CMD_VIID_V(viid));
1338 		cmd.freemacs_to_len16 =
1339 			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
1340 				    FW_CMD_LEN16_V(len16));
1341 
1342 		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1343 			p->valid_to_idx = cpu_to_be16(
1344 				FW_VI_MAC_CMD_VALID_F |
1345 				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
1346 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

1350 		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1351 					sleep_ok);
1352 		if (ret && ret != -ENOMEM)
1353 			break;
1354 
1355 		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1356 			u16 index = FW_VI_MAC_CMD_IDX_G(
1357 				be16_to_cpu(p->valid_to_idx));
1358 
1359 			if (idx)
1360 				idx[offset+i] =
1361 					(index >= max_naddr
1362 					 ? 0xffff
1363 					 : index);
1364 			if (index < max_naddr)
1365 				nfilters++;
1366 			else if (hash)
1367 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1368 		}
1369 
1370 		free = false;
1371 		offset += fw_naddr;
1372 		rem -= fw_naddr;
1373 	}
1374 
1375 	/*
1376 	 * If there were no errors or we merely ran out of room in our MAC
1377 	 * address arena, return the number of filters actually written.
1378 	 */
1379 	if (ret == 0 || ret == -ENOMEM)
1380 		ret = nfilters;
1381 	return ret;
1382 }
1383 
1384 /**
1385  *	t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
1386  *	@adapter: the adapter
1387  *	@viid: the VI id
1388  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
1389  *	@addr: the MAC address(es)
1390  *	@sleep_ok: call is allowed to sleep
1391  *
1392  *	Frees the exact-match filter for each of the supplied addresses
1393  *
1394  *	Returns a negative error number or the number of filters freed.
1395  */
1396 int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
1397 		       unsigned int naddr, const u8 **addr, bool sleep_ok)
1398 {
1399 	int offset, ret = 0;
1400 	struct fw_vi_mac_cmd cmd;
1401 	unsigned int nfilters = 0;
1402 	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1403 	unsigned int rem = naddr;
1404 
1405 	if (naddr > max_naddr)
1406 		return -EINVAL;
1407 
	for (offset = 0; offset < (int)naddr; /**/) {
1409 		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
1410 					 rem : ARRAY_SIZE(cmd.u.exact));
1411 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1412 						     u.exact[fw_naddr]), 16);
1413 		struct fw_vi_mac_exact *p;
1414 		int i;
1415 
1416 		memset(&cmd, 0, sizeof(cmd));
1417 		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1418 				     FW_CMD_REQUEST_F |
1419 				     FW_CMD_WRITE_F |
1420 				     FW_CMD_EXEC_V(0) |
1421 				     FW_VI_MAC_CMD_VIID_V(viid));
1422 		cmd.freemacs_to_len16 =
1423 				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
1424 					    FW_CMD_LEN16_V(len16));
1425 
1426 		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
1427 			p->valid_to_idx = cpu_to_be16(
1428 				FW_VI_MAC_CMD_VALID_F |
1429 				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
1430 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1431 		}
1432 
1433 		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
1434 					sleep_ok);
1435 		if (ret)
1436 			break;
1437 
1438 		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1439 			u16 index = FW_VI_MAC_CMD_IDX_G(
1440 						be16_to_cpu(p->valid_to_idx));
1441 
1442 			if (index < max_naddr)
1443 				nfilters++;
1444 		}
1445 
1446 		offset += fw_naddr;
1447 		rem -= fw_naddr;
1448 	}
1449 
1450 	if (ret == 0)
1451 		ret = nfilters;
1452 	return ret;
1453 }
1454 
1455 /**
1456  *	t4vf_change_mac - modifies the exact-match filter for a MAC address
1457  *	@adapter: the adapter
1458  *	@viid: the Virtual Interface ID
1459  *	@idx: index of existing filter for old value of MAC address, or -1
1460  *	@addr: the new MAC address value
1461  *	@persist: if idx < 0, the new MAC allocation should be persistent
1462  *
1463  *	Modifies an exact-match filter and sets it to the new MAC address.
1464  *	Note that in general it is not possible to modify the value of a given
1465  *	filter so the generic way to modify an address filter is to free the
1466  *	one being used by the old address value and allocate a new filter for
1467  *	the new address value.  @idx can be -1 if the address is a new
1468  *	addition.
1469  *
1470  *	Returns a negative error number or the index of the filter with the new
1471  *	MAC value.
1472  */
1473 int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1474 		    int idx, const u8 *addr, bool persist)
1475 {
1476 	int ret;
1477 	struct fw_vi_mac_cmd cmd, rpl;
1478 	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
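	/* The command carries exactly one exact-match entry (u.exact[0]). */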
1479 	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1480 					     u.exact[1]), 16);
1481 	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
1482 
1483 	/*
1484 	 * If this is a new allocation, determine whether it should be
1485 	 * persistent (across a "freemacs" operation) or not.
1486 	 */
1487 	if (idx < 0)
1488 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
1489 
1490 	memset(&cmd, 0, sizeof(cmd));
1491 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1492 				     FW_CMD_REQUEST_F |
1493 				     FW_CMD_WRITE_F |
1494 				     FW_VI_MAC_CMD_VIID_V(viid));
1495 	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1496 	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
1497 				      FW_VI_MAC_CMD_IDX_V(idx));
1498 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
1499 
1500 	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1501 	if (ret == 0) {
1502 		p = &rpl.u.exact[0];
1503 		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
1504 		if (ret >= max_mac_addr)
1505 			ret = -ENOMEM;
1506 	}
1507 	return ret;
1508 }
1509 
1510 /**
1511  *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
1512  *	@adapter: the adapter
1513  *	@viid: the Virtual Interface Identifier
1514  *	@ucast: whether the hash filter should also match unicast addresses
1515  *	@vec: the value to be written to the hash filter
1516  *	@sleep_ok: call is allowed to sleep
1517  *
1518  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
1519  */
1520 int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
1521 		       bool ucast, u64 vec, bool sleep_ok)
1522 {
1523 	struct fw_vi_mac_cmd cmd;
1524 	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1525 					     u.exact[0]), 16);
1526 
1527 	memset(&cmd, 0, sizeof(cmd));
1528 	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1529 				     FW_CMD_REQUEST_F |
1530 				     FW_CMD_WRITE_F |
1531 				     FW_VI_ENABLE_CMD_VIID_V(viid));
1532 	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
1533 					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
1534 					    FW_CMD_LEN16_V(len16));
1535 	cmd.u.hash.hashvec = cpu_to_be64(vec);
1536 	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1537 }
1538 
1539 /**
1540  *	t4vf_get_port_stats - collect "port" statistics
1541  *	@adapter: the adapter
1542  *	@pidx: the port index
1543  *	@s: the stats structure to fill
1544  *
1545  *	Collect statistics for the "port"'s Virtual Interface.
1546  */
1547 int t4vf_get_port_stats(struct adapter *adapter, int pidx,
1548 			struct t4vf_port_stats *s)
1549 {
1550 	struct port_info *pi = adap2pinfo(adapter, pidx);
1551 	struct fw_vi_stats_vf fwstats;
1552 	unsigned int rem = VI_VF_NUM_STATS;
1553 	__be64 *fwsp = (__be64 *)&fwstats;
1554 
1555 	/*
1556 	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
1557 	 * commands.  We could use a Work Request and get all of them at once
1558 	 * but that's an asynchronous interface which is awkward to use.
1559 	 */
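	/* Each FW_VI_STATS command below returns at most six 64-bit counters
	 * (min(6U, rem)), so retrieving all VI_VF_NUM_STATS statistics takes
	 * several mailbox round trips.
	 */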
1560 	while (rem) {
1561 		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}
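
/*
 * Illustrative usage sketch (not part of the driver): an ethtool statistics
 * handler would typically fill a t4vf_port_stats and copy out the fields it
 * needs, e.g.
 *
 *	struct t4vf_port_stats stats;
 *	int err;
 *
 *	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
 *	if (err == 0)
 *		data[0] = stats.tx_bcast_frames;
 *
 * pi->pidx as the port index and the ethtool data[] buffer are assumptions
 * of this sketch.
 */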

/**
 *	t4vf_iq_free - free an ingress queue and its free lists
 *	@adapter: the adapter
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue ID
 *	@fl0id: FL0 queue ID or 0xffff if no attached FL0
 *	@fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
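
/*
 * Illustrative usage sketch (not part of the driver): tearing down an
 * Ethernet RX response queue with one attached free list and no second
 * free list (0xffff marks an absent FL, per the kernel-doc above) might
 * look like:
 *
 *	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, rspq->cntxt_id,
 *		     fl ? fl->cntxt_id : 0xffff, 0xffff);
 *
 * The rspq/fl structure and field names are assumptions of this sketch.
 */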

/**
 *	t4vf_eth_eq_free - free an Ethernet egress queue
 *	@adapter: the adapter
 *	@eqid: egress queue ID
 *
 *	Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
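
/*
 * Illustrative usage sketch (not part of the driver): a TX queue teardown
 * path would pass the egress queue's hardware context ID, e.g.
 *
 *	t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
 *
 * The txq->q.cntxt_id name is an assumption of this sketch.
 */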

/**
 *	t4vf_handle_fw_rpl - process a firmware reply message
 *	@adapter: the adapter
 *	@rpl: start of the firmware message
 *
 *	Processes a firmware message, such as a link state change message.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				lc->lp_advertising =
					be16_to_cpu(port_cmd->u.info.lpacap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}
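
/*
 * Illustrative caller sketch (not part of the driver): the firmware event
 * queue handler is expected to hand CPL_FW6_MSG payloads to this routine,
 * roughly:
 *
 *	const struct cpl_fw6_msg *msg = (const void *)rsp;
 *
 *	if (opcode == CPL_FW6_MSG)
 *		t4vf_handle_fw_rpl(rspq->adapter, msg->data);
 *
 * The surrounding handler shape (rsp, opcode, rspq) is an assumption of
 * this sketch; CPL_FW6_MSG and struct cpl_fw6_msg come from the shared T4
 * message definitions.
 */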

/**
 *	t4vf_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, and detect the adapter's chip
 *	generation so later code can apply the right per-chip parameters.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}
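
/*
 * Illustrative probe-time sketch (not part of the driver): the PCI probe
 * path is expected to map BAR0 and then call t4vf_prep_adapter() before
 * issuing any mailbox commands, roughly:
 *
 *	adapter->regs = pci_ioremap_bar(pdev, 0);
 *	if (!adapter->regs)
 *		goto err_free_adapter;
 *	err = t4vf_prep_adapter(adapter);
 *	if (err)
 *		goto err_unmap_bar;
 *
 * The exact probe ordering and error labels are assumptions of this sketch.
 */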