1f7917c00SJeff Kirsher /*
2f7917c00SJeff Kirsher * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3f7917c00SJeff Kirsher * driver for Linux.
4f7917c00SJeff Kirsher *
5f7917c00SJeff Kirsher * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6f7917c00SJeff Kirsher *
7f7917c00SJeff Kirsher * This software is available to you under a choice of one of two
8f7917c00SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU
9f7917c00SJeff Kirsher * General Public License (GPL) Version 2, available from the file
10f7917c00SJeff Kirsher * COPYING in the main directory of this source tree, or the
11f7917c00SJeff Kirsher * OpenIB.org BSD license below:
12f7917c00SJeff Kirsher *
13f7917c00SJeff Kirsher * Redistribution and use in source and binary forms, with or
14f7917c00SJeff Kirsher * without modification, are permitted provided that the following
15f7917c00SJeff Kirsher * conditions are met:
16f7917c00SJeff Kirsher *
17f7917c00SJeff Kirsher * - Redistributions of source code must retain the above
18f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following
19f7917c00SJeff Kirsher * disclaimer.
20f7917c00SJeff Kirsher *
21f7917c00SJeff Kirsher * - Redistributions in binary form must reproduce the above
22f7917c00SJeff Kirsher * copyright notice, this list of conditions and the following
23f7917c00SJeff Kirsher * disclaimer in the documentation and/or other materials
24f7917c00SJeff Kirsher * provided with the distribution.
25f7917c00SJeff Kirsher *
26f7917c00SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27f7917c00SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28f7917c00SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29f7917c00SJeff Kirsher * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30f7917c00SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31f7917c00SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32f7917c00SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33f7917c00SJeff Kirsher * SOFTWARE.
34f7917c00SJeff Kirsher */
35f7917c00SJeff Kirsher
36428ac43fSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37428ac43fSJoe Perches
38f7917c00SJeff Kirsher #include <linux/module.h>
39f7917c00SJeff Kirsher #include <linux/moduleparam.h>
40f7917c00SJeff Kirsher #include <linux/init.h>
41f7917c00SJeff Kirsher #include <linux/pci.h>
42f7917c00SJeff Kirsher #include <linux/dma-mapping.h>
43f7917c00SJeff Kirsher #include <linux/netdevice.h>
44f7917c00SJeff Kirsher #include <linux/etherdevice.h>
45f7917c00SJeff Kirsher #include <linux/debugfs.h>
46f7917c00SJeff Kirsher #include <linux/ethtool.h>
475ad24defSHariprasad Shenai #include <linux/mdio.h>
48f7917c00SJeff Kirsher
49f7917c00SJeff Kirsher #include "t4vf_common.h"
50f7917c00SJeff Kirsher #include "t4vf_defs.h"
51f7917c00SJeff Kirsher
52f7917c00SJeff Kirsher #include "../cxgb4/t4_regs.h"
53f7917c00SJeff Kirsher #include "../cxgb4/t4_msg.h"
54f7917c00SJeff Kirsher
/*
 * Generic information about the driver.
 */
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters: a netif_msg_* bitmask
 * selecting which driver events are logged by default.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
71f7917c00SJeff Kirsher
72f7917c00SJeff Kirsher /*
73f7917c00SJeff Kirsher * The driver uses the best interrupt scheme available on a platform in the
74f7917c00SJeff Kirsher * order MSI-X then MSI. This parameter determines which of these schemes the
75f7917c00SJeff Kirsher * driver may consider as follows:
76f7917c00SJeff Kirsher *
77f7917c00SJeff Kirsher * msi = 2: choose from among MSI-X and MSI
78f7917c00SJeff Kirsher * msi = 1: only consider MSI interrupts
79f7917c00SJeff Kirsher *
80f7917c00SJeff Kirsher * Note that unlike the Physical Function driver, this Virtual Function driver
81f7917c00SJeff Kirsher * does _not_ support legacy INTx interrupts (this limitation is mandated by
82f7917c00SJeff Kirsher * the PCI-E SR-IOV standard).
83f7917c00SJeff Kirsher */
#define MSI_MSIX 2
#define MSI_MSI 1
#define MSI_DEFAULT MSI_MSIX

/* Interrupt scheme selector (see the comment above): MSI_MSIX (2) lets the
 * driver choose between MSI-X and MSI; MSI_MSI (1) restricts it to MSI.
 */
static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
92f7917c00SJeff Kirsher
93f7917c00SJeff Kirsher /*
94f7917c00SJeff Kirsher * Fundamental constants.
95f7917c00SJeff Kirsher * ======================
96f7917c00SJeff Kirsher */
97f7917c00SJeff Kirsher
enum {
	/* Upper and lower bounds on the sizes of the various SGE rings. */
	MAX_TXQ_ENTRIES = 16384,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,

	MIN_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queues
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses. And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused. See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
121f7917c00SJeff Kirsher
/*
 * Global driver state.
 * ====================
 */

/* Root directory of the driver's debugfs hierarchy. */
static struct dentry *cxgb4vf_debugfs_root;
128f7917c00SJeff Kirsher
129f7917c00SJeff Kirsher /*
130f7917c00SJeff Kirsher * OS "Callback" functions.
131f7917c00SJeff Kirsher * ========================
132f7917c00SJeff Kirsher */
133f7917c00SJeff Kirsher
134f7917c00SJeff Kirsher /*
135f7917c00SJeff Kirsher * The link status has changed on the indicated "port" (Virtual Interface).
136f7917c00SJeff Kirsher */
t4vf_os_link_changed(struct adapter * adapter,int pidx,int link_ok)137f7917c00SJeff Kirsher void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
138f7917c00SJeff Kirsher {
139f7917c00SJeff Kirsher struct net_device *dev = adapter->port[pidx];
140f7917c00SJeff Kirsher
141f7917c00SJeff Kirsher /*
142f7917c00SJeff Kirsher * If the port is disabled or the current recorded "link up"
143f7917c00SJeff Kirsher * status matches the new status, just return.
144f7917c00SJeff Kirsher */
145f7917c00SJeff Kirsher if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
146f7917c00SJeff Kirsher return;
147f7917c00SJeff Kirsher
148f7917c00SJeff Kirsher /*
149f7917c00SJeff Kirsher * Tell the OS that the link status has changed and print a short
150f7917c00SJeff Kirsher * informative message on the console about the event.
151f7917c00SJeff Kirsher */
152f7917c00SJeff Kirsher if (link_ok) {
153f7917c00SJeff Kirsher const char *s;
154f7917c00SJeff Kirsher const char *fc;
155f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
156f7917c00SJeff Kirsher
157502c1a16SArjun Vynipadath netif_carrier_on(dev);
158502c1a16SArjun Vynipadath
159f7917c00SJeff Kirsher switch (pi->link_cfg.speed) {
1605e78f7fdSGanesh Goudar case 100:
1615e78f7fdSGanesh Goudar s = "100Mbps";
162897d55dfSHariprasad Shenai break;
1635e78f7fdSGanesh Goudar case 1000:
1645e78f7fdSGanesh Goudar s = "1Gbps";
1655e78f7fdSGanesh Goudar break;
166897d55dfSHariprasad Shenai case 10000:
167f7917c00SJeff Kirsher s = "10Gbps";
168f7917c00SJeff Kirsher break;
1695e78f7fdSGanesh Goudar case 25000:
1705e78f7fdSGanesh Goudar s = "25Gbps";
171f7917c00SJeff Kirsher break;
1725e78f7fdSGanesh Goudar case 40000:
1735e78f7fdSGanesh Goudar s = "40Gbps";
1745e78f7fdSGanesh Goudar break;
1755e78f7fdSGanesh Goudar case 100000:
1765e78f7fdSGanesh Goudar s = "100Gbps";
177f7917c00SJeff Kirsher break;
178f7917c00SJeff Kirsher
179f7917c00SJeff Kirsher default:
180f7917c00SJeff Kirsher s = "unknown";
181f7917c00SJeff Kirsher break;
182f7917c00SJeff Kirsher }
183f7917c00SJeff Kirsher
184c3168cabSGanesh Goudar switch ((int)pi->link_cfg.fc) {
185f7917c00SJeff Kirsher case PAUSE_RX:
186f7917c00SJeff Kirsher fc = "RX";
187f7917c00SJeff Kirsher break;
188f7917c00SJeff Kirsher
189f7917c00SJeff Kirsher case PAUSE_TX:
190f7917c00SJeff Kirsher fc = "TX";
191f7917c00SJeff Kirsher break;
192f7917c00SJeff Kirsher
193f7917c00SJeff Kirsher case PAUSE_RX | PAUSE_TX:
194f7917c00SJeff Kirsher fc = "RX/TX";
195f7917c00SJeff Kirsher break;
196f7917c00SJeff Kirsher
197f7917c00SJeff Kirsher default:
198f7917c00SJeff Kirsher fc = "no";
199f7917c00SJeff Kirsher break;
200f7917c00SJeff Kirsher }
201f7917c00SJeff Kirsher
202428ac43fSJoe Perches netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
203f7917c00SJeff Kirsher } else {
204502c1a16SArjun Vynipadath netif_carrier_off(dev);
205428ac43fSJoe Perches netdev_info(dev, "link down\n");
206f7917c00SJeff Kirsher }
207f7917c00SJeff Kirsher }
208f7917c00SJeff Kirsher
/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).  Log a human-readable description of the new module state.
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	/* Names for the low-numbered well-known module types; index 0
	 * (FW_PORT_MOD_TYPE_NA) intentionally has no name.
	 */
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	/* Ordering matters: test the "no module" sentinel first, then the
	 * small table of well-known types, then the firmware's special
	 * out-of-band module codes.
	 */
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}
2405ad24defSHariprasad Shenai
cxgb4vf_set_addr_hash(struct port_info * pi)2413f8cfd0dSArjun Vynipadath static int cxgb4vf_set_addr_hash(struct port_info *pi)
2423f8cfd0dSArjun Vynipadath {
2433f8cfd0dSArjun Vynipadath struct adapter *adapter = pi->adapter;
2443f8cfd0dSArjun Vynipadath u64 vec = 0;
2453f8cfd0dSArjun Vynipadath bool ucast = false;
2463f8cfd0dSArjun Vynipadath struct hash_mac_addr *entry;
2473f8cfd0dSArjun Vynipadath
2483f8cfd0dSArjun Vynipadath /* Calculate the hash vector for the updated list and program it */
2493f8cfd0dSArjun Vynipadath list_for_each_entry(entry, &adapter->mac_hlist, list) {
2503f8cfd0dSArjun Vynipadath ucast |= is_unicast_ether_addr(entry->addr);
2513f8cfd0dSArjun Vynipadath vec |= (1ULL << hash_mac_addr(entry->addr));
2523f8cfd0dSArjun Vynipadath }
2533f8cfd0dSArjun Vynipadath return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
2543f8cfd0dSArjun Vynipadath }
2553f8cfd0dSArjun Vynipadath
2563f8cfd0dSArjun Vynipadath /**
2573f8cfd0dSArjun Vynipadath * cxgb4vf_change_mac - Update match filter for a MAC address.
2583f8cfd0dSArjun Vynipadath * @pi: the port_info
2593f8cfd0dSArjun Vynipadath * @viid: the VI id
2603f8cfd0dSArjun Vynipadath * @tcam_idx: TCAM index of existing filter for old value of MAC address,
2613f8cfd0dSArjun Vynipadath * or -1
2623f8cfd0dSArjun Vynipadath * @addr: the new MAC address value
26320bb0c8fSRahul Lakkireddy * @persistent: whether a new MAC allocation should be persistent
2643f8cfd0dSArjun Vynipadath *
2653f8cfd0dSArjun Vynipadath * Modifies an MPS filter and sets it to the new MAC address if
2663f8cfd0dSArjun Vynipadath * @tcam_idx >= 0, or adds the MAC address to a new filter if
2673f8cfd0dSArjun Vynipadath * @tcam_idx < 0. In the latter case the address is added persistently
2683f8cfd0dSArjun Vynipadath * if @persist is %true.
2693f8cfd0dSArjun Vynipadath * Addresses are programmed to hash region, if tcam runs out of entries.
2703f8cfd0dSArjun Vynipadath *
2713f8cfd0dSArjun Vynipadath */
cxgb4vf_change_mac(struct port_info * pi,unsigned int viid,int * tcam_idx,const u8 * addr,bool persistent)2723f8cfd0dSArjun Vynipadath static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
2733f8cfd0dSArjun Vynipadath int *tcam_idx, const u8 *addr, bool persistent)
2743f8cfd0dSArjun Vynipadath {
2753f8cfd0dSArjun Vynipadath struct hash_mac_addr *new_entry, *entry;
2763f8cfd0dSArjun Vynipadath struct adapter *adapter = pi->adapter;
2773f8cfd0dSArjun Vynipadath int ret;
2783f8cfd0dSArjun Vynipadath
2793f8cfd0dSArjun Vynipadath ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
2803f8cfd0dSArjun Vynipadath /* We ran out of TCAM entries. try programming hash region. */
2813f8cfd0dSArjun Vynipadath if (ret == -ENOMEM) {
2823f8cfd0dSArjun Vynipadath /* If the MAC address to be updated is in the hash addr
2833f8cfd0dSArjun Vynipadath * list, update it from the list
2843f8cfd0dSArjun Vynipadath */
2853f8cfd0dSArjun Vynipadath list_for_each_entry(entry, &adapter->mac_hlist, list) {
2863f8cfd0dSArjun Vynipadath if (entry->iface_mac) {
2873f8cfd0dSArjun Vynipadath ether_addr_copy(entry->addr, addr);
2883f8cfd0dSArjun Vynipadath goto set_hash;
2893f8cfd0dSArjun Vynipadath }
2903f8cfd0dSArjun Vynipadath }
2913f8cfd0dSArjun Vynipadath new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
2923f8cfd0dSArjun Vynipadath if (!new_entry)
2933f8cfd0dSArjun Vynipadath return -ENOMEM;
2943f8cfd0dSArjun Vynipadath ether_addr_copy(new_entry->addr, addr);
2953f8cfd0dSArjun Vynipadath new_entry->iface_mac = true;
2963f8cfd0dSArjun Vynipadath list_add_tail(&new_entry->list, &adapter->mac_hlist);
2973f8cfd0dSArjun Vynipadath set_hash:
2983f8cfd0dSArjun Vynipadath ret = cxgb4vf_set_addr_hash(pi);
2993f8cfd0dSArjun Vynipadath } else if (ret >= 0) {
3003f8cfd0dSArjun Vynipadath *tcam_idx = ret;
3013f8cfd0dSArjun Vynipadath ret = 0;
3023f8cfd0dSArjun Vynipadath }
3033f8cfd0dSArjun Vynipadath
3043f8cfd0dSArjun Vynipadath return ret;
3053f8cfd0dSArjun Vynipadath }
3063f8cfd0dSArjun Vynipadath
3075ad24defSHariprasad Shenai /*
308f7917c00SJeff Kirsher * Net device operations.
309f7917c00SJeff Kirsher * ======================
310f7917c00SJeff Kirsher */
311f7917c00SJeff Kirsher
312f7917c00SJeff Kirsher
313f7917c00SJeff Kirsher
314f7917c00SJeff Kirsher
315f7917c00SJeff Kirsher /*
316f7917c00SJeff Kirsher * Perform the MAC and PHY actions needed to enable a "port" (Virtual
317f7917c00SJeff Kirsher * Interface).
318f7917c00SJeff Kirsher */
link_start(struct net_device * dev)319f7917c00SJeff Kirsher static int link_start(struct net_device *dev)
320f7917c00SJeff Kirsher {
321f7917c00SJeff Kirsher int ret;
322f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
323f7917c00SJeff Kirsher
324f7917c00SJeff Kirsher /*
325f7917c00SJeff Kirsher * We do not set address filters and promiscuity here, the stack does
326f7917c00SJeff Kirsher * that step explicitly. Enable vlan accel.
327f7917c00SJeff Kirsher */
328f7917c00SJeff Kirsher ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
329f7917c00SJeff Kirsher true);
3303f8cfd0dSArjun Vynipadath if (ret == 0)
3313f8cfd0dSArjun Vynipadath ret = cxgb4vf_change_mac(pi, pi->viid,
3323f8cfd0dSArjun Vynipadath &pi->xact_addr_filt,
3333f8cfd0dSArjun Vynipadath dev->dev_addr, true);
334f7917c00SJeff Kirsher
335f7917c00SJeff Kirsher /*
336f7917c00SJeff Kirsher * We don't need to actually "start the link" itself since the
337f7917c00SJeff Kirsher * firmware will do that for us when the first Virtual Interface
338f7917c00SJeff Kirsher * is enabled on a port.
339f7917c00SJeff Kirsher */
340f7917c00SJeff Kirsher if (ret == 0)
341e2f4f4e9SArjun Vynipadath ret = t4vf_enable_pi(pi->adapter, pi, true, true);
3420913667aSArjun Vynipadath
343f7917c00SJeff Kirsher return ret;
344f7917c00SJeff Kirsher }
345f7917c00SJeff Kirsher
346f7917c00SJeff Kirsher /*
347f7917c00SJeff Kirsher * Name the MSI-X interrupts.
348f7917c00SJeff Kirsher */
name_msix_vecs(struct adapter * adapter)349f7917c00SJeff Kirsher static void name_msix_vecs(struct adapter *adapter)
350f7917c00SJeff Kirsher {
351f7917c00SJeff Kirsher int namelen = sizeof(adapter->msix_info[0].desc) - 1;
352f7917c00SJeff Kirsher int pidx;
353f7917c00SJeff Kirsher
354f7917c00SJeff Kirsher /*
355f7917c00SJeff Kirsher * Firmware events.
356f7917c00SJeff Kirsher */
357f7917c00SJeff Kirsher snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
358f7917c00SJeff Kirsher "%s-FWeventq", adapter->name);
359f7917c00SJeff Kirsher adapter->msix_info[MSIX_FW].desc[namelen] = 0;
360f7917c00SJeff Kirsher
361f7917c00SJeff Kirsher /*
362f7917c00SJeff Kirsher * Ethernet queues.
363f7917c00SJeff Kirsher */
364f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
365f7917c00SJeff Kirsher struct net_device *dev = adapter->port[pidx];
366f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
367f7917c00SJeff Kirsher int qs, msi;
368f7917c00SJeff Kirsher
369f7917c00SJeff Kirsher for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
370f7917c00SJeff Kirsher snprintf(adapter->msix_info[msi].desc, namelen,
371f7917c00SJeff Kirsher "%s-%d", dev->name, qs);
372f7917c00SJeff Kirsher adapter->msix_info[msi].desc[namelen] = 0;
373f7917c00SJeff Kirsher }
374f7917c00SJeff Kirsher }
375f7917c00SJeff Kirsher }
376f7917c00SJeff Kirsher
/*
 * Request all of our MSI-X resources.  Returns 0 on success; on failure
 * every IRQ granted so far is released before returning the error.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues: one vector per RX queue, starting at
	 * MSIX_IQFLINT.  rxq and msi advance in lock step.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	/* Unwind in reverse order: --rxq/--msi step back over exactly the
	 * queue IRQs that were granted, then the firmware event IRQ.
	 */
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}
414f7917c00SJeff Kirsher
415f7917c00SJeff Kirsher /*
416f7917c00SJeff Kirsher * Free our MSI-X resources.
417f7917c00SJeff Kirsher */
free_msix_queue_irqs(struct adapter * adapter)418f7917c00SJeff Kirsher static void free_msix_queue_irqs(struct adapter *adapter)
419f7917c00SJeff Kirsher {
420f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
421f7917c00SJeff Kirsher int rxq, msi;
422f7917c00SJeff Kirsher
423f7917c00SJeff Kirsher free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
424f7917c00SJeff Kirsher msi = MSIX_IQFLINT;
425f7917c00SJeff Kirsher for_each_ethrxq(s, rxq)
426f7917c00SJeff Kirsher free_irq(adapter->msix_info[msi++].vec,
427f7917c00SJeff Kirsher &s->ethrxq[rxq].rspq);
428f7917c00SJeff Kirsher }
429f7917c00SJeff Kirsher
430f7917c00SJeff Kirsher /*
431f7917c00SJeff Kirsher * Turn on NAPI and start up interrupts on a response queue.
432f7917c00SJeff Kirsher */
qenable(struct sge_rspq * rspq)433f7917c00SJeff Kirsher static void qenable(struct sge_rspq *rspq)
434f7917c00SJeff Kirsher {
435f7917c00SJeff Kirsher napi_enable(&rspq->napi);
436f7917c00SJeff Kirsher
437f7917c00SJeff Kirsher /*
438f7917c00SJeff Kirsher * 0-increment the Going To Sleep register to start the timer and
439f7917c00SJeff Kirsher * enable interrupts.
440f7917c00SJeff Kirsher */
441f7917c00SJeff Kirsher t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
442f612b815SHariprasad Shenai CIDXINC_V(0) |
443f612b815SHariprasad Shenai SEINTARM_V(rspq->intr_params) |
444f612b815SHariprasad Shenai INGRESSQID_V(rspq->cntxt_id));
445f7917c00SJeff Kirsher }
446f7917c00SJeff Kirsher
447f7917c00SJeff Kirsher /*
448f7917c00SJeff Kirsher * Enable NAPI scheduling and interrupt generation for all Receive Queues.
449f7917c00SJeff Kirsher */
enable_rx(struct adapter * adapter)450f7917c00SJeff Kirsher static void enable_rx(struct adapter *adapter)
451f7917c00SJeff Kirsher {
452f7917c00SJeff Kirsher int rxq;
453f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
454f7917c00SJeff Kirsher
455f7917c00SJeff Kirsher for_each_ethrxq(s, rxq)
456f7917c00SJeff Kirsher qenable(&s->ethrxq[rxq].rspq);
457f7917c00SJeff Kirsher qenable(&s->fw_evtq);
458f7917c00SJeff Kirsher
459f7917c00SJeff Kirsher /*
460f7917c00SJeff Kirsher * The interrupt queue doesn't use NAPI so we do the 0-increment of
461f7917c00SJeff Kirsher * its Going To Sleep register here to get it started.
462f7917c00SJeff Kirsher */
4633d78bfaaSArjun Vynipadath if (adapter->flags & CXGB4VF_USING_MSI)
464f7917c00SJeff Kirsher t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
465f612b815SHariprasad Shenai CIDXINC_V(0) |
466f612b815SHariprasad Shenai SEINTARM_V(s->intrq.intr_params) |
467f612b815SHariprasad Shenai INGRESSQID_V(s->intrq.cntxt_id));
468f7917c00SJeff Kirsher
469f7917c00SJeff Kirsher }
470f7917c00SJeff Kirsher
471f7917c00SJeff Kirsher /*
472f7917c00SJeff Kirsher * Wait until all NAPI handlers are descheduled.
473f7917c00SJeff Kirsher */
quiesce_rx(struct adapter * adapter)474f7917c00SJeff Kirsher static void quiesce_rx(struct adapter *adapter)
475f7917c00SJeff Kirsher {
476f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
477f7917c00SJeff Kirsher int rxq;
478f7917c00SJeff Kirsher
479f7917c00SJeff Kirsher for_each_ethrxq(s, rxq)
480f7917c00SJeff Kirsher napi_disable(&s->ethrxq[rxq].rspq.napi);
481f7917c00SJeff Kirsher napi_disable(&s->fw_evtq.napi);
482f7917c00SJeff Kirsher }
483f7917c00SJeff Kirsher
/*
 * Response queue handler for the firmware event queue.  Returns 0; any
 * unexpected message is logged and otherwise ignored.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 * Unwrap the inner message, rebinding opcode/cpl, then fall
		 * through to the CPL_SGE_EGR_UPDATE handling below.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			break;
		}
		cpl = (void *)p;
	}
		fallthrough;

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message. We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID. None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}
582f7917c00SJeff Kirsher
583f7917c00SJeff Kirsher /*
584f7917c00SJeff Kirsher * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
585f7917c00SJeff Kirsher * to use and initializes them. We support multiple "Queue Sets" per port if
586f7917c00SJeff Kirsher * we have MSI-X, otherwise just one queue set per port.
587f7917c00SJeff Kirsher */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & CXGB4VF_USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		/* One RX/TX queue pair per Queue Set; each RX queue gets
		 * its own MSI-X vector index (msix is advanced per queue).
		 */
		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	/* Release any queues allocated so far; safe to call with a
	 * partially-initialized SGE state.
	 */
	t4vf_free_sge_resources(adapter);
	return err;
}
690f7917c00SJeff Kirsher
691f7917c00SJeff Kirsher /*
692f7917c00SJeff Kirsher * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
693f7917c00SJeff Kirsher * queues. We configure the RSS CPU lookup table to distribute to the number
694f7917c00SJeff Kirsher * of HW receive queues, and the response queue lookup table to narrow that
695f7917c00SJeff Kirsher * down to the response queues actually configured for each "port" (Virtual
696f7917c00SJeff Kirsher * Interface). We always configure the RSS mapping for all ports since the
697f7917c00SJeff Kirsher * mapping table has plenty of entries.
698f7917c00SJeff Kirsher */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		/* Build the table of Absolute Queue IDs for this port's
		 * Queue Sets; the firmware replicates these across the
		 * port's RSS slice.
		 */
		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}
750f7917c00SJeff Kirsher
751f7917c00SJeff Kirsher /*
752f7917c00SJeff Kirsher * Bring the adapter up. Called whenever we go from no "ports" open to having
753f7917c00SJeff Kirsher * one open. This function performs the actions necessary to make an adapter
754f7917c00SJeff Kirsher * operational, such as completing the initialization of HW modules, and
755f7917c00SJeff Kirsher * enabling interrupts. Must be called with the rtnl lock held. (Note that
756f7917c00SJeff Kirsher * this is called "cxgb_up" in the PF Driver.)
757f7917c00SJeff Kirsher */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			/* Undo the queue allocation; the next open will
			 * retry the full one-time initialization.
			 */
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & CXGB4VF_USING_MSIX)
			name_msix_vecs(adapter);

		adapter->flags |= CXGB4VF_FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags &
		(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
	if (adapter->flags & CXGB4VF_USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	return 0;
}
808f7917c00SJeff Kirsher
809f7917c00SJeff Kirsher /*
810f7917c00SJeff Kirsher * Bring the adapter down. Called whenever the last "port" (Virtual
811f7917c00SJeff Kirsher * Interface) closed. (Note that this routine is called "cxgb_down" in the PF
812f7917c00SJeff Kirsher * Driver.)
813f7917c00SJeff Kirsher */
adapter_down(struct adapter * adapter)814f7917c00SJeff Kirsher static void adapter_down(struct adapter *adapter)
815f7917c00SJeff Kirsher {
816f7917c00SJeff Kirsher /*
817f7917c00SJeff Kirsher * Free interrupt resources.
818f7917c00SJeff Kirsher */
8193d78bfaaSArjun Vynipadath if (adapter->flags & CXGB4VF_USING_MSIX)
820f7917c00SJeff Kirsher free_msix_queue_irqs(adapter);
821f7917c00SJeff Kirsher else
822f7917c00SJeff Kirsher free_irq(adapter->pdev->irq, adapter);
823f7917c00SJeff Kirsher
824f7917c00SJeff Kirsher /*
825f7917c00SJeff Kirsher * Wait for NAPI handlers to finish.
826f7917c00SJeff Kirsher */
827f7917c00SJeff Kirsher quiesce_rx(adapter);
828f7917c00SJeff Kirsher }
829f7917c00SJeff Kirsher
830f7917c00SJeff Kirsher /*
831f7917c00SJeff Kirsher * Start up a net device.
832f7917c00SJeff Kirsher */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & CXGB4VF_FW_OK))
		return -ENXIO;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4vf_update_port_info(pi);
	if (err < 0)
		goto err_unwind;

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	/* If we brought the adapter up above (no other port is open yet),
	 * bring it back down so the next open starts from a clean state.
	 */
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}
881f7917c00SJeff Kirsher
882f7917c00SJeff Kirsher /*
883f7917c00SJeff Kirsher * Shut down a net device. This routine is called "cxgb_close" in the PF
884f7917c00SJeff Kirsher * Driver ...
885f7917c00SJeff Kirsher */
cxgb4vf_stop(struct net_device * dev)886f7917c00SJeff Kirsher static int cxgb4vf_stop(struct net_device *dev)
887f7917c00SJeff Kirsher {
888f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
889f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter;
890f7917c00SJeff Kirsher
891f7917c00SJeff Kirsher netif_tx_stop_all_queues(dev);
892f7917c00SJeff Kirsher netif_carrier_off(dev);
893e2f4f4e9SArjun Vynipadath t4vf_enable_pi(adapter, pi, false, false);
894f7917c00SJeff Kirsher
895f7917c00SJeff Kirsher clear_bit(pi->port_id, &adapter->open_device_map);
896f7917c00SJeff Kirsher if (adapter->open_device_map == 0)
897f7917c00SJeff Kirsher adapter_down(adapter);
898f7917c00SJeff Kirsher return 0;
899f7917c00SJeff Kirsher }
900f7917c00SJeff Kirsher
901f7917c00SJeff Kirsher /*
902f7917c00SJeff Kirsher * Translate our basic statistics into the standard "ifconfig" statistics.
903f7917c00SJeff Kirsher */
cxgb4vf_get_stats(struct net_device * dev)904f7917c00SJeff Kirsher static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
905f7917c00SJeff Kirsher {
906f7917c00SJeff Kirsher struct t4vf_port_stats stats;
907f7917c00SJeff Kirsher struct port_info *pi = netdev2pinfo(dev);
908f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter;
909f7917c00SJeff Kirsher struct net_device_stats *ns = &dev->stats;
910f7917c00SJeff Kirsher int err;
911f7917c00SJeff Kirsher
912f7917c00SJeff Kirsher spin_lock(&adapter->stats_lock);
913f7917c00SJeff Kirsher err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
914f7917c00SJeff Kirsher spin_unlock(&adapter->stats_lock);
915f7917c00SJeff Kirsher
916f7917c00SJeff Kirsher memset(ns, 0, sizeof(*ns));
917f7917c00SJeff Kirsher if (err)
918f7917c00SJeff Kirsher return ns;
919f7917c00SJeff Kirsher
920f7917c00SJeff Kirsher ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
921f7917c00SJeff Kirsher stats.tx_ucast_bytes + stats.tx_offload_bytes);
922f7917c00SJeff Kirsher ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
923f7917c00SJeff Kirsher stats.tx_ucast_frames + stats.tx_offload_frames);
924f7917c00SJeff Kirsher ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
925f7917c00SJeff Kirsher stats.rx_ucast_bytes);
926f7917c00SJeff Kirsher ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
927f7917c00SJeff Kirsher stats.rx_ucast_frames);
928f7917c00SJeff Kirsher ns->multicast = stats.rx_mcast_frames;
929f7917c00SJeff Kirsher ns->tx_errors = stats.tx_drop_frames;
930f7917c00SJeff Kirsher ns->rx_errors = stats.rx_err_frames;
931f7917c00SJeff Kirsher
932f7917c00SJeff Kirsher return ns;
933f7917c00SJeff Kirsher }
934f7917c00SJeff Kirsher
cxgb4vf_mac_sync(struct net_device * netdev,const u8 * mac_addr)935fe5d2709SHariprasad Shenai static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
936fe5d2709SHariprasad Shenai {
937fe5d2709SHariprasad Shenai struct port_info *pi = netdev_priv(netdev);
938fe5d2709SHariprasad Shenai struct adapter *adapter = pi->adapter;
939fe5d2709SHariprasad Shenai int ret;
940f7917c00SJeff Kirsher u64 mhash = 0;
941f7917c00SJeff Kirsher u64 uhash = 0;
942fe5d2709SHariprasad Shenai bool free = false;
943fe5d2709SHariprasad Shenai bool ucast = is_unicast_ether_addr(mac_addr);
944fe5d2709SHariprasad Shenai const u8 *maclist[1] = {mac_addr};
945fe5d2709SHariprasad Shenai struct hash_mac_addr *new_entry;
946fe5d2709SHariprasad Shenai
947fe5d2709SHariprasad Shenai ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
948fe5d2709SHariprasad Shenai NULL, ucast ? &uhash : &mhash, false);
949fe5d2709SHariprasad Shenai if (ret < 0)
950fe5d2709SHariprasad Shenai goto out;
951fe5d2709SHariprasad Shenai /* if hash != 0, then add the addr to hash addr list
952fe5d2709SHariprasad Shenai * so on the end we will calculate the hash for the
953fe5d2709SHariprasad Shenai * list and program it
954fe5d2709SHariprasad Shenai */
955fe5d2709SHariprasad Shenai if (uhash || mhash) {
956fe5d2709SHariprasad Shenai new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
957fe5d2709SHariprasad Shenai if (!new_entry)
958fe5d2709SHariprasad Shenai return -ENOMEM;
959fe5d2709SHariprasad Shenai ether_addr_copy(new_entry->addr, mac_addr);
960fe5d2709SHariprasad Shenai list_add_tail(&new_entry->list, &adapter->mac_hlist);
961fe5d2709SHariprasad Shenai ret = cxgb4vf_set_addr_hash(pi);
962fe5d2709SHariprasad Shenai }
963fe5d2709SHariprasad Shenai out:
964fe5d2709SHariprasad Shenai return ret < 0 ? ret : 0;
965fe5d2709SHariprasad Shenai }
966fe5d2709SHariprasad Shenai
cxgb4vf_mac_unsync(struct net_device * netdev,const u8 * mac_addr)967fe5d2709SHariprasad Shenai static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
968fe5d2709SHariprasad Shenai {
969fe5d2709SHariprasad Shenai struct port_info *pi = netdev_priv(netdev);
970fe5d2709SHariprasad Shenai struct adapter *adapter = pi->adapter;
971f7917c00SJeff Kirsher int ret;
972fe5d2709SHariprasad Shenai const u8 *maclist[1] = {mac_addr};
973fe5d2709SHariprasad Shenai struct hash_mac_addr *entry, *tmp;
974f7917c00SJeff Kirsher
975fe5d2709SHariprasad Shenai /* If the MAC address to be removed is in the hash addr
976fe5d2709SHariprasad Shenai * list, delete it from the list and update hash vector
977fe5d2709SHariprasad Shenai */
978fe5d2709SHariprasad Shenai list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
979fe5d2709SHariprasad Shenai if (ether_addr_equal(entry->addr, mac_addr)) {
980fe5d2709SHariprasad Shenai list_del(&entry->list);
981fe5d2709SHariprasad Shenai kfree(entry);
982fe5d2709SHariprasad Shenai return cxgb4vf_set_addr_hash(pi);
983fe5d2709SHariprasad Shenai }
984f7917c00SJeff Kirsher }
985f7917c00SJeff Kirsher
986fe5d2709SHariprasad Shenai ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
987fe5d2709SHariprasad Shenai return ret < 0 ? -EINVAL : 0;
988f7917c00SJeff Kirsher }
989f7917c00SJeff Kirsher
990f7917c00SJeff Kirsher /*
991f7917c00SJeff Kirsher * Set RX properties of a port, such as promiscruity, address filters, and MTU.
992f7917c00SJeff Kirsher * If @mtu is -1 it is left unchanged.
993f7917c00SJeff Kirsher */
/*
 * Set RX properties of a port: promiscuity, address filters and
 * all-multicast mode.  NOTE(review): the @mtu parameter is currently
 * ignored — t4vf_set_rxmode() is always passed -1 (leave MTU
 * unchanged); the only caller passes -1 anyway.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	bool promisc = (dev->flags & IFF_PROMISC) != 0;
	bool all_multi = (dev->flags & IFF_ALLMULTI) != 0;

	/* Reconcile the hardware unicast/multicast filters with the
	 * net_device's address lists before updating the global mode.
	 */
	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       promisc, all_multi, 1, -1, sleep_ok);
}
1005f7917c00SJeff Kirsher
1006f7917c00SJeff Kirsher /*
1007f7917c00SJeff Kirsher * Set the current receive modes on the device.
1008f7917c00SJeff Kirsher */
/*
 * Set the current receive modes on the device.  This is the
 * ndo_set_rx_mode callback, which returns void ...
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
1014f7917c00SJeff Kirsher
1015f7917c00SJeff Kirsher /*
1016f7917c00SJeff Kirsher * Find the entry in the interrupt holdoff timer value array which comes
1017f7917c00SJeff Kirsher * closest to the specified interrupt holdoff value.
1018f7917c00SJeff Kirsher */
closest_timer(const struct sge * s,int us)1019f7917c00SJeff Kirsher static int closest_timer(const struct sge *s, int us)
1020f7917c00SJeff Kirsher {
1021f7917c00SJeff Kirsher int i, timer_idx = 0, min_delta = INT_MAX;
1022f7917c00SJeff Kirsher
1023f7917c00SJeff Kirsher for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1024f7917c00SJeff Kirsher int delta = us - s->timer_val[i];
1025f7917c00SJeff Kirsher if (delta < 0)
1026f7917c00SJeff Kirsher delta = -delta;
1027f7917c00SJeff Kirsher if (delta < min_delta) {
1028f7917c00SJeff Kirsher min_delta = delta;
1029f7917c00SJeff Kirsher timer_idx = i;
1030f7917c00SJeff Kirsher }
1031f7917c00SJeff Kirsher }
1032f7917c00SJeff Kirsher return timer_idx;
1033f7917c00SJeff Kirsher }
1034f7917c00SJeff Kirsher
closest_thres(const struct sge * s,int thres)1035f7917c00SJeff Kirsher static int closest_thres(const struct sge *s, int thres)
1036f7917c00SJeff Kirsher {
1037f7917c00SJeff Kirsher int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
1038f7917c00SJeff Kirsher
1039f7917c00SJeff Kirsher for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1040f7917c00SJeff Kirsher delta = thres - s->counter_val[i];
1041f7917c00SJeff Kirsher if (delta < 0)
1042f7917c00SJeff Kirsher delta = -delta;
1043f7917c00SJeff Kirsher if (delta < min_delta) {
1044f7917c00SJeff Kirsher min_delta = delta;
1045f7917c00SJeff Kirsher pktcnt_idx = i;
1046f7917c00SJeff Kirsher }
1047f7917c00SJeff Kirsher }
1048f7917c00SJeff Kirsher return pktcnt_idx;
1049f7917c00SJeff Kirsher }
1050f7917c00SJeff Kirsher
1051f7917c00SJeff Kirsher /*
1052f7917c00SJeff Kirsher * Return a queue's interrupt hold-off time in us. 0 means no timer.
1053f7917c00SJeff Kirsher */
qtimer_val(const struct adapter * adapter,const struct sge_rspq * rspq)1054f7917c00SJeff Kirsher static unsigned int qtimer_val(const struct adapter *adapter,
1055f7917c00SJeff Kirsher const struct sge_rspq *rspq)
1056f7917c00SJeff Kirsher {
10571ecc7b7aSHariprasad Shenai unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
1058f7917c00SJeff Kirsher
1059f7917c00SJeff Kirsher return timer_idx < SGE_NTIMERS
1060f7917c00SJeff Kirsher ? adapter->sge.timer_val[timer_idx]
1061f7917c00SJeff Kirsher : 0;
1062f7917c00SJeff Kirsher }
1063f7917c00SJeff Kirsher
1064f7917c00SJeff Kirsher /**
1065f7917c00SJeff Kirsher * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1066f7917c00SJeff Kirsher * @adapter: the adapter
1067f7917c00SJeff Kirsher * @rspq: the RX response queue
1068f7917c00SJeff Kirsher * @us: the hold-off time in us, or 0 to disable timer
1069f7917c00SJeff Kirsher * @cnt: the hold-off packet count, or 0 to disable counter
1070f7917c00SJeff Kirsher *
1071f7917c00SJeff Kirsher * Sets an RX response queue's interrupt hold-off time and packet count.
1072f7917c00SJeff Kirsher * At least one of the two needs to be enabled for the queue to generate
1073f7917c00SJeff Kirsher * interrupts.
1074f7917c00SJeff Kirsher */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			/* Queue already exists in hardware: push the new
			 * threshold index to the firmware via a DMAQ
			 * parameter write.
			 */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}
1126f7917c00SJeff Kirsher
1127f7917c00SJeff Kirsher /*
1128f7917c00SJeff Kirsher * Return a version number to identify the type of adapter. The scheme is:
1129f7917c00SJeff Kirsher * - bits 0..9: chip version
1130f7917c00SJeff Kirsher * - bits 10..15: chip revision
1131f7917c00SJeff Kirsher */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).  Bits 0..9 carry the
	 * chip version; bits 10..15 hold the fixed VF revision marker.
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
1139f7917c00SJeff Kirsher
1140f7917c00SJeff Kirsher /*
1141f7917c00SJeff Kirsher * Execute the specified ioctl command.
1142f7917c00SJeff Kirsher */
cxgb4vf_do_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)1143f7917c00SJeff Kirsher static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1144f7917c00SJeff Kirsher {
1145f7917c00SJeff Kirsher int ret = 0;
1146f7917c00SJeff Kirsher
1147f7917c00SJeff Kirsher switch (cmd) {
1148f7917c00SJeff Kirsher /*
1149f7917c00SJeff Kirsher * The VF Driver doesn't have access to any of the other
1150f7917c00SJeff Kirsher * common Ethernet device ioctl()'s (like reading/writing
1151f7917c00SJeff Kirsher * PHY registers, etc.
1152f7917c00SJeff Kirsher */
1153f7917c00SJeff Kirsher
1154f7917c00SJeff Kirsher default:
1155f7917c00SJeff Kirsher ret = -EOPNOTSUPP;
1156f7917c00SJeff Kirsher break;
1157f7917c00SJeff Kirsher }
1158f7917c00SJeff Kirsher return ret;
1159f7917c00SJeff Kirsher }
1160f7917c00SJeff Kirsher
1161f7917c00SJeff Kirsher /*
1162f7917c00SJeff Kirsher * Change the device's MTU.
1163f7917c00SJeff Kirsher */
cxgb4vf_change_mtu(struct net_device * dev,int new_mtu)1164f7917c00SJeff Kirsher static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1165f7917c00SJeff Kirsher {
1166f7917c00SJeff Kirsher int ret;
1167f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
1168f7917c00SJeff Kirsher
1169f7917c00SJeff Kirsher ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1170f7917c00SJeff Kirsher -1, -1, -1, -1, true);
1171f7917c00SJeff Kirsher if (!ret)
1172f7917c00SJeff Kirsher dev->mtu = new_mtu;
1173f7917c00SJeff Kirsher return ret;
1174f7917c00SJeff Kirsher }
1175f7917c00SJeff Kirsher
static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* There is no support for separate RX/TX VLAN acceleration
	 * enable/disable, so force the TX flag to track the RX flag.
	 */
	features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
1190f7917c00SJeff Kirsher
static int cxgb4vf_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t delta = dev->features ^ features;

	/* Only a VLAN RX offload toggle requires a firmware update. */
	if (!(delta & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
			features & NETIF_F_HW_VLAN_CTAG_TX, 0);
	return 0;
}
1203f7917c00SJeff Kirsher
1204f7917c00SJeff Kirsher /*
1205f7917c00SJeff Kirsher * Change the devices MAC address.
1206f7917c00SJeff Kirsher */
cxgb4vf_set_mac_addr(struct net_device * dev,void * _addr)1207f7917c00SJeff Kirsher static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1208f7917c00SJeff Kirsher {
1209f7917c00SJeff Kirsher int ret;
1210f7917c00SJeff Kirsher struct sockaddr *addr = _addr;
1211f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
1212f7917c00SJeff Kirsher
1213f7917c00SJeff Kirsher if (!is_valid_ether_addr(addr->sa_data))
1214504f9b5aSDanny Kukawka return -EADDRNOTAVAIL;
1215f7917c00SJeff Kirsher
12163f8cfd0dSArjun Vynipadath ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
1217f7917c00SJeff Kirsher addr->sa_data, true);
1218f7917c00SJeff Kirsher if (ret < 0)
1219f7917c00SJeff Kirsher return ret;
1220f7917c00SJeff Kirsher
1221a05e4c0aSJakub Kicinski eth_hw_addr_set(dev, addr->sa_data);
1222f7917c00SJeff Kirsher return 0;
1223f7917c00SJeff Kirsher }
1224f7917c00SJeff Kirsher
1225f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
1226f7917c00SJeff Kirsher /*
1227f7917c00SJeff Kirsher * Poll all of our receive queues. This is called outside of normal interrupt
1228f7917c00SJeff Kirsher * context.
1229f7917c00SJeff Kirsher */
cxgb4vf_poll_controller(struct net_device * dev)1230f7917c00SJeff Kirsher static void cxgb4vf_poll_controller(struct net_device *dev)
1231f7917c00SJeff Kirsher {
1232f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
1233f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter;
1234f7917c00SJeff Kirsher
12353d78bfaaSArjun Vynipadath if (adapter->flags & CXGB4VF_USING_MSIX) {
1236f7917c00SJeff Kirsher struct sge_eth_rxq *rxq;
1237f7917c00SJeff Kirsher int nqsets;
1238f7917c00SJeff Kirsher
1239f7917c00SJeff Kirsher rxq = &adapter->sge.ethrxq[pi->first_qset];
1240f7917c00SJeff Kirsher for (nqsets = pi->nqsets; nqsets; nqsets--) {
1241f7917c00SJeff Kirsher t4vf_sge_intr_msix(0, &rxq->rspq);
1242f7917c00SJeff Kirsher rxq++;
1243f7917c00SJeff Kirsher }
1244f7917c00SJeff Kirsher } else
1245f7917c00SJeff Kirsher t4vf_intr_handler(adapter)(0, adapter);
1246f7917c00SJeff Kirsher }
1247f7917c00SJeff Kirsher #endif
1248f7917c00SJeff Kirsher
1249f7917c00SJeff Kirsher /*
1250f7917c00SJeff Kirsher * Ethtool operations.
1251f7917c00SJeff Kirsher * ===================
1252f7917c00SJeff Kirsher *
1253f7917c00SJeff Kirsher * Note that we don't support any ethtool operations which change the physical
1254f7917c00SJeff Kirsher * state of the port to which we're linked.
1255f7917c00SJeff Kirsher */
1256f7917c00SJeff Kirsher
1257eb97ad99SGanesh Goudar /**
1258eb97ad99SGanesh Goudar * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1259eb97ad99SGanesh Goudar * @port_type: Firmware Port Type
1260eb97ad99SGanesh Goudar * @mod_type: Firmware Module Type
1261eb97ad99SGanesh Goudar *
1262eb97ad99SGanesh Goudar * Translate Firmware Port/Module type to Ethtool Port Type.
1263eb97ad99SGanesh Goudar */
from_fw_port_mod_type(enum fw_port_type port_type,enum fw_port_module_type mod_type)1264eb97ad99SGanesh Goudar static int from_fw_port_mod_type(enum fw_port_type port_type,
1265eb97ad99SGanesh Goudar enum fw_port_module_type mod_type)
1266f7917c00SJeff Kirsher {
1267eb97ad99SGanesh Goudar if (port_type == FW_PORT_TYPE_BT_SGMII ||
1268eb97ad99SGanesh Goudar port_type == FW_PORT_TYPE_BT_XFI ||
1269eb97ad99SGanesh Goudar port_type == FW_PORT_TYPE_BT_XAUI) {
1270eb97ad99SGanesh Goudar return PORT_TP;
1271eb97ad99SGanesh Goudar } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1272eb97ad99SGanesh Goudar port_type == FW_PORT_TYPE_FIBER_XAUI) {
1273eb97ad99SGanesh Goudar return PORT_FIBRE;
1274eb97ad99SGanesh Goudar } else if (port_type == FW_PORT_TYPE_SFP ||
1275eb97ad99SGanesh Goudar port_type == FW_PORT_TYPE_QSFP_10G ||
1276eb97ad99SGanesh Goudar port_type == FW_PORT_TYPE_QSA ||
1277c3168cabSGanesh Goudar port_type == FW_PORT_TYPE_QSFP ||
1278c3168cabSGanesh Goudar port_type == FW_PORT_TYPE_CR4_QSFP ||
1279c3168cabSGanesh Goudar port_type == FW_PORT_TYPE_CR_QSFP ||
1280c3168cabSGanesh Goudar port_type == FW_PORT_TYPE_CR2_QSFP ||
1281c3168cabSGanesh Goudar port_type == FW_PORT_TYPE_SFP28) {
1282eb97ad99SGanesh Goudar if (mod_type == FW_PORT_MOD_TYPE_LR ||
1283eb97ad99SGanesh Goudar mod_type == FW_PORT_MOD_TYPE_SR ||
1284eb97ad99SGanesh Goudar mod_type == FW_PORT_MOD_TYPE_ER ||
1285eb97ad99SGanesh Goudar mod_type == FW_PORT_MOD_TYPE_LRM)
1286eb97ad99SGanesh Goudar return PORT_FIBRE;
1287eb97ad99SGanesh Goudar else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1288eb97ad99SGanesh Goudar mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1289eb97ad99SGanesh Goudar return PORT_DA;
12905ad24defSHariprasad Shenai else
1291eb97ad99SGanesh Goudar return PORT_OTHER;
1292c3168cabSGanesh Goudar } else if (port_type == FW_PORT_TYPE_KR4_100G ||
1293b39ab140SGanesh Goudar port_type == FW_PORT_TYPE_KR_SFP28 ||
1294b39ab140SGanesh Goudar port_type == FW_PORT_TYPE_KR_XLAUI) {
1295c3168cabSGanesh Goudar return PORT_NONE;
12965ad24defSHariprasad Shenai }
12975ad24defSHariprasad Shenai
1298eb97ad99SGanesh Goudar return PORT_OTHER;
1299eb97ad99SGanesh Goudar }
1300eb97ad99SGanesh Goudar
1301eb97ad99SGanesh Goudar /**
1302eb97ad99SGanesh Goudar * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1303eb97ad99SGanesh Goudar * @port_type: Firmware Port Type
1304eb97ad99SGanesh Goudar * @fw_caps: Firmware Port Capabilities
1305eb97ad99SGanesh Goudar * @link_mode_mask: ethtool Link Mode Mask
1306eb97ad99SGanesh Goudar *
1307eb97ad99SGanesh Goudar * Translate a Firmware Port Capabilities specification to an ethtool
1308eb97ad99SGanesh Goudar * Link Mode Mask.
1309eb97ad99SGanesh Goudar */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   unsigned int fw_caps,
			   unsigned long *link_mode_mask)
{
	/* Unconditionally set one ethtool link mode bit in the caller's
	 * mask.
	 */
#define SET_LMM(__lmm_name) \
		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
			  link_mode_mask)

	/* Set an ethtool link mode bit only when the corresponding 32-bit
	 * Firmware Port Capability is present in fw_caps.
	 */
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
	do { \
		if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
			SET_LMM(__lmm_name); \
	} while (0)

	/* First translate the port-type-specific speed/media capabilities. */
	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		/* BASE-T copper PHYs. */
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR_SFP28:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
		break;

	case FW_PORT_TYPE_KR_XLAUI:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
		break;

	default:
		/* Unknown port type: report no speed/media modes. */
		break;
	}

	/* Translate the FEC capabilities; if no FEC mode is advertised at
	 * all, report FEC as off.
	 */
	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
	} else {
		SET_LMM(FEC_NONE);
	}

	/* Autonegotiation and Pause capabilities are port-type independent. */
	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

#undef FW_CAPS_TO_LMM
#undef SET_LMM
}
1435eb97ad99SGanesh Goudar
/* ethtool get_link_ksettings handler: report port type, PHY/MDIO info,
 * supported/advertised/peer link modes and current speed/duplex/autoneg.
 */
static int cxgb4vf_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	/* For the nonce, the Firmware doesn't send up Port State changes
	 * when the Virtual Interface attached to the Port is down.  So
	 * if it's down, let's grab any changes.
	 */
	if (!netif_running(dev))
		(void)t4vf_update_port_info(pi);

	/* Start from empty link mode masks and fill them in below. */
	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		/* Only BT_SGMII PHYs speak Clause 22; everything else with
		 * an MDIO address is Clause 45.
		 */
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		/* No MDIO-addressable PHY; 255 marks the address invalid. */
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	/* Translate the three Firmware capability words into the
	 * corresponding ethtool link mode masks.
	 */
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
		       link_ksettings->link_modes.lp_advertising);

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		base->speed = pi->link_cfg.speed;
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}
1490f7917c00SJeff Kirsher
14919a7b96b3SGanesh Goudar /* Translate the Firmware FEC value into the ethtool value. */
fwcap_to_eth_fec(unsigned int fw_fec)14929a7b96b3SGanesh Goudar static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
14939a7b96b3SGanesh Goudar {
14949a7b96b3SGanesh Goudar unsigned int eth_fec = 0;
14959a7b96b3SGanesh Goudar
14969a7b96b3SGanesh Goudar if (fw_fec & FW_PORT_CAP32_FEC_RS)
14979a7b96b3SGanesh Goudar eth_fec |= ETHTOOL_FEC_RS;
14989a7b96b3SGanesh Goudar if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
14999a7b96b3SGanesh Goudar eth_fec |= ETHTOOL_FEC_BASER;
15009a7b96b3SGanesh Goudar
15019a7b96b3SGanesh Goudar /* if nothing is set, then FEC is off */
15029a7b96b3SGanesh Goudar if (!eth_fec)
15039a7b96b3SGanesh Goudar eth_fec = ETHTOOL_FEC_OFF;
15049a7b96b3SGanesh Goudar
15059a7b96b3SGanesh Goudar return eth_fec;
15069a7b96b3SGanesh Goudar }
15079a7b96b3SGanesh Goudar
15089a7b96b3SGanesh Goudar /* Translate Common Code FEC value into ethtool value. */
cc_to_eth_fec(unsigned int cc_fec)15099a7b96b3SGanesh Goudar static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
15109a7b96b3SGanesh Goudar {
15119a7b96b3SGanesh Goudar unsigned int eth_fec = 0;
15129a7b96b3SGanesh Goudar
15139a7b96b3SGanesh Goudar if (cc_fec & FEC_AUTO)
15149a7b96b3SGanesh Goudar eth_fec |= ETHTOOL_FEC_AUTO;
15159a7b96b3SGanesh Goudar if (cc_fec & FEC_RS)
15169a7b96b3SGanesh Goudar eth_fec |= ETHTOOL_FEC_RS;
15179a7b96b3SGanesh Goudar if (cc_fec & FEC_BASER_RS)
15189a7b96b3SGanesh Goudar eth_fec |= ETHTOOL_FEC_BASER;
15199a7b96b3SGanesh Goudar
15209a7b96b3SGanesh Goudar /* if nothing is set, then FEC is off */
15219a7b96b3SGanesh Goudar if (!eth_fec)
15229a7b96b3SGanesh Goudar eth_fec = ETHTOOL_FEC_OFF;
15239a7b96b3SGanesh Goudar
15249a7b96b3SGanesh Goudar return eth_fec;
15259a7b96b3SGanesh Goudar }
15269a7b96b3SGanesh Goudar
cxgb4vf_get_fecparam(struct net_device * dev,struct ethtool_fecparam * fec)15279a7b96b3SGanesh Goudar static int cxgb4vf_get_fecparam(struct net_device *dev,
15289a7b96b3SGanesh Goudar struct ethtool_fecparam *fec)
15299a7b96b3SGanesh Goudar {
15309a7b96b3SGanesh Goudar const struct port_info *pi = netdev_priv(dev);
15319a7b96b3SGanesh Goudar const struct link_config *lc = &pi->link_cfg;
15329a7b96b3SGanesh Goudar
15339a7b96b3SGanesh Goudar /* Translate the Firmware FEC Support into the ethtool value. We
15349a7b96b3SGanesh Goudar * always support IEEE 802.3 "automatic" selection of Link FEC type if
15359a7b96b3SGanesh Goudar * any FEC is supported.
15369a7b96b3SGanesh Goudar */
15379a7b96b3SGanesh Goudar fec->fec = fwcap_to_eth_fec(lc->pcaps);
15389a7b96b3SGanesh Goudar if (fec->fec != ETHTOOL_FEC_OFF)
15399a7b96b3SGanesh Goudar fec->fec |= ETHTOOL_FEC_AUTO;
15409a7b96b3SGanesh Goudar
15419a7b96b3SGanesh Goudar /* Translate the current internal FEC parameters into the
15429a7b96b3SGanesh Goudar * ethtool values.
15439a7b96b3SGanesh Goudar */
15449a7b96b3SGanesh Goudar fec->active_fec = cc_to_eth_fec(lc->fec);
15459a7b96b3SGanesh Goudar return 0;
15469a7b96b3SGanesh Goudar }
15479a7b96b3SGanesh Goudar
1548f7917c00SJeff Kirsher /*
1549f7917c00SJeff Kirsher * Return our driver information.
1550f7917c00SJeff Kirsher */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	/* Driver name and PCI bus address of the underlying VF function. */
	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	/* Report both the main firmware revision (fwrev) and the TP
	 * revision (tprev), each decoded as major.minor.micro.build.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}
1570f7917c00SJeff Kirsher
1571f7917c00SJeff Kirsher /*
1572f7917c00SJeff Kirsher * Return current adapter message level.
1573f7917c00SJeff Kirsher */
cxgb4vf_get_msglevel(struct net_device * dev)1574f7917c00SJeff Kirsher static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1575f7917c00SJeff Kirsher {
1576f7917c00SJeff Kirsher return netdev2adap(dev)->msg_enable;
1577f7917c00SJeff Kirsher }
1578f7917c00SJeff Kirsher
1579f7917c00SJeff Kirsher /*
1580f7917c00SJeff Kirsher * Set current adapter message level.
1581f7917c00SJeff Kirsher */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	struct adapter *adapter = netdev2adap(dev);

	adapter->msg_enable = msglevel;
}
1586f7917c00SJeff Kirsher
1587f7917c00SJeff Kirsher /*
1588f7917c00SJeff Kirsher * Return the device's current Queue Set ring size parameters along with the
1589f7917c00SJeff Kirsher * allowed maximum values. Since ethtool doesn't understand the concept of
1590f7917c00SJeff Kirsher * multi-queue devices, we just return the current values associated with the
1591f7917c00SJeff Kirsher * first Queue Set.
1592f7917c00SJeff Kirsher */
cxgb4vf_get_ringparam(struct net_device * dev,struct ethtool_ringparam * rp,struct kernel_ethtool_ringparam * kernel_rp,struct netlink_ext_ack * extack)1593f7917c00SJeff Kirsher static void cxgb4vf_get_ringparam(struct net_device *dev,
159474624944SHao Chen struct ethtool_ringparam *rp,
159574624944SHao Chen struct kernel_ethtool_ringparam *kernel_rp,
159674624944SHao Chen struct netlink_ext_ack *extack)
1597f7917c00SJeff Kirsher {
1598f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
1599f7917c00SJeff Kirsher const struct sge *s = &pi->adapter->sge;
1600f7917c00SJeff Kirsher
1601f7917c00SJeff Kirsher rp->rx_max_pending = MAX_RX_BUFFERS;
1602f7917c00SJeff Kirsher rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1603f7917c00SJeff Kirsher rp->rx_jumbo_max_pending = 0;
1604f7917c00SJeff Kirsher rp->tx_max_pending = MAX_TXQ_ENTRIES;
1605f7917c00SJeff Kirsher
1606f7917c00SJeff Kirsher rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1607f7917c00SJeff Kirsher rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1608f7917c00SJeff Kirsher rp->rx_jumbo_pending = 0;
1609f7917c00SJeff Kirsher rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1610f7917c00SJeff Kirsher }
1611f7917c00SJeff Kirsher
1612f7917c00SJeff Kirsher /*
1613f7917c00SJeff Kirsher * Set the Queue Set ring size parameters for the device. Again, since
1614f7917c00SJeff Kirsher * ethtool doesn't allow for the concept of multiple queues per device, we'll
1615f7917c00SJeff Kirsher * apply these new values across all of the Queue Sets associated with the
1616f7917c00SJeff Kirsher * device -- after vetting them of course!
1617f7917c00SJeff Kirsher */
cxgb4vf_set_ringparam(struct net_device * dev,struct ethtool_ringparam * rp,struct kernel_ethtool_ringparam * kernel_rp,struct netlink_ext_ack * extack)1618f7917c00SJeff Kirsher static int cxgb4vf_set_ringparam(struct net_device *dev,
161974624944SHao Chen struct ethtool_ringparam *rp,
162074624944SHao Chen struct kernel_ethtool_ringparam *kernel_rp,
162174624944SHao Chen struct netlink_ext_ack *extack)
1622f7917c00SJeff Kirsher {
1623f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
1624f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter;
1625f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
1626f7917c00SJeff Kirsher int qs;
1627f7917c00SJeff Kirsher
1628f7917c00SJeff Kirsher if (rp->rx_pending > MAX_RX_BUFFERS ||
1629f7917c00SJeff Kirsher rp->rx_jumbo_pending ||
1630f7917c00SJeff Kirsher rp->tx_pending > MAX_TXQ_ENTRIES ||
1631f7917c00SJeff Kirsher rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1632f7917c00SJeff Kirsher rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1633f7917c00SJeff Kirsher rp->rx_pending < MIN_FL_ENTRIES ||
1634f7917c00SJeff Kirsher rp->tx_pending < MIN_TXQ_ENTRIES)
1635f7917c00SJeff Kirsher return -EINVAL;
1636f7917c00SJeff Kirsher
16373d78bfaaSArjun Vynipadath if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
1638f7917c00SJeff Kirsher return -EBUSY;
1639f7917c00SJeff Kirsher
1640f7917c00SJeff Kirsher for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1641f7917c00SJeff Kirsher s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1642f7917c00SJeff Kirsher s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1643f7917c00SJeff Kirsher s->ethtxq[qs].q.size = rp->tx_pending;
1644f7917c00SJeff Kirsher }
1645f7917c00SJeff Kirsher return 0;
1646f7917c00SJeff Kirsher }
1647f7917c00SJeff Kirsher
1648f7917c00SJeff Kirsher /*
1649f7917c00SJeff Kirsher * Return the interrupt holdoff timer and count for the first Queue Set on the
1650f7917c00SJeff Kirsher * device. Our extension ioctl() (the cxgbtool interface) allows the
1651f7917c00SJeff Kirsher * interrupt holdoff timer to be read on all of the device's Queue Sets.
1652f7917c00SJeff Kirsher */
cxgb4vf_get_coalesce(struct net_device * dev,struct ethtool_coalesce * coalesce,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)1653f7917c00SJeff Kirsher static int cxgb4vf_get_coalesce(struct net_device *dev,
1654f3ccfda1SYufeng Mo struct ethtool_coalesce *coalesce,
1655f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal,
1656f3ccfda1SYufeng Mo struct netlink_ext_ack *extack)
1657f7917c00SJeff Kirsher {
1658f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
1659f7917c00SJeff Kirsher const struct adapter *adapter = pi->adapter;
1660f7917c00SJeff Kirsher const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1661f7917c00SJeff Kirsher
1662f7917c00SJeff Kirsher coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1663f7917c00SJeff Kirsher coalesce->rx_max_coalesced_frames =
16641ecc7b7aSHariprasad Shenai ((rspq->intr_params & QINTR_CNT_EN_F)
1665f7917c00SJeff Kirsher ? adapter->sge.counter_val[rspq->pktcnt_idx]
1666f7917c00SJeff Kirsher : 0);
1667f7917c00SJeff Kirsher return 0;
1668f7917c00SJeff Kirsher }
1669f7917c00SJeff Kirsher
1670f7917c00SJeff Kirsher /*
1671f7917c00SJeff Kirsher * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1672f7917c00SJeff Kirsher * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1673f7917c00SJeff Kirsher * the interrupt holdoff timer on any of the device's Queue Sets.
1674f7917c00SJeff Kirsher */
cxgb4vf_set_coalesce(struct net_device * dev,struct ethtool_coalesce * coalesce,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)1675f7917c00SJeff Kirsher static int cxgb4vf_set_coalesce(struct net_device *dev,
1676f3ccfda1SYufeng Mo struct ethtool_coalesce *coalesce,
1677f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal,
1678f3ccfda1SYufeng Mo struct netlink_ext_ack *extack)
1679f7917c00SJeff Kirsher {
1680f7917c00SJeff Kirsher const struct port_info *pi = netdev_priv(dev);
1681f7917c00SJeff Kirsher struct adapter *adapter = pi->adapter;
1682f7917c00SJeff Kirsher
1683f7917c00SJeff Kirsher return set_rxq_intr_params(adapter,
1684f7917c00SJeff Kirsher &adapter->sge.ethrxq[pi->first_qset].rspq,
1685f7917c00SJeff Kirsher coalesce->rx_coalesce_usecs,
1686f7917c00SJeff Kirsher coalesce->rx_max_coalesced_frames);
1687f7917c00SJeff Kirsher }
1688f7917c00SJeff Kirsher
1689f7917c00SJeff Kirsher /*
1690f7917c00SJeff Kirsher * Report current port link pause parameter settings.
1691f7917c00SJeff Kirsher */
cxgb4vf_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * pauseparam)1692f7917c00SJeff Kirsher static void cxgb4vf_get_pauseparam(struct net_device *dev,
1693f7917c00SJeff Kirsher struct ethtool_pauseparam *pauseparam)
1694f7917c00SJeff Kirsher {
1695f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
1696f7917c00SJeff Kirsher
1697f7917c00SJeff Kirsher pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
16980caeaf6aSRahul Lakkireddy pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
16990caeaf6aSRahul Lakkireddy pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
1700f7917c00SJeff Kirsher }
1701f7917c00SJeff Kirsher
1702f7917c00SJeff Kirsher /*
1703f7917c00SJeff Kirsher * Identify the port by blinking the port's LED.
1704f7917c00SJeff Kirsher */
cxgb4vf_phys_id(struct net_device * dev,enum ethtool_phys_id_state state)1705f7917c00SJeff Kirsher static int cxgb4vf_phys_id(struct net_device *dev,
1706f7917c00SJeff Kirsher enum ethtool_phys_id_state state)
1707f7917c00SJeff Kirsher {
1708f7917c00SJeff Kirsher unsigned int val;
1709f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
1710f7917c00SJeff Kirsher
1711f7917c00SJeff Kirsher if (state == ETHTOOL_ID_ACTIVE)
1712f7917c00SJeff Kirsher val = 0xffff;
1713f7917c00SJeff Kirsher else if (state == ETHTOOL_ID_INACTIVE)
1714f7917c00SJeff Kirsher val = 0;
1715f7917c00SJeff Kirsher else
1716f7917c00SJeff Kirsher return -EINVAL;
1717f7917c00SJeff Kirsher
1718f7917c00SJeff Kirsher return t4vf_identify_port(pi->adapter, pi->viid, val);
1719f7917c00SJeff Kirsher }
1720f7917c00SJeff Kirsher
1721f7917c00SJeff Kirsher /*
1722f7917c00SJeff Kirsher * Port stats maintained per queue of the port.
1723f7917c00SJeff Kirsher */
struct queue_port_stats {
	u64 tso;	/* TSO requests (accumulated from each TX queue) */
	u64 tx_csum;	/* TX checksum offloads */
	u64 rx_csum;	/* RX checksum offloads */
	u64 vlan_ex;	/* VLAN tag extractions (RX) */
	u64 vlan_ins;	/* VLAN tag insertions (TX) */
	u64 lro_pkts;	/* GRO/LRO packets delivered */
	u64 lro_merged;	/* GRO/LRO packets merged */
};
1733f7917c00SJeff Kirsher
1734f7917c00SJeff Kirsher /*
1735f7917c00SJeff Kirsher * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1736f7917c00SJeff Kirsher * these need to match the order of statistics returned by
1737f7917c00SJeff Kirsher * t4vf_get_port_stats().
1738f7917c00SJeff Kirsher */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
	"GROPackets        ",
	"GROMerged         ",
};
1772f7917c00SJeff Kirsher
1773f7917c00SJeff Kirsher /*
1774f7917c00SJeff Kirsher * Return the number of statistics in the specified statistics set.
1775f7917c00SJeff Kirsher */
cxgb4vf_get_sset_count(struct net_device * dev,int sset)1776f7917c00SJeff Kirsher static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1777f7917c00SJeff Kirsher {
1778f7917c00SJeff Kirsher switch (sset) {
1779f7917c00SJeff Kirsher case ETH_SS_STATS:
1780f7917c00SJeff Kirsher return ARRAY_SIZE(stats_strings);
1781f7917c00SJeff Kirsher default:
1782f7917c00SJeff Kirsher return -EOPNOTSUPP;
1783f7917c00SJeff Kirsher }
1784f7917c00SJeff Kirsher /*NOTREACHED*/
1785f7917c00SJeff Kirsher }
1786f7917c00SJeff Kirsher
1787f7917c00SJeff Kirsher /*
1788f7917c00SJeff Kirsher * Return the strings for the specified statistics set.
1789f7917c00SJeff Kirsher */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	/* Only the ETH_SS_STATS string set is implemented. */
	if (sset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
1800f7917c00SJeff Kirsher
1801f7917c00SJeff Kirsher /*
1802f7917c00SJeff Kirsher * Small utility routine to accumulate queue statistics across the queues of
1803f7917c00SJeff Kirsher * a "port".
1804f7917c00SJeff Kirsher */
collect_sge_port_stats(const struct adapter * adapter,const struct port_info * pi,struct queue_port_stats * stats)1805f7917c00SJeff Kirsher static void collect_sge_port_stats(const struct adapter *adapter,
1806f7917c00SJeff Kirsher const struct port_info *pi,
1807f7917c00SJeff Kirsher struct queue_port_stats *stats)
1808f7917c00SJeff Kirsher {
1809f7917c00SJeff Kirsher const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1810f7917c00SJeff Kirsher const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1811f7917c00SJeff Kirsher int qs;
1812f7917c00SJeff Kirsher
1813f7917c00SJeff Kirsher memset(stats, 0, sizeof(*stats));
1814f7917c00SJeff Kirsher for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1815f7917c00SJeff Kirsher stats->tso += txq->tso;
1816f7917c00SJeff Kirsher stats->tx_csum += txq->tx_cso;
1817f7917c00SJeff Kirsher stats->rx_csum += rxq->stats.rx_cso;
1818f7917c00SJeff Kirsher stats->vlan_ex += rxq->stats.vlan_ex;
1819f7917c00SJeff Kirsher stats->vlan_ins += txq->vlan_ins;
1820f7917c00SJeff Kirsher stats->lro_pkts += rxq->stats.lro_pkts;
1821f7917c00SJeff Kirsher stats->lro_merged += rxq->stats.lro_merged;
1822f7917c00SJeff Kirsher }
1823f7917c00SJeff Kirsher }
1824f7917c00SJeff Kirsher
1825f7917c00SJeff Kirsher /*
1826f7917c00SJeff Kirsher * Return the ETH_SS_STATS statistics set.
1827f7917c00SJeff Kirsher */
/*
 * Return the ETH_SS_STATS statistics set.  The data buffer is laid out as
 * a struct t4vf_port_stats (firmware-maintained port stats) followed
 * immediately by a struct queue_port_stats (software SGE queue stats),
 * matching the order of stats_strings[].
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;

	/* If the firmware query fails, report zeros rather than garbage. */
	if (t4vf_get_port_stats(adapter, pi->pidx,
				(struct t4vf_port_stats *)data))
		memset(data, 0, sizeof(struct t4vf_port_stats));

	/* The SGE queue statistics follow the port statistics. */
	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
1842f7917c00SJeff Kirsher
1843f7917c00SJeff Kirsher /*
1844f7917c00SJeff Kirsher * Return the size of our register map.
1845f7917c00SJeff Kirsher */
cxgb4vf_get_regs_len(struct net_device * dev)1846f7917c00SJeff Kirsher static int cxgb4vf_get_regs_len(struct net_device *dev)
1847f7917c00SJeff Kirsher {
1848f7917c00SJeff Kirsher return T4VF_REGMAP_SIZE;
1849f7917c00SJeff Kirsher }
1850f7917c00SJeff Kirsher
1851f7917c00SJeff Kirsher /*
1852f7917c00SJeff Kirsher * Dump a block of registers, start to end inclusive, into a buffer.
1853f7917c00SJeff Kirsher */
reg_block_dump(struct adapter * adapter,void * regbuf,unsigned int start,unsigned int end)1854f7917c00SJeff Kirsher static void reg_block_dump(struct adapter *adapter, void *regbuf,
1855f7917c00SJeff Kirsher unsigned int start, unsigned int end)
1856f7917c00SJeff Kirsher {
1857f7917c00SJeff Kirsher u32 *bp = regbuf + start - T4VF_REGMAP_START;
1858f7917c00SJeff Kirsher
1859f7917c00SJeff Kirsher for ( ; start <= end; start += sizeof(u32)) {
1860f7917c00SJeff Kirsher /*
1861f7917c00SJeff Kirsher * Avoid reading the Mailbox Control register since that
1862f7917c00SJeff Kirsher * can trigger a Mailbox Ownership Arbitration cycle and
1863f7917c00SJeff Kirsher * interfere with communication with the firmware.
1864f7917c00SJeff Kirsher */
1865f7917c00SJeff Kirsher if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1866f7917c00SJeff Kirsher *bp++ = 0xffff;
1867f7917c00SJeff Kirsher else
1868f7917c00SJeff Kirsher *bp++ = t4_read_reg(adapter, start);
1869f7917c00SJeff Kirsher }
1870f7917c00SJeff Kirsher }
1871f7917c00SJeff Kirsher
1872f7917c00SJeff Kirsher /*
1873f7917c00SJeff Kirsher * Copy our entire register map into the provided buffer.
1874f7917c00SJeff Kirsher */
/*
 * Copy our entire register map into the provided buffer, one module block
 * at a time (SGE, MPS, PL, CIM, then the Mailbox Data area).
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.  Zero first so any
	 * gap between blocks reads back as 0.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map, so the end of the
	 * PL block depends on the chip generation.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
					    ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
1909f7917c00SJeff Kirsher
1910f7917c00SJeff Kirsher /*
1911f7917c00SJeff Kirsher * Report current Wake On LAN settings.
1912f7917c00SJeff Kirsher */
cxgb4vf_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)1913f7917c00SJeff Kirsher static void cxgb4vf_get_wol(struct net_device *dev,
1914f7917c00SJeff Kirsher struct ethtool_wolinfo *wol)
1915f7917c00SJeff Kirsher {
1916f7917c00SJeff Kirsher wol->supported = 0;
1917f7917c00SJeff Kirsher wol->wolopts = 0;
1918f7917c00SJeff Kirsher memset(&wol->sopass, 0, sizeof(wol->sopass));
1919f7917c00SJeff Kirsher }
1920f7917c00SJeff Kirsher
1921f7917c00SJeff Kirsher /*
1922f7917c00SJeff Kirsher * TCP Segmentation Offload flags which we support.
1923f7917c00SJeff Kirsher */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
/* NOTE(review): presumably the feature set inherited by stacked VLAN
 * devices — confirm at the usage site(s) outside this chunk.
 */
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
1927f7917c00SJeff Kirsher
/* ethtool method dispatch table for cxgb4vf network devices. */
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	/* Only the RX coalescing parameters are adjustable. */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
	.get_fecparam		= cxgb4vf_get_fecparam,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
};
1950f7917c00SJeff Kirsher
1951f7917c00SJeff Kirsher /*
1952f7917c00SJeff Kirsher * /sys/kernel/debug/cxgb4vf support code and data.
1953f7917c00SJeff Kirsher * ================================================
1954f7917c00SJeff Kirsher */
1955f7917c00SJeff Kirsher
1956f7917c00SJeff Kirsher /*
1957ae7b7576SHariprasad Shenai * Show Firmware Mailbox Command/Reply Log
1958ae7b7576SHariprasad Shenai *
1959ae7b7576SHariprasad Shenai * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1960ae7b7576SHariprasad Shenai * it's possible that we can catch things during a log update and therefore
 * see partially corrupted log entries. But it's probably Good Enough(tm).
1962ae7b7576SHariprasad Shenai * If we ever decide that we want to make sure that we're dumping a coherent
1963ae7b7576SHariprasad Shenai * log, we'd need to perform locking in the mailbox logging and in
1964ae7b7576SHariprasad Shenai * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1965ae7b7576SHariprasad Shenai * like we do for the Firmware Device Log. But as stated above, meh ...
1966ae7b7576SHariprasad Shenai */
mboxlog_show(struct seq_file * seq,void * v)1967ae7b7576SHariprasad Shenai static int mboxlog_show(struct seq_file *seq, void *v)
1968ae7b7576SHariprasad Shenai {
1969ae7b7576SHariprasad Shenai struct adapter *adapter = seq->private;
1970ae7b7576SHariprasad Shenai struct mbox_cmd_log *log = adapter->mbox_log;
1971ae7b7576SHariprasad Shenai struct mbox_cmd *entry;
1972ae7b7576SHariprasad Shenai int entry_idx, i;
1973ae7b7576SHariprasad Shenai
1974ae7b7576SHariprasad Shenai if (v == SEQ_START_TOKEN) {
1975ae7b7576SHariprasad Shenai seq_printf(seq,
1976ae7b7576SHariprasad Shenai "%10s %15s %5s %5s %s\n",
1977ae7b7576SHariprasad Shenai "Seq#", "Tstamp", "Atime", "Etime",
1978ae7b7576SHariprasad Shenai "Command/Reply");
1979ae7b7576SHariprasad Shenai return 0;
1980ae7b7576SHariprasad Shenai }
1981ae7b7576SHariprasad Shenai
1982ae7b7576SHariprasad Shenai entry_idx = log->cursor + ((uintptr_t)v - 2);
1983ae7b7576SHariprasad Shenai if (entry_idx >= log->size)
1984ae7b7576SHariprasad Shenai entry_idx -= log->size;
1985ae7b7576SHariprasad Shenai entry = mbox_cmd_log_entry(log, entry_idx);
1986ae7b7576SHariprasad Shenai
1987ae7b7576SHariprasad Shenai /* skip over unused entries */
1988ae7b7576SHariprasad Shenai if (entry->timestamp == 0)
1989ae7b7576SHariprasad Shenai return 0;
1990ae7b7576SHariprasad Shenai
1991ae7b7576SHariprasad Shenai seq_printf(seq, "%10u %15llu %5d %5d",
1992ae7b7576SHariprasad Shenai entry->seqno, entry->timestamp,
1993ae7b7576SHariprasad Shenai entry->access, entry->execute);
1994ae7b7576SHariprasad Shenai for (i = 0; i < MBOX_LEN / 8; i++) {
1995ae7b7576SHariprasad Shenai u64 flit = entry->cmd[i];
1996ae7b7576SHariprasad Shenai u32 hi = (u32)(flit >> 32);
1997ae7b7576SHariprasad Shenai u32 lo = (u32)flit;
1998ae7b7576SHariprasad Shenai
1999ae7b7576SHariprasad Shenai seq_printf(seq, " %08x %08x", hi, lo);
2000ae7b7576SHariprasad Shenai }
2001ae7b7576SHariprasad Shenai seq_puts(seq, "\n");
2002ae7b7576SHariprasad Shenai return 0;
2003ae7b7576SHariprasad Shenai }
2004ae7b7576SHariprasad Shenai
/*
 * Map a seq_file position onto an opaque iterator cookie (pos + 1, so it
 * is never NULL), or NULL once we have run past the end of the log.
 */
static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;

	if (pos > log->size)
		return NULL;

	return (void *)(uintptr_t)(pos + 1);
}
2012ae7b7576SHariprasad Shenai
/* seq_file .start method: position 0 yields the header token. */
static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return mboxlog_get_idx(seq, *pos);
}
2017ae7b7576SHariprasad Shenai
/* seq_file .next method: advance the position and remap it. */
static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return mboxlog_get_idx(seq, *pos);
}
2023ae7b7576SHariprasad Shenai
/* seq_file .stop method: nothing to release, iteration holds no locks. */
static void mboxlog_stop(struct seq_file *seq, void *v)
{
}
2027ae7b7576SHariprasad Shenai
/* seq_file iterator for the mailbox log debugfs node. */
static const struct seq_operations mboxlog_sops = {
	.start = mboxlog_start,
	.next = mboxlog_next,
	.stop = mboxlog_stop,
	.show = mboxlog_show
};

/* Generates mboxlog_fops from mboxlog_sops for debugfs registration. */
DEFINE_SEQ_ATTRIBUTE(mboxlog);
2036ae7b7576SHariprasad Shenai /*
2037f7917c00SJeff Kirsher * Show SGE Queue Set information. We display QPL Queues Sets per line.
2038f7917c00SJeff Kirsher */
#define QPL	4	/* Queue Sets displayed per output line */
2040f7917c00SJeff Kirsher
/*
 * seq_file .show method for SGE Queue Set information.  Entry r == 0..
 * eth_entries-1 covers groups of QPL Ethernet Queue Sets; the next entry
 * is the FW event queue and (in MSI mode) the last is the forwarded
 * interrupt queue.
 */
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	/* Blank line between successive entries. */
	if (r)
		seq_putc(seq, '\n');

	/*
	 * S3 prints one labelled row with one column per Queue Set in the
	 * current group; qs and n are free variables bound in the block
	 * below.  S/T/R specialize it for strings, TX and RX fields.
	 */
#define S3(fmt_spec, s, v) \
	do {\
		seq_printf(seq, "%-12s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %16" fmt_spec, v); \
		seq_putc(seq, '\n'); \
	} while (0)
#define S(s, v)	S3("s", s, v)
#define T(s, v)	S3("u", s, txq[qs].v)
#define R(s, v)	S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		/* Last group may hold fewer than QPL Queue Sets. */
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

#undef R
#undef T
#undef S
#undef S3

	return 0;
}
2129f7917c00SJeff Kirsher
2130f7917c00SJeff Kirsher /*
2131f7917c00SJeff Kirsher * Return the number of "entries" in our "file". We group the multi-Queue
2132f7917c00SJeff Kirsher * sections with QPL Queue Sets per "entry". The sections of the output are:
2133f7917c00SJeff Kirsher *
2134f7917c00SJeff Kirsher * Ethernet RX/TX Queue Sets
2135f7917c00SJeff Kirsher * Firmware Event Queue
2136f7917c00SJeff Kirsher * Forwarded Interrupt Queue (if in MSI mode)
2137f7917c00SJeff Kirsher */
sge_queue_entries(const struct adapter * adapter)2138f7917c00SJeff Kirsher static int sge_queue_entries(const struct adapter *adapter)
2139f7917c00SJeff Kirsher {
2140f7917c00SJeff Kirsher return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
21413d78bfaaSArjun Vynipadath ((adapter->flags & CXGB4VF_USING_MSI) != 0);
2142f7917c00SJeff Kirsher }
2143f7917c00SJeff Kirsher
/* seq_file .start method: 1-biased cookie, NULL past the last entry. */
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sge_queue_entries(seq->private))
		return NULL;

	return (void *)((uintptr_t)*pos + 1);
}
2150f7917c00SJeff Kirsher
/* seq_file .stop method: nothing to release. */
static void sge_queue_stop(struct seq_file *seq, void *v)
{
}
2154f7917c00SJeff Kirsher
/* seq_file .next method: advance, or end iteration past the last entry. */
static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= sge_queue_entries(seq->private))
		return NULL;

	return (void *)((uintptr_t)*pos + 1);
}
2162f7917c00SJeff Kirsher
/* seq_file iterator for the SGE Queue Set information debugfs node. */
static const struct seq_operations sge_qinfo_sops = {
	.start = sge_queue_start,
	.next = sge_queue_next,
	.stop = sge_queue_stop,
	.show = sge_qinfo_show
};

/* Generates sge_qinfo_fops from sge_qinfo_sops for debugfs registration. */
DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
2171f7917c00SJeff Kirsher
2172f7917c00SJeff Kirsher /*
2173f7917c00SJeff Kirsher * Show SGE Queue Set statistics. We display QPL Queues Sets per line.
2174f7917c00SJeff Kirsher */
/* Identical redefinition of QPL (legal, benign) kept for local context. */
#define QPL	4
2176f7917c00SJeff Kirsher
/*
 * seq_file .show method for SGE Queue Set statistics.  Entry layout
 * mirrors sge_qinfo_show(): Ethernet Queue Set groups first, then the FW
 * event queue, then (MSI mode) the forwarded interrupt queue.
 */
static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	/* Blank line between successive entries. */
	if (r)
		seq_putc(seq, '\n');

	/*
	 * S3 prints one labelled row with one column per Queue Set; qs and
	 * n are free variables bound in the block below.  S/T/R (and the
	 * format-overriding T3/R3) specialize it per field source.
	 */
#define S3(fmt, s, v) \
	do { \
		seq_printf(seq, "%-16s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %8" fmt, v); \
		seq_putc(seq, '\n'); \
	} while (0)
#define S(s, v)		S3("s", s, v)

#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
#define T(s, v)		T3("lu", s, v)

#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
#define R(s, v)		R3("lu", s, v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		/* Last group may hold fewer than QPL Queue Sets. */
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

#undef R
#undef T
#undef S
#undef R3
#undef T3
#undef S3

	return 0;
}
2258f7917c00SJeff Kirsher
2259f7917c00SJeff Kirsher /*
2260f7917c00SJeff Kirsher * Return the number of "entries" in our "file". We group the multi-Queue
2261f7917c00SJeff Kirsher * sections with QPL Queue Sets per "entry". The sections of the output are:
2262f7917c00SJeff Kirsher *
2263f7917c00SJeff Kirsher * Ethernet RX/TX Queue Sets
2264f7917c00SJeff Kirsher * Firmware Event Queue
2265f7917c00SJeff Kirsher * Forwarded Interrupt Queue (if in MSI mode)
2266f7917c00SJeff Kirsher */
sge_qstats_entries(const struct adapter * adapter)2267f7917c00SJeff Kirsher static int sge_qstats_entries(const struct adapter *adapter)
2268f7917c00SJeff Kirsher {
2269f7917c00SJeff Kirsher return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
22703d78bfaaSArjun Vynipadath ((adapter->flags & CXGB4VF_USING_MSI) != 0);
2271f7917c00SJeff Kirsher }
2272f7917c00SJeff Kirsher
/* seq_file .start method: 1-biased cookie, NULL past the last entry. */
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sge_qstats_entries(seq->private))
		return NULL;

	return (void *)((uintptr_t)*pos + 1);
}
2279f7917c00SJeff Kirsher
/* seq_file .stop method: nothing to release. */
static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}
2283f7917c00SJeff Kirsher
/* seq_file .next method: advance, or end iteration past the last entry. */
static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos >= sge_qstats_entries(seq->private))
		return NULL;

	return (void *)((uintptr_t)*pos + 1);
}
2291f7917c00SJeff Kirsher
/* seq_file iterator for the SGE Queue Set statistics debugfs node. */
static const struct seq_operations sge_qstats_sops = {
	.start = sge_qstats_start,
	.next = sge_qstats_next,
	.stop = sge_qstats_stop,
	.show = sge_qstats_show
};

/* Generates sge_qstats_fops from sge_qstats_sops for debugfs registration. */
DEFINE_SEQ_ATTRIBUTE(sge_qstats);
2300f7917c00SJeff Kirsher
2301f7917c00SJeff Kirsher /*
2302f7917c00SJeff Kirsher * Show PCI-E SR-IOV Virtual Function Resource Limits.
2303f7917c00SJeff Kirsher */
/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits as one labelled
 * value per line, taken from the firmware-provided vfres parameters.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

	/* Print "<desc> (<field>):" padded to 60 columns, then the value. */
#define S(desc, fmt, var) \
	seq_printf(seq, "%-60s " fmt "\n", \
		   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

#undef S

	return 0;
}
/* Generates resources_fops (single-shot show) for debugfs registration. */
DEFINE_SHOW_ATTRIBUTE(resources);
2329f7917c00SJeff Kirsher
2330f7917c00SJeff Kirsher /*
2331f7917c00SJeff Kirsher * Show Virtual Interfaces.
2332f7917c00SJeff Kirsher */
interfaces_show(struct seq_file * seq,void * v)2333f7917c00SJeff Kirsher static int interfaces_show(struct seq_file *seq, void *v)
2334f7917c00SJeff Kirsher {
2335f7917c00SJeff Kirsher if (v == SEQ_START_TOKEN) {
2336f7917c00SJeff Kirsher seq_puts(seq, "Interface Port VIID\n");
2337f7917c00SJeff Kirsher } else {
2338f7917c00SJeff Kirsher struct adapter *adapter = seq->private;
2339f7917c00SJeff Kirsher int pidx = (uintptr_t)v - 2;
2340f7917c00SJeff Kirsher struct net_device *dev = adapter->port[pidx];
2341f7917c00SJeff Kirsher struct port_info *pi = netdev_priv(dev);
2342f7917c00SJeff Kirsher
2343f7917c00SJeff Kirsher seq_printf(seq, "%9s %4d %#5x\n",
2344f7917c00SJeff Kirsher dev->name, pi->port_id, pi->viid);
2345f7917c00SJeff Kirsher }
2346f7917c00SJeff Kirsher return 0;
2347f7917c00SJeff Kirsher }
2348f7917c00SJeff Kirsher
/*
 * Map a seq_file position onto a non-NULL iterator cookie (pos + 1), or
 * NULL once all ports have been visited.
 */
static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
{
	if (pos > adapter->params.nports)
		return NULL;

	return (void *)(uintptr_t)(pos + 1);
}
2355f7917c00SJeff Kirsher
/* seq_file .start method: position 0 yields the header token. */
static void *interfaces_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return interfaces_get_idx(seq->private, *pos);
}
2362f7917c00SJeff Kirsher
/* seq_file .next method: advance the position and remap it. */
static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return interfaces_get_idx(seq->private, *pos);
}
2368f7917c00SJeff Kirsher
/* seq_file .stop method: nothing to release. */
static void interfaces_stop(struct seq_file *seq, void *v)
{
}
2372f7917c00SJeff Kirsher
/* seq_file iterator for the Virtual Interfaces debugfs node. */
static const struct seq_operations interfaces_sops = {
	.start = interfaces_start,
	.next = interfaces_next,
	.stop = interfaces_stop,
	.show = interfaces_show
};

/* Generates interfaces_fops from interfaces_sops for debugfs registration. */
DEFINE_SEQ_ATTRIBUTE(interfaces);
2381f7917c00SJeff Kirsher
2382f7917c00SJeff Kirsher /*
2383f7917c00SJeff Kirsher * /sys/kernel/debugfs/cxgb4vf/ files list.
2384f7917c00SJeff Kirsher */
/* Describes one node created under the adapter's debugfs directory. */
struct cxgb4vf_debugfs_entry {
	const char *name;		/* name of debugfs node */
	umode_t mode;			/* file system mode */
	const struct file_operations *fops;	/* backing file operations */
};
2390f7917c00SJeff Kirsher
2391f7917c00SJeff Kirsher static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2392d3757ba4SJoe Perches { "mboxlog", 0444, &mboxlog_fops },
2393b948577bSLiu Shixin { "sge_qinfo", 0444, &sge_qinfo_fops },
2394b948577bSLiu Shixin { "sge_qstats", 0444, &sge_qstats_fops },
2395b09026c6SYangtao Li { "resources", 0444, &resources_fops },
2396b948577bSLiu Shixin { "interfaces", 0444, &interfaces_fops },
2397f7917c00SJeff Kirsher };
2398f7917c00SJeff Kirsher
2399f7917c00SJeff Kirsher /*
2400f7917c00SJeff Kirsher * Module and device initialization and cleanup code.
2401f7917c00SJeff Kirsher * ==================================================
2402f7917c00SJeff Kirsher */
2403f7917c00SJeff Kirsher
2404f7917c00SJeff Kirsher /*
2405f7917c00SJeff Kirsher * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2406f7917c00SJeff Kirsher * directory (debugfs_root) has already been set up.
2407f7917c00SJeff Kirsher */
setup_debugfs(struct adapter * adapter)2408d289f864SBill Pemberton static int setup_debugfs(struct adapter *adapter)
2409f7917c00SJeff Kirsher {
2410f7917c00SJeff Kirsher int i;
2411f7917c00SJeff Kirsher
2412f7917c00SJeff Kirsher BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2413f7917c00SJeff Kirsher
2414f7917c00SJeff Kirsher /*
2415f7917c00SJeff Kirsher * Debugfs support is best effort.
2416f7917c00SJeff Kirsher */
2417f7917c00SJeff Kirsher for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
24189dac1e8eSGreg Kroah-Hartman debugfs_create_file(debugfs_files[i].name,
2419f7917c00SJeff Kirsher debugfs_files[i].mode,
2420730f1351SGeert Uytterhoeven adapter->debugfs_root, adapter,
2421f7917c00SJeff Kirsher debugfs_files[i].fops);
2422f7917c00SJeff Kirsher
2423f7917c00SJeff Kirsher return 0;
2424f7917c00SJeff Kirsher }
2425f7917c00SJeff Kirsher
2426f7917c00SJeff Kirsher /*
2427f7917c00SJeff Kirsher * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2428f7917c00SJeff Kirsher * it to our caller to tear down the directory (debugfs_root).
2429f7917c00SJeff Kirsher */
/*
 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
 * it to our caller to tear down the directory (debugfs_root).
 */
static void cleanup_debugfs(struct adapter *adapter)
{
	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
	 * individual entries because a call will be made to
	 * debugfs_remove_recursive().  We just need to clean up any ancillary
	 * persistent state.
	 */
	/* nothing to do */
}
2442f7917c00SJeff Kirsher
2443495c22bbSHariprasad Shenai /* Figure out how many Ports and Queue Sets we can support. This depends on
2444495c22bbSHariprasad Shenai * knowing our Virtual Function Resources and may be called a second time if
2445495c22bbSHariprasad Shenai * we fall back from MSI-X to MSI Interrupt Mode.
2446495c22bbSHariprasad Shenai */
size_nports_qsets(struct adapter * adapter)2447495c22bbSHariprasad Shenai static void size_nports_qsets(struct adapter *adapter)
2448495c22bbSHariprasad Shenai {
2449495c22bbSHariprasad Shenai struct vf_resources *vfres = &adapter->params.vfres;
2450495c22bbSHariprasad Shenai unsigned int ethqsets, pmask_nports;
2451495c22bbSHariprasad Shenai
2452495c22bbSHariprasad Shenai /* The number of "ports" which we support is equal to the number of
2453495c22bbSHariprasad Shenai * Virtual Interfaces with which we've been provisioned.
2454495c22bbSHariprasad Shenai */
2455495c22bbSHariprasad Shenai adapter->params.nports = vfres->nvi;
2456495c22bbSHariprasad Shenai if (adapter->params.nports > MAX_NPORTS) {
2457495c22bbSHariprasad Shenai dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2458495c22bbSHariprasad Shenai " allowed virtual interfaces\n", MAX_NPORTS,
2459495c22bbSHariprasad Shenai adapter->params.nports);
2460495c22bbSHariprasad Shenai adapter->params.nports = MAX_NPORTS;
2461495c22bbSHariprasad Shenai }
2462495c22bbSHariprasad Shenai
2463495c22bbSHariprasad Shenai /* We may have been provisioned with more VIs than the number of
2464495c22bbSHariprasad Shenai * ports we're allowed to access (our Port Access Rights Mask).
2465495c22bbSHariprasad Shenai * This is obviously a configuration conflict but we don't want to
2466495c22bbSHariprasad Shenai * crash the kernel or anything silly just because of that.
2467495c22bbSHariprasad Shenai */
2468495c22bbSHariprasad Shenai pmask_nports = hweight32(adapter->params.vfres.pmask);
2469495c22bbSHariprasad Shenai if (pmask_nports < adapter->params.nports) {
24701a8ff8f5SColin Ian King dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2471495c22bbSHariprasad Shenai " virtual interfaces; limited by Port Access Rights"
2472495c22bbSHariprasad Shenai " mask %#x\n", pmask_nports, adapter->params.nports,
2473495c22bbSHariprasad Shenai adapter->params.vfres.pmask);
2474495c22bbSHariprasad Shenai adapter->params.nports = pmask_nports;
2475495c22bbSHariprasad Shenai }
2476495c22bbSHariprasad Shenai
2477495c22bbSHariprasad Shenai /* We need to reserve an Ingress Queue for the Asynchronous Firmware
2478495c22bbSHariprasad Shenai * Event Queue. And if we're using MSI Interrupts, we'll also need to
2479495c22bbSHariprasad Shenai * reserve an Ingress Queue for a Forwarded Interrupts.
2480495c22bbSHariprasad Shenai *
2481495c22bbSHariprasad Shenai * The rest of the FL/Intr-capable ingress queues will be matched up
2482495c22bbSHariprasad Shenai * one-for-one with Ethernet/Control egress queues in order to form
2483495c22bbSHariprasad Shenai * "Queue Sets" which will be aportioned between the "ports". For
2484495c22bbSHariprasad Shenai * each Queue Set, we'll need the ability to allocate two Egress
2485495c22bbSHariprasad Shenai * Contexts -- one for the Ingress Queue Free List and one for the TX
2486495c22bbSHariprasad Shenai * Ethernet Queue.
2487495c22bbSHariprasad Shenai *
2488495c22bbSHariprasad Shenai * Note that even if we're currently configured to use MSI-X
2489495c22bbSHariprasad Shenai * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2490495c22bbSHariprasad Shenai * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
2491495c22bbSHariprasad Shenai * happens we'll need to adjust things later.
2492495c22bbSHariprasad Shenai */
2493495c22bbSHariprasad Shenai ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2494495c22bbSHariprasad Shenai if (vfres->nethctrl != ethqsets)
2495495c22bbSHariprasad Shenai ethqsets = min(vfres->nethctrl, ethqsets);
2496495c22bbSHariprasad Shenai if (vfres->neq < ethqsets*2)
2497495c22bbSHariprasad Shenai ethqsets = vfres->neq/2;
2498495c22bbSHariprasad Shenai if (ethqsets > MAX_ETH_QSETS)
2499495c22bbSHariprasad Shenai ethqsets = MAX_ETH_QSETS;
2500495c22bbSHariprasad Shenai adapter->sge.max_ethqsets = ethqsets;
2501495c22bbSHariprasad Shenai
2502495c22bbSHariprasad Shenai if (adapter->sge.max_ethqsets < adapter->params.nports) {
2503495c22bbSHariprasad Shenai dev_warn(adapter->pdev_dev, "only using %d of %d available"
2504495c22bbSHariprasad Shenai " virtual interfaces (too few Queue Sets)\n",
2505495c22bbSHariprasad Shenai adapter->sge.max_ethqsets, adapter->params.nports);
2506495c22bbSHariprasad Shenai adapter->params.nports = adapter->sge.max_ethqsets;
2507495c22bbSHariprasad Shenai }
2508495c22bbSHariprasad Shenai }
2509495c22bbSHariprasad Shenai
2510f7917c00SJeff Kirsher /*
2511f7917c00SJeff Kirsher * Perform early "adapter" initialization. This is where we discover what
2512f7917c00SJeff Kirsher * adapter parameters we're going to be using and initialize basic adapter
2513f7917c00SJeff Kirsher * hardware support.
2514f7917c00SJeff Kirsher */
adap_init0(struct adapter * adapter)2515d289f864SBill Pemberton static int adap_init0(struct adapter *adapter)
2516f7917c00SJeff Kirsher {
2517f7917c00SJeff Kirsher struct sge_params *sge_params = &adapter->params.sge;
2518f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
2519f7917c00SJeff Kirsher int err;
252094dace10SVipul Pandya u32 param, val = 0;
2521f7917c00SJeff Kirsher
2522f7917c00SJeff Kirsher /*
2523f7917c00SJeff Kirsher * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2524f7917c00SJeff Kirsher * 2.6.31 and later we can't call pci_reset_function() in order to
2525f7917c00SJeff Kirsher * issue an FLR because of a self- deadlock on the device semaphore.
2526f7917c00SJeff Kirsher * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2527f7917c00SJeff Kirsher * cases where they're needed -- for instance, some versions of KVM
2528f7917c00SJeff Kirsher * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2529f7917c00SJeff Kirsher * use the firmware based reset in order to reset any per function
2530f7917c00SJeff Kirsher * state.
2531f7917c00SJeff Kirsher */
2532f7917c00SJeff Kirsher err = t4vf_fw_reset(adapter);
2533f7917c00SJeff Kirsher if (err < 0) {
2534f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2535f7917c00SJeff Kirsher return err;
2536f7917c00SJeff Kirsher }
2537f7917c00SJeff Kirsher
2538f7917c00SJeff Kirsher /*
2539f7917c00SJeff Kirsher * Grab basic operational parameters. These will predominantly have
2540f7917c00SJeff Kirsher * been set up by the Physical Function Driver or will be hard coded
2541f7917c00SJeff Kirsher * into the adapter. We just have to live with them ... Note that
2542f7917c00SJeff Kirsher * we _must_ get our VPD parameters before our SGE parameters because
2543f7917c00SJeff Kirsher * we need to know the adapter's core clock from the VPD in order to
2544f7917c00SJeff Kirsher * properly decode the SGE Timer Values.
2545f7917c00SJeff Kirsher */
2546f7917c00SJeff Kirsher err = t4vf_get_dev_params(adapter);
2547f7917c00SJeff Kirsher if (err) {
2548f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2549f7917c00SJeff Kirsher " device parameters: err=%d\n", err);
2550f7917c00SJeff Kirsher return err;
2551f7917c00SJeff Kirsher }
2552f7917c00SJeff Kirsher err = t4vf_get_vpd_params(adapter);
2553f7917c00SJeff Kirsher if (err) {
2554f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2555f7917c00SJeff Kirsher " VPD parameters: err=%d\n", err);
2556f7917c00SJeff Kirsher return err;
2557f7917c00SJeff Kirsher }
2558f7917c00SJeff Kirsher err = t4vf_get_sge_params(adapter);
2559f7917c00SJeff Kirsher if (err) {
2560f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2561f7917c00SJeff Kirsher " SGE parameters: err=%d\n", err);
2562f7917c00SJeff Kirsher return err;
2563f7917c00SJeff Kirsher }
2564f7917c00SJeff Kirsher err = t4vf_get_rss_glb_config(adapter);
2565f7917c00SJeff Kirsher if (err) {
2566f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2567f7917c00SJeff Kirsher " RSS parameters: err=%d\n", err);
2568f7917c00SJeff Kirsher return err;
2569f7917c00SJeff Kirsher }
2570f7917c00SJeff Kirsher if (adapter->params.rss.mode !=
2571f7917c00SJeff Kirsher FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2572f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2573f7917c00SJeff Kirsher " mode %d\n", adapter->params.rss.mode);
2574f7917c00SJeff Kirsher return -EINVAL;
2575f7917c00SJeff Kirsher }
2576f7917c00SJeff Kirsher err = t4vf_sge_init(adapter);
2577f7917c00SJeff Kirsher if (err) {
2578f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2579f7917c00SJeff Kirsher " err=%d\n", err);
2580f7917c00SJeff Kirsher return err;
2581f7917c00SJeff Kirsher }
2582f7917c00SJeff Kirsher
258394dace10SVipul Pandya /* If we're running on newer firmware, let it know that we're
258494dace10SVipul Pandya * prepared to deal with encapsulated CPL messages. Older
258594dace10SVipul Pandya * firmware won't understand this and we'll just get
258694dace10SVipul Pandya * unencapsulated messages ...
258794dace10SVipul Pandya */
25885167865aSHariprasad Shenai param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
25895167865aSHariprasad Shenai FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
259094dace10SVipul Pandya val = 1;
259194dace10SVipul Pandya (void) t4vf_set_params(adapter, 1, ¶m, &val);
259294dace10SVipul Pandya
2593f7917c00SJeff Kirsher /*
2594f7917c00SJeff Kirsher * Retrieve our RX interrupt holdoff timer values and counter
2595f7917c00SJeff Kirsher * threshold values from the SGE parameters.
2596f7917c00SJeff Kirsher */
2597f7917c00SJeff Kirsher s->timer_val[0] = core_ticks_to_us(adapter,
2598f061de42SHariprasad Shenai TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2599f7917c00SJeff Kirsher s->timer_val[1] = core_ticks_to_us(adapter,
2600f061de42SHariprasad Shenai TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2601f7917c00SJeff Kirsher s->timer_val[2] = core_ticks_to_us(adapter,
2602f061de42SHariprasad Shenai TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2603f7917c00SJeff Kirsher s->timer_val[3] = core_ticks_to_us(adapter,
2604f061de42SHariprasad Shenai TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2605f7917c00SJeff Kirsher s->timer_val[4] = core_ticks_to_us(adapter,
2606f061de42SHariprasad Shenai TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2607f7917c00SJeff Kirsher s->timer_val[5] = core_ticks_to_us(adapter,
2608f061de42SHariprasad Shenai TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2609f7917c00SJeff Kirsher
2610f612b815SHariprasad Shenai s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2611f612b815SHariprasad Shenai s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2612f612b815SHariprasad Shenai s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2613f612b815SHariprasad Shenai s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2614f7917c00SJeff Kirsher
2615f7917c00SJeff Kirsher /*
2616f7917c00SJeff Kirsher * Grab our Virtual Interface resource allocation, extract the
2617f7917c00SJeff Kirsher * features that we're interested in and do a bit of sanity testing on
2618f7917c00SJeff Kirsher * what we discover.
2619f7917c00SJeff Kirsher */
2620f7917c00SJeff Kirsher err = t4vf_get_vfres(adapter);
2621f7917c00SJeff Kirsher if (err) {
2622f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "unable to get virtual interface"
2623f7917c00SJeff Kirsher " resources: err=%d\n", err);
2624f7917c00SJeff Kirsher return err;
2625f7917c00SJeff Kirsher }
2626f7917c00SJeff Kirsher
2627495c22bbSHariprasad Shenai /* Check for various parameter sanity issues */
262828f71c6dSHariprasad Shenai if (adapter->params.vfres.pmask == 0) {
262928f71c6dSHariprasad Shenai dev_err(adapter->pdev_dev, "no port access configured\n"
263028f71c6dSHariprasad Shenai "usable!\n");
263128f71c6dSHariprasad Shenai return -EINVAL;
263228f71c6dSHariprasad Shenai }
2633495c22bbSHariprasad Shenai if (adapter->params.vfres.nvi == 0) {
2634f7917c00SJeff Kirsher dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2635f7917c00SJeff Kirsher "usable!\n");
2636f7917c00SJeff Kirsher return -EINVAL;
2637f7917c00SJeff Kirsher }
2638495c22bbSHariprasad Shenai
2639495c22bbSHariprasad Shenai /* Initialize nports and max_ethqsets now that we have our Virtual
2640495c22bbSHariprasad Shenai * Function Resources.
2641495c22bbSHariprasad Shenai */
2642495c22bbSHariprasad Shenai size_nports_qsets(adapter);
2643495c22bbSHariprasad Shenai
26443d78bfaaSArjun Vynipadath adapter->flags |= CXGB4VF_FW_OK;
2645f7917c00SJeff Kirsher return 0;
2646f7917c00SJeff Kirsher }
2647f7917c00SJeff Kirsher
/*
 * Initialize a Response Queue descriptor with the given interrupt holdoff
 * timer index, packet-count threshold index, queue size and entry size.
 * A pkt_cnt_idx >= SGE_NCOUNTERS disables counter-based interrupt
 * moderation (index forced to 0, enable bit left clear).
 */
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
			     u8 pkt_cnt_idx, unsigned int size,
			     unsigned int iqe_size)
{
	bool cnt_en = pkt_cnt_idx < SGE_NCOUNTERS;

	rspq->intr_params = QINTR_TIMER_IDX_V(timer_idx) |
			    (cnt_en ? QINTR_CNT_EN_F : 0);
	rspq->pktcnt_idx = cnt_en ? pkt_cnt_idx : 0;
	rspq->iqe_len = iqe_size;
	rspq->size = size;
}
2661f7917c00SJeff Kirsher
2662f7917c00SJeff Kirsher /*
2663f7917c00SJeff Kirsher * Perform default configuration of DMA queues depending on the number and
2664f7917c00SJeff Kirsher * type of ports we found and the number of available CPUs. Most settings can
2665f7917c00SJeff Kirsher * be modified by the admin via ethtool and cxgbtool prior to the adapter
2666f7917c00SJeff Kirsher * being brought up for the first time.
2667f7917c00SJeff Kirsher */
cfg_queues(struct adapter * adapter)2668d289f864SBill Pemberton static void cfg_queues(struct adapter *adapter)
2669f7917c00SJeff Kirsher {
2670f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
2671f7917c00SJeff Kirsher int q10g, n10g, qidx, pidx, qs;
2672f7917c00SJeff Kirsher size_t iqe_size;
2673f7917c00SJeff Kirsher
2674f7917c00SJeff Kirsher /*
2675f7917c00SJeff Kirsher * We should not be called till we know how many Queue Sets we can
2676f7917c00SJeff Kirsher * support. In particular, this means that we need to know what kind
2677f7917c00SJeff Kirsher * of interrupts we'll be using ...
2678f7917c00SJeff Kirsher */
26793d78bfaaSArjun Vynipadath BUG_ON((adapter->flags &
26803d78bfaaSArjun Vynipadath (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2681f7917c00SJeff Kirsher
2682f7917c00SJeff Kirsher /*
2683f7917c00SJeff Kirsher * Count the number of 10GbE Virtual Interfaces that we have.
2684f7917c00SJeff Kirsher */
2685f7917c00SJeff Kirsher n10g = 0;
2686f7917c00SJeff Kirsher for_each_port(adapter, pidx)
268714b3812fSHariprasad Shenai n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2688f7917c00SJeff Kirsher
2689f7917c00SJeff Kirsher /*
2690f7917c00SJeff Kirsher * We default to 1 queue per non-10G port and up to # of cores queues
2691f7917c00SJeff Kirsher * per 10G port.
2692f7917c00SJeff Kirsher */
2693f7917c00SJeff Kirsher if (n10g == 0)
2694f7917c00SJeff Kirsher q10g = 0;
2695f7917c00SJeff Kirsher else {
2696f7917c00SJeff Kirsher int n1g = (adapter->params.nports - n10g);
2697f7917c00SJeff Kirsher q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2698f7917c00SJeff Kirsher if (q10g > num_online_cpus())
2699f7917c00SJeff Kirsher q10g = num_online_cpus();
2700f7917c00SJeff Kirsher }
2701f7917c00SJeff Kirsher
2702f7917c00SJeff Kirsher /*
2703f7917c00SJeff Kirsher * Allocate the "Queue Sets" to the various Virtual Interfaces.
2704f7917c00SJeff Kirsher * The layout will be established in setup_sge_queues() when the
2705f7917c00SJeff Kirsher * adapter is brough up for the first time.
2706f7917c00SJeff Kirsher */
2707f7917c00SJeff Kirsher qidx = 0;
2708f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
2709f7917c00SJeff Kirsher struct port_info *pi = adap2pinfo(adapter, pidx);
2710f7917c00SJeff Kirsher
2711f7917c00SJeff Kirsher pi->first_qset = qidx;
2712897d55dfSHariprasad Shenai pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2713f7917c00SJeff Kirsher qidx += pi->nqsets;
2714f7917c00SJeff Kirsher }
2715f7917c00SJeff Kirsher s->ethqsets = qidx;
2716f7917c00SJeff Kirsher
2717f7917c00SJeff Kirsher /*
2718f7917c00SJeff Kirsher * The Ingress Queue Entry Size for our various Response Queues needs
2719f7917c00SJeff Kirsher * to be big enough to accommodate the largest message we can receive
2720f7917c00SJeff Kirsher * from the chip/firmware; which is 64 bytes ...
2721f7917c00SJeff Kirsher */
2722f7917c00SJeff Kirsher iqe_size = 64;
2723f7917c00SJeff Kirsher
2724f7917c00SJeff Kirsher /*
2725f7917c00SJeff Kirsher * Set up default Queue Set parameters ... Start off with the
2726f7917c00SJeff Kirsher * shortest interrupt holdoff timer.
2727f7917c00SJeff Kirsher */
2728f7917c00SJeff Kirsher for (qs = 0; qs < s->max_ethqsets; qs++) {
2729f7917c00SJeff Kirsher struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2730f7917c00SJeff Kirsher struct sge_eth_txq *txq = &s->ethtxq[qs];
2731f7917c00SJeff Kirsher
2732f7917c00SJeff Kirsher init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2733f7917c00SJeff Kirsher rxq->fl.size = 72;
2734f7917c00SJeff Kirsher txq->q.size = 1024;
2735f7917c00SJeff Kirsher }
2736f7917c00SJeff Kirsher
2737f7917c00SJeff Kirsher /*
2738f7917c00SJeff Kirsher * The firmware event queue is used for link state changes and
2739f7917c00SJeff Kirsher * notifications of TX DMA completions.
2740f7917c00SJeff Kirsher */
2741f7917c00SJeff Kirsher init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2742f7917c00SJeff Kirsher
2743f7917c00SJeff Kirsher /*
2744f7917c00SJeff Kirsher * The forwarded interrupt queue is used when we're in MSI interrupt
2745f7917c00SJeff Kirsher * mode. In this mode all interrupts associated with RX queues will
2746f7917c00SJeff Kirsher * be forwarded to a single queue which we'll associate with our MSI
2747f7917c00SJeff Kirsher * interrupt vector. The messages dropped in the forwarded interrupt
2748f7917c00SJeff Kirsher * queue will indicate which ingress queue needs servicing ... This
2749f7917c00SJeff Kirsher * queue needs to be large enough to accommodate all of the ingress
2750f7917c00SJeff Kirsher * queues which are forwarding their interrupt (+1 to prevent the PIDX
2751f7917c00SJeff Kirsher * from equalling the CIDX if every ingress queue has an outstanding
2752f7917c00SJeff Kirsher * interrupt). The queue doesn't need to be any larger because no
2753f7917c00SJeff Kirsher * ingress queue will ever have more than one outstanding interrupt at
2754f7917c00SJeff Kirsher * any time ...
2755f7917c00SJeff Kirsher */
2756f7917c00SJeff Kirsher init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2757f7917c00SJeff Kirsher iqe_size);
2758f7917c00SJeff Kirsher }
2759f7917c00SJeff Kirsher
2760f7917c00SJeff Kirsher /*
2761f7917c00SJeff Kirsher * Reduce the number of Ethernet queues across all ports to at most n.
2762f7917c00SJeff Kirsher * n provides at least one queue per port.
2763f7917c00SJeff Kirsher */
reduce_ethqs(struct adapter * adapter,int n)2764d289f864SBill Pemberton static void reduce_ethqs(struct adapter *adapter, int n)
2765f7917c00SJeff Kirsher {
2766f7917c00SJeff Kirsher int i;
2767f7917c00SJeff Kirsher struct port_info *pi;
2768f7917c00SJeff Kirsher
2769f7917c00SJeff Kirsher /*
2770f7917c00SJeff Kirsher * While we have too many active Ether Queue Sets, interate across the
2771f7917c00SJeff Kirsher * "ports" and reduce their individual Queue Set allocations.
2772f7917c00SJeff Kirsher */
2773f7917c00SJeff Kirsher BUG_ON(n < adapter->params.nports);
2774f7917c00SJeff Kirsher while (n < adapter->sge.ethqsets)
2775f7917c00SJeff Kirsher for_each_port(adapter, i) {
2776f7917c00SJeff Kirsher pi = adap2pinfo(adapter, i);
2777f7917c00SJeff Kirsher if (pi->nqsets > 1) {
2778f7917c00SJeff Kirsher pi->nqsets--;
2779f7917c00SJeff Kirsher adapter->sge.ethqsets--;
2780f7917c00SJeff Kirsher if (adapter->sge.ethqsets <= n)
2781f7917c00SJeff Kirsher break;
2782f7917c00SJeff Kirsher }
2783f7917c00SJeff Kirsher }
2784f7917c00SJeff Kirsher
2785f7917c00SJeff Kirsher /*
2786f7917c00SJeff Kirsher * Reassign the starting Queue Sets for each of the "ports" ...
2787f7917c00SJeff Kirsher */
2788f7917c00SJeff Kirsher n = 0;
2789f7917c00SJeff Kirsher for_each_port(adapter, i) {
2790f7917c00SJeff Kirsher pi = adap2pinfo(adapter, i);
2791f7917c00SJeff Kirsher pi->first_qset = n;
2792f7917c00SJeff Kirsher n += pi->nqsets;
2793f7917c00SJeff Kirsher }
2794f7917c00SJeff Kirsher }
2795f7917c00SJeff Kirsher
2796f7917c00SJeff Kirsher /*
2797f7917c00SJeff Kirsher * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2798f7917c00SJeff Kirsher * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2799f7917c00SJeff Kirsher * need. Minimally we need one for every Virtual Interface plus those needed
2800f7917c00SJeff Kirsher * for our "extras". Note that this process may lower the maximum number of
2801f7917c00SJeff Kirsher * allowed Queue Sets ...
2802f7917c00SJeff Kirsher */
enable_msix(struct adapter * adapter)2803d289f864SBill Pemberton static int enable_msix(struct adapter *adapter)
2804f7917c00SJeff Kirsher {
2805bd663689SAlexander Gordeev int i, want, need, nqsets;
2806f7917c00SJeff Kirsher struct msix_entry entries[MSIX_ENTRIES];
2807f7917c00SJeff Kirsher struct sge *s = &adapter->sge;
2808f7917c00SJeff Kirsher
2809f7917c00SJeff Kirsher for (i = 0; i < MSIX_ENTRIES; ++i)
2810f7917c00SJeff Kirsher entries[i].entry = i;
2811f7917c00SJeff Kirsher
2812f7917c00SJeff Kirsher /*
2813f7917c00SJeff Kirsher * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2814f7917c00SJeff Kirsher * plus those needed for our "extras" (for example, the firmware
2815f7917c00SJeff Kirsher * message queue). We _need_ at least one "Queue Set" per Virtual
2816f7917c00SJeff Kirsher * Interface plus those needed for our "extras". So now we get to see
2817f7917c00SJeff Kirsher * if the song is right ...
2818f7917c00SJeff Kirsher */
2819f7917c00SJeff Kirsher want = s->max_ethqsets + MSIX_EXTRAS;
2820f7917c00SJeff Kirsher need = adapter->params.nports + MSIX_EXTRAS;
2821f7917c00SJeff Kirsher
2822bd663689SAlexander Gordeev want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2823bd663689SAlexander Gordeev if (want < 0)
2824bd663689SAlexander Gordeev return want;
2825bd663689SAlexander Gordeev
2826bd663689SAlexander Gordeev nqsets = want - MSIX_EXTRAS;
2827f7917c00SJeff Kirsher if (nqsets < s->max_ethqsets) {
2828f7917c00SJeff Kirsher dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2829f7917c00SJeff Kirsher " for %d Queue Sets\n", nqsets);
2830f7917c00SJeff Kirsher s->max_ethqsets = nqsets;
2831f7917c00SJeff Kirsher if (nqsets < s->ethqsets)
2832f7917c00SJeff Kirsher reduce_ethqs(adapter, nqsets);
2833f7917c00SJeff Kirsher }
2834f7917c00SJeff Kirsher for (i = 0; i < want; ++i)
2835f7917c00SJeff Kirsher adapter->msix_info[i].vec = entries[i].vector;
2836bd663689SAlexander Gordeev
2837bd663689SAlexander Gordeev return 0;
2838f7917c00SJeff Kirsher }
2839f7917c00SJeff Kirsher
2840f7917c00SJeff Kirsher static const struct net_device_ops cxgb4vf_netdev_ops = {
2841f7917c00SJeff Kirsher .ndo_open = cxgb4vf_open,
2842f7917c00SJeff Kirsher .ndo_stop = cxgb4vf_stop,
2843f7917c00SJeff Kirsher .ndo_start_xmit = t4vf_eth_xmit,
2844f7917c00SJeff Kirsher .ndo_get_stats = cxgb4vf_get_stats,
2845f7917c00SJeff Kirsher .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2846f7917c00SJeff Kirsher .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2847f7917c00SJeff Kirsher .ndo_validate_addr = eth_validate_addr,
2848a7605370SArnd Bergmann .ndo_eth_ioctl = cxgb4vf_do_ioctl,
2849f7917c00SJeff Kirsher .ndo_change_mtu = cxgb4vf_change_mtu,
2850f7917c00SJeff Kirsher .ndo_fix_features = cxgb4vf_fix_features,
2851f7917c00SJeff Kirsher .ndo_set_features = cxgb4vf_set_features,
2852f7917c00SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
2853f7917c00SJeff Kirsher .ndo_poll_controller = cxgb4vf_poll_controller,
2854f7917c00SJeff Kirsher #endif
2855f7917c00SJeff Kirsher };
2856f7917c00SJeff Kirsher
2857e0cdac65SNirranjan Kirubaharan /**
2858e0cdac65SNirranjan Kirubaharan * cxgb4vf_get_port_mask - Get port mask for the VF based on mac
2859e0cdac65SNirranjan Kirubaharan * address stored on the adapter
2860e0cdac65SNirranjan Kirubaharan * @adapter: The adapter
2861e0cdac65SNirranjan Kirubaharan *
2862f0d2ef7fSJiang Jian * Find the port mask for the VF based on the index of mac
2863e0cdac65SNirranjan Kirubaharan * address stored in the adapter. If no mac address is stored on
2864e0cdac65SNirranjan Kirubaharan * the adapter for the VF, use the port mask received from the
2865e0cdac65SNirranjan Kirubaharan * firmware.
2866e0cdac65SNirranjan Kirubaharan */
cxgb4vf_get_port_mask(struct adapter * adapter)2867e0cdac65SNirranjan Kirubaharan static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
2868e0cdac65SNirranjan Kirubaharan {
2869e0cdac65SNirranjan Kirubaharan unsigned int naddr = 1, pidx = 0;
2870e0cdac65SNirranjan Kirubaharan unsigned int pmask, rmask = 0;
2871e0cdac65SNirranjan Kirubaharan u8 mac[ETH_ALEN];
2872e0cdac65SNirranjan Kirubaharan int err;
2873e0cdac65SNirranjan Kirubaharan
2874e0cdac65SNirranjan Kirubaharan pmask = adapter->params.vfres.pmask;
2875e0cdac65SNirranjan Kirubaharan while (pmask) {
2876e0cdac65SNirranjan Kirubaharan if (pmask & 1) {
2877e0cdac65SNirranjan Kirubaharan err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
2878e0cdac65SNirranjan Kirubaharan if (!err && !is_zero_ether_addr(mac))
2879e0cdac65SNirranjan Kirubaharan rmask |= (1 << pidx);
2880e0cdac65SNirranjan Kirubaharan }
2881e0cdac65SNirranjan Kirubaharan pmask >>= 1;
2882e0cdac65SNirranjan Kirubaharan pidx++;
2883e0cdac65SNirranjan Kirubaharan }
2884e0cdac65SNirranjan Kirubaharan if (!rmask)
2885e0cdac65SNirranjan Kirubaharan rmask = adapter->params.vfres.pmask;
2886e0cdac65SNirranjan Kirubaharan
2887e0cdac65SNirranjan Kirubaharan return rmask;
2888e0cdac65SNirranjan Kirubaharan }
2889e0cdac65SNirranjan Kirubaharan
2890f7917c00SJeff Kirsher /*
2891f7917c00SJeff Kirsher * "Probe" a device: initialize a device and construct all kernel and driver
2892f7917c00SJeff Kirsher * state needed to manage the device. This routine is called "init_one" in
2893f7917c00SJeff Kirsher * the PF Driver ...
2894f7917c00SJeff Kirsher */
cxgb4vf_pci_probe(struct pci_dev * pdev,const struct pci_device_id * ent)2895d289f864SBill Pemberton static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2896f7917c00SJeff Kirsher const struct pci_device_id *ent)
2897f7917c00SJeff Kirsher {
2898e0cdac65SNirranjan Kirubaharan struct adapter *adapter;
2899e0cdac65SNirranjan Kirubaharan struct net_device *netdev;
2900e0cdac65SNirranjan Kirubaharan struct port_info *pi;
2901e0cdac65SNirranjan Kirubaharan unsigned int pmask;
2902f7917c00SJeff Kirsher int err, pidx;
2903f7917c00SJeff Kirsher
2904f7917c00SJeff Kirsher /*
2905f7917c00SJeff Kirsher * Initialize generic PCI device state.
2906f7917c00SJeff Kirsher */
2907f7917c00SJeff Kirsher err = pci_enable_device(pdev);
29089eda994dSCai Huoqing if (err)
29099eda994dSCai Huoqing return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
2910f7917c00SJeff Kirsher
2911f7917c00SJeff Kirsher /*
2912f7917c00SJeff Kirsher * Reserve PCI resources for the device. If we can't get them some
2913f7917c00SJeff Kirsher * other driver may have already claimed the device ...
2914f7917c00SJeff Kirsher */
2915f7917c00SJeff Kirsher err = pci_request_regions(pdev, KBUILD_MODNAME);
2916f7917c00SJeff Kirsher if (err) {
2917f7917c00SJeff Kirsher dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2918f7917c00SJeff Kirsher goto err_disable_device;
2919f7917c00SJeff Kirsher }
2920f7917c00SJeff Kirsher
2921f7917c00SJeff Kirsher /*
2922030f9ce8SChristophe JAILLET * Set up our DMA mask
2923f7917c00SJeff Kirsher */
29244489d8f5SChristophe JAILLET err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2925030f9ce8SChristophe JAILLET if (err) {
2926f7917c00SJeff Kirsher dev_err(&pdev->dev, "no usable DMA configuration\n");
2927f7917c00SJeff Kirsher goto err_release_regions;
2928f7917c00SJeff Kirsher }
2929f7917c00SJeff Kirsher
2930f7917c00SJeff Kirsher /*
2931f7917c00SJeff Kirsher * Enable bus mastering for the device ...
2932f7917c00SJeff Kirsher */
2933f7917c00SJeff Kirsher pci_set_master(pdev);
2934f7917c00SJeff Kirsher
2935f7917c00SJeff Kirsher /*
2936f7917c00SJeff Kirsher * Allocate our adapter data structure and attach it to the device.
2937f7917c00SJeff Kirsher */
2938f7917c00SJeff Kirsher adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2939f7917c00SJeff Kirsher if (!adapter) {
2940f7917c00SJeff Kirsher err = -ENOMEM;
2941f7917c00SJeff Kirsher goto err_release_regions;
2942f7917c00SJeff Kirsher }
2943f7917c00SJeff Kirsher pci_set_drvdata(pdev, adapter);
2944f7917c00SJeff Kirsher adapter->pdev = pdev;
2945f7917c00SJeff Kirsher adapter->pdev_dev = &pdev->dev;
2946f7917c00SJeff Kirsher
2947ae7b7576SHariprasad Shenai adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2948ae7b7576SHariprasad Shenai (sizeof(struct mbox_cmd) *
2949ae7b7576SHariprasad Shenai T4VF_OS_LOG_MBOX_CMDS),
2950ae7b7576SHariprasad Shenai GFP_KERNEL);
2951ae7b7576SHariprasad Shenai if (!adapter->mbox_log) {
2952ae7b7576SHariprasad Shenai err = -ENOMEM;
2953ae7b7576SHariprasad Shenai goto err_free_adapter;
2954ae7b7576SHariprasad Shenai }
2955ae7b7576SHariprasad Shenai adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2956ae7b7576SHariprasad Shenai
2957f7917c00SJeff Kirsher /*
2958f7917c00SJeff Kirsher * Initialize SMP data synchronization resources.
2959f7917c00SJeff Kirsher */
2960f7917c00SJeff Kirsher spin_lock_init(&adapter->stats_lock);
2961b38066daSHariprasad Shenai spin_lock_init(&adapter->mbox_lock);
2962b38066daSHariprasad Shenai INIT_LIST_HEAD(&adapter->mlist.list);
2963f7917c00SJeff Kirsher
2964f7917c00SJeff Kirsher /*
2965f7917c00SJeff Kirsher * Map our I/O registers in BAR0.
2966f7917c00SJeff Kirsher */
2967f7917c00SJeff Kirsher adapter->regs = pci_ioremap_bar(pdev, 0);
2968f7917c00SJeff Kirsher if (!adapter->regs) {
2969f7917c00SJeff Kirsher dev_err(&pdev->dev, "cannot map device registers\n");
2970f7917c00SJeff Kirsher err = -ENOMEM;
2971f7917c00SJeff Kirsher goto err_free_adapter;
2972f7917c00SJeff Kirsher }
2973f7917c00SJeff Kirsher
2974e0a8b34aSHariprasad Shenai /* Wait for the device to become ready before proceeding ...
2975e0a8b34aSHariprasad Shenai */
2976e0a8b34aSHariprasad Shenai err = t4vf_prep_adapter(adapter);
2977e0a8b34aSHariprasad Shenai if (err) {
2978e0a8b34aSHariprasad Shenai dev_err(adapter->pdev_dev, "device didn't become ready:"
2979e0a8b34aSHariprasad Shenai " err=%d\n", err);
2980e0a8b34aSHariprasad Shenai goto err_unmap_bar0;
2981e0a8b34aSHariprasad Shenai }
2982e0a8b34aSHariprasad Shenai
2983e0a8b34aSHariprasad Shenai /* For T5 and later we want to use the new BAR-based User Doorbells,
2984e0a8b34aSHariprasad Shenai * so we need to map BAR2 here ...
2985e0a8b34aSHariprasad Shenai */
2986e0a8b34aSHariprasad Shenai if (!is_t4(adapter->params.chip)) {
2987e0a8b34aSHariprasad Shenai adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2988e0a8b34aSHariprasad Shenai pci_resource_len(pdev, 2));
2989e0a8b34aSHariprasad Shenai if (!adapter->bar2) {
2990e0a8b34aSHariprasad Shenai dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2991e0a8b34aSHariprasad Shenai err = -ENOMEM;
2992e0a8b34aSHariprasad Shenai goto err_unmap_bar0;
2993e0a8b34aSHariprasad Shenai }
2994e0a8b34aSHariprasad Shenai }
2995f7917c00SJeff Kirsher /*
2996f7917c00SJeff Kirsher * Initialize adapter level features.
2997f7917c00SJeff Kirsher */
2998f7917c00SJeff Kirsher adapter->name = pci_name(pdev);
2999ea1e76f7SGanesh Goudar adapter->msg_enable = DFLT_MSG_ENABLE;
3000b629276dSCasey Leedom
3001b629276dSCasey Leedom /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
3002b629276dSCasey Leedom * Ingress Packet Data to Free List Buffers in order to allow for
3003b629276dSCasey Leedom * chipset performance optimizations between the Root Complex and
3004b629276dSCasey Leedom * Memory Controllers. (Messages to the associated Ingress Queue
3005b629276dSCasey Leedom * notifying new Packet Placement in the Free Lists Buffers will be
3006b629276dSCasey Leedom * send without the Relaxed Ordering Attribute thus guaranteeing that
3007b629276dSCasey Leedom * all preceding PCIe Transaction Layer Packets will be processed
3008b629276dSCasey Leedom * first.) But some Root Complexes have various issues with Upstream
3009b629276dSCasey Leedom * Transaction Layer Packets with the Relaxed Ordering Attribute set.
3010b629276dSCasey Leedom * The PCIe devices which under the Root Complexes will be cleared the
3011b629276dSCasey Leedom * Relaxed Ordering bit in the configuration space, So we check our
3012b629276dSCasey Leedom * PCIe configuration space to see if it's flagged with advice against
3013b629276dSCasey Leedom * using Relaxed Ordering.
3014b629276dSCasey Leedom */
3015b629276dSCasey Leedom if (!pcie_relaxed_ordering_enabled(pdev))
30163d78bfaaSArjun Vynipadath adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
3017b629276dSCasey Leedom
3018f7917c00SJeff Kirsher err = adap_init0(adapter);
3019f7917c00SJeff Kirsher if (err)
30204a8acef7SArjun Vynipadath dev_err(&pdev->dev,
30214a8acef7SArjun Vynipadath "Adapter initialization failed, error %d. Continuing in debug mode\n",
30224a8acef7SArjun Vynipadath err);
3023f7917c00SJeff Kirsher
3024b539ea60SArjun Vynipadath /* Initialize hash mac addr list */
3025b539ea60SArjun Vynipadath INIT_LIST_HEAD(&adapter->mac_hlist);
3026b539ea60SArjun Vynipadath
3027f7917c00SJeff Kirsher /*
3028f7917c00SJeff Kirsher * Allocate our "adapter ports" and stitch everything together.
3029f7917c00SJeff Kirsher */
3030e0cdac65SNirranjan Kirubaharan pmask = cxgb4vf_get_port_mask(adapter);
3031f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
3032f7917c00SJeff Kirsher int port_id, viid;
3033858aa65cSHariprasad Shenai u8 mac[ETH_ALEN];
3034858aa65cSHariprasad Shenai unsigned int naddr = 1;
3035f7917c00SJeff Kirsher
3036f7917c00SJeff Kirsher /*
3037f7917c00SJeff Kirsher * We simplistically allocate our virtual interfaces
3038f7917c00SJeff Kirsher * sequentially across the port numbers to which we have
3039f7917c00SJeff Kirsher * access rights. This should be configurable in some manner
3040f7917c00SJeff Kirsher * ...
3041f7917c00SJeff Kirsher */
3042f7917c00SJeff Kirsher if (pmask == 0)
3043f7917c00SJeff Kirsher break;
3044f7917c00SJeff Kirsher port_id = ffs(pmask) - 1;
3045f7917c00SJeff Kirsher pmask &= ~(1 << port_id);
3046f7917c00SJeff Kirsher
3047f7917c00SJeff Kirsher /*
3048f7917c00SJeff Kirsher * Allocate our network device and stitch things together.
3049f7917c00SJeff Kirsher */
3050f7917c00SJeff Kirsher netdev = alloc_etherdev_mq(sizeof(struct port_info),
3051f7917c00SJeff Kirsher MAX_PORT_QSETS);
3052f7917c00SJeff Kirsher if (netdev == NULL) {
3053f7917c00SJeff Kirsher err = -ENOMEM;
3054f7917c00SJeff Kirsher goto err_free_dev;
3055f7917c00SJeff Kirsher }
3056f7917c00SJeff Kirsher adapter->port[pidx] = netdev;
3057f7917c00SJeff Kirsher SET_NETDEV_DEV(netdev, &pdev->dev);
3058f7917c00SJeff Kirsher pi = netdev_priv(netdev);
3059f7917c00SJeff Kirsher pi->adapter = adapter;
3060f7917c00SJeff Kirsher pi->pidx = pidx;
3061f7917c00SJeff Kirsher pi->port_id = port_id;
3062f7917c00SJeff Kirsher
3063f7917c00SJeff Kirsher /*
3064f7917c00SJeff Kirsher * Initialize the starting state of our "port" and register
3065f7917c00SJeff Kirsher * it.
3066f7917c00SJeff Kirsher */
3067f7917c00SJeff Kirsher pi->xact_addr_filt = -1;
3068f7917c00SJeff Kirsher netdev->irq = pdev->irq;
3069f7917c00SJeff Kirsher
3070012475e3SArjun Vynipadath netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
3071012475e3SArjun Vynipadath NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3072012475e3SArjun Vynipadath NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3073030f9ce8SChristophe JAILLET netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
3074012475e3SArjun Vynipadath netdev->vlan_features = netdev->features & VLAN_FEAT;
3075f7917c00SJeff Kirsher
307601789349SJiri Pirko netdev->priv_flags |= IFF_UNICAST_FLT;
3077d894be57SJarod Wilson netdev->min_mtu = 81;
3078d894be57SJarod Wilson netdev->max_mtu = ETH_MAX_MTU;
307901789349SJiri Pirko
3080f7917c00SJeff Kirsher netdev->netdev_ops = &cxgb4vf_netdev_ops;
30817ad24ea4SWilfried Klaebe netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3082d2a007abSArjun V netdev->dev_port = pi->port_id;
3083f7917c00SJeff Kirsher
3084f7917c00SJeff Kirsher /*
30854a8acef7SArjun Vynipadath * If we haven't been able to contact the firmware, there's
30864a8acef7SArjun Vynipadath * nothing else we can do for this "port" ...
30874a8acef7SArjun Vynipadath */
30883d78bfaaSArjun Vynipadath if (!(adapter->flags & CXGB4VF_FW_OK))
30894a8acef7SArjun Vynipadath continue;
30904a8acef7SArjun Vynipadath
30914a8acef7SArjun Vynipadath viid = t4vf_alloc_vi(adapter, port_id);
30924a8acef7SArjun Vynipadath if (viid < 0) {
30934a8acef7SArjun Vynipadath dev_err(&pdev->dev,
30944a8acef7SArjun Vynipadath "cannot allocate VI for port %d: err=%d\n",
30954a8acef7SArjun Vynipadath port_id, viid);
30964a8acef7SArjun Vynipadath err = viid;
30974a8acef7SArjun Vynipadath goto err_free_dev;
30984a8acef7SArjun Vynipadath }
30994a8acef7SArjun Vynipadath pi->viid = viid;
31004a8acef7SArjun Vynipadath
31014a8acef7SArjun Vynipadath /*
3102f7917c00SJeff Kirsher * Initialize the hardware/software state for the port.
3103f7917c00SJeff Kirsher */
3104f7917c00SJeff Kirsher err = t4vf_port_init(adapter, pidx);
3105f7917c00SJeff Kirsher if (err) {
3106f7917c00SJeff Kirsher dev_err(&pdev->dev, "cannot initialize port %d\n",
3107f7917c00SJeff Kirsher pidx);
3108f7917c00SJeff Kirsher goto err_free_dev;
3109f7917c00SJeff Kirsher }
3110858aa65cSHariprasad Shenai
3111e0cdac65SNirranjan Kirubaharan err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
3112858aa65cSHariprasad Shenai if (err) {
3113858aa65cSHariprasad Shenai dev_err(&pdev->dev,
3114858aa65cSHariprasad Shenai "unable to determine MAC ACL address, "
3115858aa65cSHariprasad Shenai "continuing anyway.. (status %d)\n", err);
3116858aa65cSHariprasad Shenai } else if (naddr && adapter->params.vfres.nvi == 1) {
3117858aa65cSHariprasad Shenai struct sockaddr addr;
3118858aa65cSHariprasad Shenai
3119858aa65cSHariprasad Shenai ether_addr_copy(addr.sa_data, mac);
3120858aa65cSHariprasad Shenai err = cxgb4vf_set_mac_addr(netdev, &addr);
3121858aa65cSHariprasad Shenai if (err) {
3122858aa65cSHariprasad Shenai dev_err(&pdev->dev,
3123858aa65cSHariprasad Shenai "unable to set MAC address %pM\n",
3124858aa65cSHariprasad Shenai mac);
3125858aa65cSHariprasad Shenai goto err_free_dev;
3126858aa65cSHariprasad Shenai }
3127858aa65cSHariprasad Shenai dev_info(&pdev->dev,
3128858aa65cSHariprasad Shenai "Using assigned MAC ACL: %pM\n", mac);
3129858aa65cSHariprasad Shenai }
3130f7917c00SJeff Kirsher }
3131f7917c00SJeff Kirsher
313284f67018SHariprasad Shenai /* See what interrupts we'll be using. If we've been configured to
313384f67018SHariprasad Shenai * use MSI-X interrupts, try to enable them but fall back to using
313484f67018SHariprasad Shenai * MSI interrupts if we can't enable MSI-X interrupts. If we can't
313584f67018SHariprasad Shenai * get MSI interrupts we bail with the error.
313684f67018SHariprasad Shenai */
313784f67018SHariprasad Shenai if (msi == MSI_MSIX && enable_msix(adapter) == 0)
31383d78bfaaSArjun Vynipadath adapter->flags |= CXGB4VF_USING_MSIX;
313984f67018SHariprasad Shenai else {
3140495c22bbSHariprasad Shenai if (msi == MSI_MSIX) {
3141495c22bbSHariprasad Shenai dev_info(adapter->pdev_dev,
3142495c22bbSHariprasad Shenai "Unable to use MSI-X Interrupts; falling "
3143495c22bbSHariprasad Shenai "back to MSI Interrupts\n");
3144495c22bbSHariprasad Shenai
3145495c22bbSHariprasad Shenai /* We're going to need a Forwarded Interrupt Queue so
3146495c22bbSHariprasad Shenai * that may cut into how many Queue Sets we can
3147495c22bbSHariprasad Shenai * support.
3148495c22bbSHariprasad Shenai */
3149495c22bbSHariprasad Shenai msi = MSI_MSI;
3150495c22bbSHariprasad Shenai size_nports_qsets(adapter);
3151495c22bbSHariprasad Shenai }
315284f67018SHariprasad Shenai err = pci_enable_msi(pdev);
315384f67018SHariprasad Shenai if (err) {
3154495c22bbSHariprasad Shenai dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3155495c22bbSHariprasad Shenai " err=%d\n", err);
315684f67018SHariprasad Shenai goto err_free_dev;
315784f67018SHariprasad Shenai }
31583d78bfaaSArjun Vynipadath adapter->flags |= CXGB4VF_USING_MSI;
315984f67018SHariprasad Shenai }
316084f67018SHariprasad Shenai
3161495c22bbSHariprasad Shenai /* Now that we know how many "ports" we have and what interrupt
3162495c22bbSHariprasad Shenai * mechanism we're going to use, we can configure our queue resources.
3163495c22bbSHariprasad Shenai */
3164495c22bbSHariprasad Shenai cfg_queues(adapter);
3165495c22bbSHariprasad Shenai
3166f7917c00SJeff Kirsher /*
3167f7917c00SJeff Kirsher * The "card" is now ready to go. If any errors occur during device
3168f7917c00SJeff Kirsher * registration we do not fail the whole "card" but rather proceed
3169f7917c00SJeff Kirsher * only with the ports we manage to register successfully. However we
3170f7917c00SJeff Kirsher * must register at least one net device.
3171f7917c00SJeff Kirsher */
3172f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
3173a8d16d08SHariprasad Shenai struct port_info *pi = netdev_priv(adapter->port[pidx]);
3174f7917c00SJeff Kirsher netdev = adapter->port[pidx];
3175f7917c00SJeff Kirsher if (netdev == NULL)
3176f7917c00SJeff Kirsher continue;
3177f7917c00SJeff Kirsher
3178a8d16d08SHariprasad Shenai netif_set_real_num_tx_queues(netdev, pi->nqsets);
3179a8d16d08SHariprasad Shenai netif_set_real_num_rx_queues(netdev, pi->nqsets);
3180a8d16d08SHariprasad Shenai
3181f7917c00SJeff Kirsher err = register_netdev(netdev);
3182f7917c00SJeff Kirsher if (err) {
3183f7917c00SJeff Kirsher dev_warn(&pdev->dev, "cannot register net device %s,"
3184f7917c00SJeff Kirsher " skipping\n", netdev->name);
3185f7917c00SJeff Kirsher continue;
3186f7917c00SJeff Kirsher }
3187f7917c00SJeff Kirsher
318857d37aeaSArjun Vynipadath netif_carrier_off(netdev);
3189f7917c00SJeff Kirsher set_bit(pidx, &adapter->registered_device_map);
3190f7917c00SJeff Kirsher }
3191f7917c00SJeff Kirsher if (adapter->registered_device_map == 0) {
3192f7917c00SJeff Kirsher dev_err(&pdev->dev, "could not register any net devices\n");
3193b82d71c0SZheyu Ma err = -EINVAL;
319484f67018SHariprasad Shenai goto err_disable_interrupts;
3195f7917c00SJeff Kirsher }
3196f7917c00SJeff Kirsher
3197f7917c00SJeff Kirsher /*
3198f7917c00SJeff Kirsher * Set up our debugfs entries.
3199f7917c00SJeff Kirsher */
3200f7917c00SJeff Kirsher if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3201f7917c00SJeff Kirsher adapter->debugfs_root =
3202f7917c00SJeff Kirsher debugfs_create_dir(pci_name(pdev),
3203f7917c00SJeff Kirsher cxgb4vf_debugfs_root);
3204f7917c00SJeff Kirsher setup_debugfs(adapter);
3205f7917c00SJeff Kirsher }
3206f7917c00SJeff Kirsher
3207f7917c00SJeff Kirsher /*
3208f7917c00SJeff Kirsher * Print a short notice on the existence and configuration of the new
3209f7917c00SJeff Kirsher * VF network device ...
3210f7917c00SJeff Kirsher */
3211f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
3212f7917c00SJeff Kirsher dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3213f7917c00SJeff Kirsher adapter->port[pidx]->name,
32143d78bfaaSArjun Vynipadath (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
32153d78bfaaSArjun Vynipadath (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : "");
3216f7917c00SJeff Kirsher }
3217f7917c00SJeff Kirsher
3218f7917c00SJeff Kirsher /*
3219f7917c00SJeff Kirsher * Return success!
3220f7917c00SJeff Kirsher */
3221f7917c00SJeff Kirsher return 0;
3222f7917c00SJeff Kirsher
3223f7917c00SJeff Kirsher /*
3224f7917c00SJeff Kirsher * Error recovery and exit code. Unwind state that's been created
3225f7917c00SJeff Kirsher * so far and return the error.
3226f7917c00SJeff Kirsher */
322784f67018SHariprasad Shenai err_disable_interrupts:
32283d78bfaaSArjun Vynipadath if (adapter->flags & CXGB4VF_USING_MSIX) {
322984f67018SHariprasad Shenai pci_disable_msix(adapter->pdev);
32303d78bfaaSArjun Vynipadath adapter->flags &= ~CXGB4VF_USING_MSIX;
32313d78bfaaSArjun Vynipadath } else if (adapter->flags & CXGB4VF_USING_MSI) {
323284f67018SHariprasad Shenai pci_disable_msi(adapter->pdev);
32333d78bfaaSArjun Vynipadath adapter->flags &= ~CXGB4VF_USING_MSI;
3234f7917c00SJeff Kirsher }
3235f7917c00SJeff Kirsher
3236f7917c00SJeff Kirsher err_free_dev:
3237f7917c00SJeff Kirsher for_each_port(adapter, pidx) {
3238f7917c00SJeff Kirsher netdev = adapter->port[pidx];
3239f7917c00SJeff Kirsher if (netdev == NULL)
3240f7917c00SJeff Kirsher continue;
3241f7917c00SJeff Kirsher pi = netdev_priv(netdev);
32424a8acef7SArjun Vynipadath if (pi->viid)
3243f7917c00SJeff Kirsher t4vf_free_vi(adapter, pi->viid);
3244f7917c00SJeff Kirsher if (test_bit(pidx, &adapter->registered_device_map))
3245f7917c00SJeff Kirsher unregister_netdev(netdev);
3246f7917c00SJeff Kirsher free_netdev(netdev);
3247f7917c00SJeff Kirsher }
3248f7917c00SJeff Kirsher
3249e0a8b34aSHariprasad Shenai if (!is_t4(adapter->params.chip))
3250e0a8b34aSHariprasad Shenai iounmap(adapter->bar2);
3251e0a8b34aSHariprasad Shenai
3252e0a8b34aSHariprasad Shenai err_unmap_bar0:
3253f7917c00SJeff Kirsher iounmap(adapter->regs);
3254f7917c00SJeff Kirsher
3255f7917c00SJeff Kirsher err_free_adapter:
3256ae7b7576SHariprasad Shenai kfree(adapter->mbox_log);
3257f7917c00SJeff Kirsher kfree(adapter);
3258f7917c00SJeff Kirsher
3259f7917c00SJeff Kirsher err_release_regions:
3260f7917c00SJeff Kirsher pci_release_regions(pdev);
3261f7917c00SJeff Kirsher
3262f7917c00SJeff Kirsher err_disable_device:
3263f7917c00SJeff Kirsher pci_disable_device(pdev);
3264f7917c00SJeff Kirsher
3265f7917c00SJeff Kirsher return err;
3266f7917c00SJeff Kirsher }
3267f7917c00SJeff Kirsher
3268f7917c00SJeff Kirsher /*
3269f7917c00SJeff Kirsher * "Remove" a device: tear down all kernel and driver state created in the
3270f7917c00SJeff Kirsher * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
3271f7917c00SJeff Kirsher * that this is called "remove_one" in the PF Driver.)
3272f7917c00SJeff Kirsher */
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	struct hash_mac_addr *entry, *tmp;

	/*
	 * Tear down driver state associated with device.  The adapter
	 * pointer may be NULL if probe failed early, in which case only
	 * the generic PCI state below needs to be undone.
	 */
	if (adapter) {
		int pidx;

		/*
		 * Stop all of our activity.  Unregister network port,
		 * disable interrupts, etc.  Unregistering first ensures no
		 * new traffic can reach us while we free resources below.
		 */
		for_each_port(adapter, pidx)
			if (test_bit(pidx, &adapter->registered_device_map))
				unregister_netdev(adapter->port[pidx]);
		t4vf_sge_stop(adapter);
		if (adapter->flags & CXGB4VF_USING_MSIX) {
			pci_disable_msix(adapter->pdev);
			adapter->flags &= ~CXGB4VF_USING_MSIX;
		} else if (adapter->flags & CXGB4VF_USING_MSI) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~CXGB4VF_USING_MSI;
		}

		/*
		 * Tear down our debugfs entries.
		 */
		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
			cleanup_debugfs(adapter);
			debugfs_remove_recursive(adapter->debugfs_root);
		}

		/*
		 * Free all of the various resources which we've acquired ...
		 */
		t4vf_free_sge_resources(adapter);
		for_each_port(adapter, pidx) {
			struct net_device *netdev = adapter->port[pidx];
			struct port_info *pi;

			if (netdev == NULL)
				continue;

			pi = netdev_priv(netdev);
			/* pi->viid is 0 when probe never contacted the
			 * firmware for this port, so no VI was allocated.
			 */
			if (pi->viid)
				t4vf_free_vi(adapter, pi->viid);
			free_netdev(netdev);
		}
		iounmap(adapter->regs);
		/* BAR2 (User Doorbells) is only mapped on non-T4 chips. */
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		kfree(adapter->mbox_log);
		/* Drain and free the hash MAC address list. */
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
		kfree(adapter);
	}

	/*
	 * Disable the device and release its PCI resources.
	 */
	pci_disable_device(pdev);
	pci_release_regions(pdev);
}
3342f7917c00SJeff Kirsher
3343f7917c00SJeff Kirsher /*
3344f7917c00SJeff Kirsher * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3345f7917c00SJeff Kirsher * delivery.
3346f7917c00SJeff Kirsher */
static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
	struct adapter *adapter;
	int pidx;

	adapter = pci_get_drvdata(pdev);
	if (!adapter)
		return;

	/* Disable all Virtual Interfaces.  This will shut down the
	 * delivery of all ingress packets into the chip for these
	 * Virtual Interfaces.
	 */
	for_each_port(adapter, pidx)
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(adapter->port[pidx]);

	/* Stop SGE queue processing and release our interrupt vectors so
	 * no further stimulus reaches the driver.
	 */
	t4vf_sge_stop(adapter);
	if (adapter->flags & CXGB4VF_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4VF_USING_MSIX;
	} else if (adapter->flags & CXGB4VF_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4VF_USING_MSI;
	}

	/*
	 * Free up all Queues which will prevent further DMA and
	 * Interrupts allowing various internal pathways to drain.
	 */
	t4vf_free_sge_resources(adapter);
	/* Clear the driver data so a subsequent "remove" won't attempt
	 * to tear the (now quiesced) adapter down a second time.
	 */
	pci_set_drvdata(pdev, NULL);
}
3383f7917c00SJeff Kirsher
/* Macros needed to support the PCI Device ID Table.  The table itself
 * lives in t4_pci_id_tbl.h (shared with the cxgb4 PF driver); these
 * macros instruct it to emit Virtual Function entries (PCI function 0x8)
 * into cxgb4vf_pci_tbl.
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION	0x8

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

#include "../cxgb4/t4_pci_id_tbl.h"
3396f7917c00SJeff Kirsher
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

/* PCI driver glue: binds our probe/remove/shutdown entry points to the
 * device IDs listed in cxgb4vf_pci_tbl.
 */
static struct pci_driver cxgb4vf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4vf_pci_tbl,
	.probe = cxgb4vf_pci_probe,
	.remove = cxgb4vf_pci_remove,
	.shutdown = cxgb4vf_pci_shutdown,
};
3409f7917c00SJeff Kirsher
3410f7917c00SJeff Kirsher /*
3411f7917c00SJeff Kirsher * Initialize global driver state.
3412f7917c00SJeff Kirsher */
cxgb4vf_module_init(void)3413f7917c00SJeff Kirsher static int __init cxgb4vf_module_init(void)
3414f7917c00SJeff Kirsher {
3415f7917c00SJeff Kirsher int ret;
3416f7917c00SJeff Kirsher
3417f7917c00SJeff Kirsher /*
3418f7917c00SJeff Kirsher * Vet our module parameters.
3419f7917c00SJeff Kirsher */
3420f7917c00SJeff Kirsher if (msi != MSI_MSIX && msi != MSI_MSI) {
3421428ac43fSJoe Perches pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3422f7917c00SJeff Kirsher msi, MSI_MSIX, MSI_MSI);
3423f7917c00SJeff Kirsher return -EINVAL;
3424f7917c00SJeff Kirsher }
3425f7917c00SJeff Kirsher
34269dac1e8eSGreg Kroah-Hartman /* Debugfs support is optional, debugfs will warn if this fails */
3427f7917c00SJeff Kirsher cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3428f7917c00SJeff Kirsher
3429f7917c00SJeff Kirsher ret = pci_register_driver(&cxgb4vf_driver);
34309dac1e8eSGreg Kroah-Hartman if (ret < 0)
3431f7917c00SJeff Kirsher debugfs_remove(cxgb4vf_debugfs_root);
3432f7917c00SJeff Kirsher return ret;
3433f7917c00SJeff Kirsher }
3434f7917c00SJeff Kirsher
3435f7917c00SJeff Kirsher /*
3436f7917c00SJeff Kirsher * Tear down global driver state.
3437f7917c00SJeff Kirsher */
static void __exit cxgb4vf_module_exit(void)
{
	/* Unregister the driver first so every bound device is removed
	 * before the shared debugfs root directory goes away.
	 */
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}
3443f7917c00SJeff Kirsher
3444f7917c00SJeff Kirsher module_init(cxgb4vf_module_init);
3445f7917c00SJeff Kirsher module_exit(cxgb4vf_module_exit);
3446