/**
 * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

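/* Each ENIC_*_STAT() entry pairs an ethtool stat string with the u64-sized
 * offset of that counter inside the corresponding vnic_*_stats block, so
 * enic_get_ethtool_stats() can index the block as an array of u64.
 */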
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64) \
}

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

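/* Program the RX interrupt coalescing timer (in usecs) on the interrupt
 * of every receive queue.
 */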
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int i;
	int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}

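/* Report link settings: a fibre port advertising 10G full duplex with
 * autonegotiation disabled.  The link speed is read from the vNIC device
 * while the carrier is up, and reported as unknown otherwise.
 */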
static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = AUTONEG_DISABLE;

	return 0;
}

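/* Fill in the driver name/version, the firmware version queried from the
 * device, and the PCI bus info, e.g. for "ethtool -i eth0" (interface name
 * is just an example).  If the fw_info buffer cannot be allocated, the
 * previously reported values are left untouched.
 */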
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

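/* Stat string table for "ethtool -S"; the TX, RX, generic ordering here
 * must match the order used by enic_get_ethtool_stats() below.
 */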
static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}

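/* Dump counters for "ethtool -S eth0" in the same TX, RX, generic order as
 * the strings above; TX and RX come from the firmware stats dump, the
 * generic (driver-level) stats from enic->gen_stats.
 */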
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

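/* Report the current coalescing configuration, e.g. for "ethtool -c eth0".
 * The TX timer is only reported in MSI-X mode.
 */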
static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}

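/* Sanity-check a coalescing request: reject every field this driver does
 * not implement, reject a TX timer outside MSI-X mode, warn when values
 * exceed the adapter maximum (the caller clamps them), and require the
 * adaptive RX range to leave at least ENIC_AIC_LARGE_PKT_DIFF usecs
 * between its low and high bounds.
 */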
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if (ec->rx_max_coalesced_frames		||
	    ec->rx_coalesce_usecs_irq		||
	    ec->rx_max_coalesced_frames_irq	||
	    ec->tx_max_coalesced_frames		||
	    ec->tx_coalesce_usecs_irq		||
	    ec->tx_max_coalesced_frames_irq	||
	    ec->stats_block_coalesce_usecs	||
	    ec->use_adaptive_tx_coalesce	||
	    ec->pkt_rate_low			||
	    ec->rx_max_coalesced_frames_low	||
	    ec->tx_coalesce_usecs_low		||
	    ec->tx_max_coalesced_frames_low	||
	    ec->pkt_rate_high			||
	    ec->rx_max_coalesced_frames_high	||
	    ec->tx_coalesce_usecs_high		||
	    ec->tx_max_coalesced_frames_high	||
	    ec->rate_sample_interval)
		return -EINVAL;

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}

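/* Apply a coalescing request, e.g. "ethtool -C eth0 rx-usecs 30" or
 * "ethtool -C eth0 adaptive-rx on rx-usecs-low 10 rx-usecs-high 80".
 * Values are clamped to the adapter maximum; TX (WQ) timers are only
 * programmed in MSI-X mode, and the RX timer is either fixed or, with
 * adaptive RX enabled, driven by the rx_coalesce_setting range.
 */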
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

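/* Report the IDs of all installed classifier filters for
 * ETHTOOL_GRXCLSRLALL.  Returns -EMSGSIZE if the caller's rule_locs array
 * is too small; the caller holds rfs_h.lock.
 */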
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}

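/* Report a single classifier filter (ETHTOOL_GRXCLSRULE), e.g. for
 * "ethtool -n eth0 rule 5".  Only the TCPv4/UDPv4 exact-match filters
 * installed by the classifier (aRFS) can be represented; the caller holds
 * rfs_h.lock.
 */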
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}

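/* Dispatch ETHTOOL_GRX* queries: RX ring count, classifier rule count, the
 * full rule list and individual rules, taking rfs_h.lock around accesses
 * to the classifier table.
 */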
400 
401 static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
402 			  u32 *rule_locs)
403 {
404 	struct enic *enic = netdev_priv(dev);
405 	int ret = 0;
406 
407 	switch (cmd->cmd) {
408 	case ETHTOOL_GRXRINGS:
409 		cmd->data = enic->rq_count;
410 		break;
411 	case ETHTOOL_GRXCLSRLCNT:
412 		spin_lock_bh(&enic->rfs_h.lock);
413 		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
414 		cmd->data = enic->rfs_h.max;
415 		spin_unlock_bh(&enic->rfs_h.lock);
416 		break;
417 	case ETHTOOL_GRXCLSRLALL:
418 		spin_lock_bh(&enic->rfs_h.lock);
419 		ret = enic_grxclsrlall(enic, cmd, rule_locs);
420 		spin_unlock_bh(&enic->rfs_h.lock);
421 		break;
422 	case ETHTOOL_GRXCLSRULE:
423 		spin_lock_bh(&enic->rfs_h.lock);
424 		ret = enic_grxclsrule(enic, cmd);
425 		spin_unlock_bh(&enic->rfs_h.lock);
426 		break;
427 	default:
428 		ret = -EOPNOTSUPP;
429 		break;
430 	}
431 
432 	return ret;
433 }
434 
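/* The only tunable exposed is the RX copybreak threshold, readable and
 * writable via e.g. "ethtool --get-tunable eth0 rx-copybreak" and
 * "ethtool --set-tunable eth0 rx-copybreak 256".
 */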
static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

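/* RSS (rxfh) support: the hash key is ENIC_RSS_LEN bytes and only the
 * Toeplitz hash function is offered; the indirection table cannot be
 * changed through this interface.  E.g. "ethtool -x eth0" shows the key,
 * "ethtool -X eth0 hkey <bytes>" replaces it.
 */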
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}

static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
	    indir)
		return -EINVAL;

	if (hkey)
		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
};

void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}