1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 
34 #include <linux/etherdevice.h>
35 #include <linux/tcp.h>
36 #include <linux/if_vlan.h>
37 #include <linux/delay.h>
38 #include <linux/slab.h>
39 #include <linux/hash.h>
40 #include <net/ip.h>
41 #include <net/busy_poll.h>
42 #include <net/vxlan.h>
43 
44 #include <linux/mlx4/driver.h>
45 #include <linux/mlx4/device.h>
46 #include <linux/mlx4/cmd.h>
47 #include <linux/mlx4/cq.h>
48 
49 #include "mlx4_en.h"
50 #include "en_port.h"
51 
52 int mlx4_en_setup_tc(struct net_device *dev, u8 up)
53 {
54 	struct mlx4_en_priv *priv = netdev_priv(dev);
55 	int i;
56 	unsigned int offset = 0;
57 
58 	if (up && up != MLX4_EN_NUM_UP)
59 		return -EINVAL;
60 
61 	netdev_set_num_tc(dev, up);
62 
63 	/* Partition Tx queues evenly amongst UP's */
64 	for (i = 0; i < up; i++) {
65 		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
66 		offset += priv->num_tx_rings_p_up;
67 	}
68 
69 	return 0;
70 }
71 
72 #ifdef CONFIG_NET_RX_BUSY_POLL
73 /* must be called with local_bh_disable()d */
74 static int mlx4_en_low_latency_recv(struct napi_struct *napi)
75 {
76 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
77 	struct net_device *dev = cq->dev;
78 	struct mlx4_en_priv *priv = netdev_priv(dev);
79 	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
80 	int done;
81 
82 	if (!priv->port_up)
83 		return LL_FLUSH_FAILED;
84 
85 	if (!mlx4_en_cq_lock_poll(cq))
86 		return LL_FLUSH_BUSY;
87 
88 	done = mlx4_en_process_rx_cq(dev, cq, 4);
89 	if (likely(done))
90 		rx_ring->cleaned += done;
91 	else
92 		rx_ring->misses++;
93 
94 	mlx4_en_cq_unlock_poll(cq);
95 
96 	return done;
97 }
98 #endif	/* CONFIG_NET_RX_BUSY_POLL */
99 
100 #ifdef CONFIG_RFS_ACCEL
101 
102 struct mlx4_en_filter {
103 	struct list_head next;
104 	struct work_struct work;
105 
106 	u8     ip_proto;
107 	__be32 src_ip;
108 	__be32 dst_ip;
109 	__be16 src_port;
110 	__be16 dst_port;
111 
112 	int rxq_index;
113 	struct mlx4_en_priv *priv;
114 	u32 flow_id;			/* RFS infrastructure id */
115 	int id;				/* mlx4_en driver id */
116 	u64 reg_id;			/* Flow steering API id */
117 	u8 activated;			/* Used to prevent expiry before filter
118 					 * is attached
119 					 */
120 	struct hlist_node filter_chain;
121 };
122 
123 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
124 
125 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
126 {
127 	switch (ip_proto) {
128 	case IPPROTO_UDP:
129 		return MLX4_NET_TRANS_RULE_ID_UDP;
130 	case IPPROTO_TCP:
131 		return MLX4_NET_TRANS_RULE_ID_TCP;
132 	default:
133 		return MLX4_NET_TRANS_RULE_NUM;
134 	}
135 };
136 
137 static void mlx4_en_filter_work(struct work_struct *work)
138 {
139 	struct mlx4_en_filter *filter = container_of(work,
140 						     struct mlx4_en_filter,
141 						     work);
142 	struct mlx4_en_priv *priv = filter->priv;
143 	struct mlx4_spec_list spec_tcp_udp = {
144 		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
145 		{
146 			.tcp_udp = {
147 				.dst_port = filter->dst_port,
148 				.dst_port_msk = (__force __be16)-1,
149 				.src_port = filter->src_port,
150 				.src_port_msk = (__force __be16)-1,
151 			},
152 		},
153 	};
154 	struct mlx4_spec_list spec_ip = {
155 		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
156 		{
157 			.ipv4 = {
158 				.dst_ip = filter->dst_ip,
159 				.dst_ip_msk = (__force __be32)-1,
160 				.src_ip = filter->src_ip,
161 				.src_ip_msk = (__force __be32)-1,
162 			},
163 		},
164 	};
165 	struct mlx4_spec_list spec_eth = {
166 		.id = MLX4_NET_TRANS_RULE_ID_ETH,
167 	};
168 	struct mlx4_net_trans_rule rule = {
169 		.list = LIST_HEAD_INIT(rule.list),
170 		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
171 		.exclusive = 1,
172 		.allow_loopback = 1,
173 		.promisc_mode = MLX4_FS_REGULAR,
174 		.port = priv->port,
175 		.priority = MLX4_DOMAIN_RFS,
176 	};
177 	int rc;
178 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
179 
180 	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
181 		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
182 			filter->ip_proto);
183 		goto ignore;
184 	}
185 	list_add_tail(&spec_eth.list, &rule.list);
186 	list_add_tail(&spec_ip.list, &rule.list);
187 	list_add_tail(&spec_tcp_udp.list, &rule.list);
188 
189 	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
190 	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
191 	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
192 
193 	filter->activated = 0;
194 
195 	if (filter->reg_id) {
196 		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
197 		if (rc && rc != -ENOENT)
198 			en_err(priv, "Error detaching flow. rc = %d\n", rc);
199 	}
200 
201 	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
202 	if (rc)
203 		en_err(priv, "Error attaching flow. err = %d\n", rc);
204 
205 ignore:
206 	mlx4_en_filter_rfs_expire(priv);
207 
208 	filter->activated = 1;
209 }
210 
211 static inline struct hlist_head *
212 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
213 		   __be16 src_port, __be16 dst_port)
214 {
215 	unsigned long l;
216 	int bucket_idx;
217 
218 	l = (__force unsigned long)src_port |
219 	    ((__force unsigned long)dst_port << 2);
220 	l ^= (__force unsigned long)(src_ip ^ dst_ip);
221 
222 	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
223 
224 	return &priv->filter_hash[bucket_idx];
225 }
226 
227 static struct mlx4_en_filter *
228 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
229 		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
230 		     __be16 dst_port, u32 flow_id)
231 {
232 	struct mlx4_en_filter *filter = NULL;
233 
234 	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
235 	if (!filter)
236 		return NULL;
237 
238 	filter->priv = priv;
239 	filter->rxq_index = rxq_index;
240 	INIT_WORK(&filter->work, mlx4_en_filter_work);
241 
242 	filter->src_ip = src_ip;
243 	filter->dst_ip = dst_ip;
244 	filter->ip_proto = ip_proto;
245 	filter->src_port = src_port;
246 	filter->dst_port = dst_port;
247 
248 	filter->flow_id = flow_id;
249 
250 	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
251 
252 	list_add_tail(&filter->next, &priv->filters);
253 	hlist_add_head(&filter->filter_chain,
254 		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
255 					  dst_port));
256 
257 	return filter;
258 }
259 
260 static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
261 {
262 	struct mlx4_en_priv *priv = filter->priv;
263 	int rc;
264 
265 	list_del(&filter->next);
266 
267 	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
268 	if (rc && rc != -ENOENT)
269 		en_err(priv, "Error detaching flow. rc = %d\n", rc);
270 
271 	kfree(filter);
272 }
273 
274 static inline struct mlx4_en_filter *
275 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
276 		    u8 ip_proto, __be16 src_port, __be16 dst_port)
277 {
278 	struct mlx4_en_filter *filter;
279 	struct mlx4_en_filter *ret = NULL;
280 
281 	hlist_for_each_entry(filter,
282 			     filter_hash_bucket(priv, src_ip, dst_ip,
283 						src_port, dst_port),
284 			     filter_chain) {
285 		if (filter->src_ip == src_ip &&
286 		    filter->dst_ip == dst_ip &&
287 		    filter->ip_proto == ip_proto &&
288 		    filter->src_port == src_port &&
289 		    filter->dst_port == dst_port) {
290 			ret = filter;
291 			break;
292 		}
293 	}
294 
295 	return ret;
296 }
297 
298 static int
299 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
300 		   u16 rxq_index, u32 flow_id)
301 {
302 	struct mlx4_en_priv *priv = netdev_priv(net_dev);
303 	struct mlx4_en_filter *filter;
304 	const struct iphdr *ip;
305 	const __be16 *ports;
306 	u8 ip_proto;
307 	__be32 src_ip;
308 	__be32 dst_ip;
309 	__be16 src_port;
310 	__be16 dst_port;
311 	int nhoff = skb_network_offset(skb);
312 	int ret = 0;
313 
314 	if (skb->protocol != htons(ETH_P_IP))
315 		return -EPROTONOSUPPORT;
316 
317 	ip = (const struct iphdr *)(skb->data + nhoff);
318 	if (ip_is_fragment(ip))
319 		return -EPROTONOSUPPORT;
320 
321 	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
322 		return -EPROTONOSUPPORT;
323 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
324 
325 	ip_proto = ip->protocol;
326 	src_ip = ip->saddr;
327 	dst_ip = ip->daddr;
328 	src_port = ports[0];
329 	dst_port = ports[1];
330 
331 	spin_lock_bh(&priv->filters_lock);
332 	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
333 				     src_port, dst_port);
334 	if (filter) {
335 		if (filter->rxq_index == rxq_index)
336 			goto out;
337 
338 		filter->rxq_index = rxq_index;
339 	} else {
340 		filter = mlx4_en_filter_alloc(priv, rxq_index,
341 					      src_ip, dst_ip, ip_proto,
342 					      src_port, dst_port, flow_id);
343 		if (!filter) {
344 			ret = -ENOMEM;
345 			goto err;
346 		}
347 	}
348 
349 	queue_work(priv->mdev->workqueue, &filter->work);
350 
351 out:
352 	ret = filter->id;
353 err:
354 	spin_unlock_bh(&priv->filters_lock);
355 
356 	return ret;
357 }
358 
359 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
360 {
361 	struct mlx4_en_filter *filter, *tmp;
362 	LIST_HEAD(del_list);
363 
364 	spin_lock_bh(&priv->filters_lock);
365 	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
366 		list_move(&filter->next, &del_list);
367 		hlist_del(&filter->filter_chain);
368 	}
369 	spin_unlock_bh(&priv->filters_lock);
370 
371 	list_for_each_entry_safe(filter, tmp, &del_list, next) {
372 		cancel_work_sync(&filter->work);
373 		mlx4_en_filter_free(filter);
374 	}
375 }
376 
377 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
378 {
379 	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
380 	LIST_HEAD(del_list);
381 	int i = 0;
382 
383 	spin_lock_bh(&priv->filters_lock);
384 	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
385 		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
386 			break;
387 
388 		if (filter->activated &&
389 		    !work_pending(&filter->work) &&
390 		    rps_may_expire_flow(priv->dev,
391 					filter->rxq_index, filter->flow_id,
392 					filter->id)) {
393 			list_move(&filter->next, &del_list);
394 			hlist_del(&filter->filter_chain);
395 		} else
396 			last_filter = filter;
397 
398 		i++;
399 	}
400 
401 	if (last_filter && (&last_filter->next != priv->filters.next))
402 		list_move(&priv->filters, &last_filter->next);
403 
404 	spin_unlock_bh(&priv->filters_lock);
405 
406 	list_for_each_entry_safe(filter, tmp, &del_list, next)
407 		mlx4_en_filter_free(filter);
408 }
409 #endif
410 
411 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
412 				   __be16 proto, u16 vid)
413 {
414 	struct mlx4_en_priv *priv = netdev_priv(dev);
415 	struct mlx4_en_dev *mdev = priv->mdev;
416 	int err;
417 	int idx;
418 
419 	en_dbg(HW, priv, "adding VLAN:%d\n", vid);
420 
421 	set_bit(vid, priv->active_vlans);
422 
423 	/* Add VID to port VLAN filter */
424 	mutex_lock(&mdev->state_lock);
425 	if (mdev->device_up && priv->port_up) {
426 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
427 		if (err)
428 			en_err(priv, "Failed configuring VLAN filter\n");
429 	}
430 	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
431 		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
432 	mutex_unlock(&mdev->state_lock);
433 
434 	return 0;
435 }
436 
437 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
438 				    __be16 proto, u16 vid)
439 {
440 	struct mlx4_en_priv *priv = netdev_priv(dev);
441 	struct mlx4_en_dev *mdev = priv->mdev;
442 	int err;
443 
444 	en_dbg(HW, priv, "Killing VID:%d\n", vid);
445 
446 	clear_bit(vid, priv->active_vlans);
447 
448 	/* Remove VID from port VLAN filter */
449 	mutex_lock(&mdev->state_lock);
450 	mlx4_unregister_vlan(mdev->dev, priv->port, vid);
451 
452 	if (mdev->device_up && priv->port_up) {
453 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
454 		if (err)
455 			en_err(priv, "Failed configuring VLAN filter\n");
456 	}
457 	mutex_unlock(&mdev->state_lock);
458 
459 	return 0;
460 }
461 
462 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
463 {
464 	int i;
465 	for (i = ETH_ALEN - 1; i >= 0; --i) {
466 		dst_mac[i] = src_mac & 0xff;
467 		src_mac >>= 8;
468 	}
469 	memset(&dst_mac[ETH_ALEN], 0, 2);
470 }
471 
472 
473 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
474 				    int qpn, u64 *reg_id)
475 {
476 	int err;
477 	struct mlx4_spec_list spec_eth_outer = { {NULL} };
478 	struct mlx4_spec_list spec_vxlan     = { {NULL} };
479 	struct mlx4_spec_list spec_eth_inner = { {NULL} };
480 
481 	struct mlx4_net_trans_rule rule = {
482 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
483 		.exclusive = 0,
484 		.allow_loopback = 1,
485 		.promisc_mode = MLX4_FS_REGULAR,
486 		.priority = MLX4_DOMAIN_NIC,
487 	};
488 
489 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
490 
491 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
492 		return 0; /* do nothing */
493 
494 	rule.port = priv->port;
495 	rule.qpn = qpn;
496 	INIT_LIST_HEAD(&rule.list);
497 
498 	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
499 	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
500 	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
501 
502 	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
503 	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	 /* any inner eth header */
504 
505 	list_add_tail(&spec_eth_outer.list, &rule.list);
506 	list_add_tail(&spec_vxlan.list,     &rule.list);
507 	list_add_tail(&spec_eth_inner.list, &rule.list);
508 
509 	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
510 	if (err) {
511 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
512 		return err;
513 	}
514 	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
515 	return 0;
516 }
517 
518 
519 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
520 				unsigned char *mac, int *qpn, u64 *reg_id)
521 {
522 	struct mlx4_en_dev *mdev = priv->mdev;
523 	struct mlx4_dev *dev = mdev->dev;
524 	int err;
525 
526 	switch (dev->caps.steering_mode) {
527 	case MLX4_STEERING_MODE_B0: {
528 		struct mlx4_qp qp;
529 		u8 gid[16] = {0};
530 
531 		qp.qpn = *qpn;
532 		memcpy(&gid[10], mac, ETH_ALEN);
533 		gid[5] = priv->port;
534 
535 		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
536 		break;
537 	}
538 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
539 		struct mlx4_spec_list spec_eth = { {NULL} };
540 		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
541 
542 		struct mlx4_net_trans_rule rule = {
543 			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
544 			.exclusive = 0,
545 			.allow_loopback = 1,
546 			.promisc_mode = MLX4_FS_REGULAR,
547 			.priority = MLX4_DOMAIN_NIC,
548 		};
549 
550 		rule.port = priv->port;
551 		rule.qpn = *qpn;
552 		INIT_LIST_HEAD(&rule.list);
553 
554 		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
555 		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
556 		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
557 		list_add_tail(&spec_eth.list, &rule.list);
558 
559 		err = mlx4_flow_attach(dev, &rule, reg_id);
560 		break;
561 	}
562 	default:
563 		return -EINVAL;
564 	}
565 	if (err)
566 		en_warn(priv, "Failed Attaching Unicast\n");
567 
568 	return err;
569 }
570 
571 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
572 				     unsigned char *mac, int qpn, u64 reg_id)
573 {
574 	struct mlx4_en_dev *mdev = priv->mdev;
575 	struct mlx4_dev *dev = mdev->dev;
576 
577 	switch (dev->caps.steering_mode) {
578 	case MLX4_STEERING_MODE_B0: {
579 		struct mlx4_qp qp;
580 		u8 gid[16] = {0};
581 
582 		qp.qpn = qpn;
583 		memcpy(&gid[10], mac, ETH_ALEN);
584 		gid[5] = priv->port;
585 
586 		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
587 		break;
588 	}
589 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
590 		mlx4_flow_detach(dev, reg_id);
591 		break;
592 	}
593 	default:
594 		en_err(priv, "Invalid steering mode.\n");
595 	}
596 }
597 
598 static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
599 {
600 	struct mlx4_en_dev *mdev = priv->mdev;
601 	struct mlx4_dev *dev = mdev->dev;
602 	struct mlx4_mac_entry *entry;
603 	int index = 0;
604 	int err = 0;
605 	u64 reg_id;
606 	int *qpn = &priv->base_qpn;
607 	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
608 
609 	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
610 	       priv->dev->dev_addr);
611 	index = mlx4_register_mac(dev, priv->port, mac);
612 	if (index < 0) {
613 		err = index;
614 		en_err(priv, "Failed adding MAC: %pM\n",
615 		       priv->dev->dev_addr);
616 		return err;
617 	}
618 
619 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
620 		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
621 		*qpn = base_qpn + index;
622 		return 0;
623 	}
624 
625 	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
626 	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
627 	if (err) {
628 		en_err(priv, "Failed to reserve qp for mac registration\n");
629 		goto qp_err;
630 	}
631 
632 	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
633 	if (err)
634 		goto steer_err;
635 
636 	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
637 				       &priv->tunnel_reg_id);
638 	if (err)
639 		goto tunnel_err;
640 
641 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
642 	if (!entry) {
643 		err = -ENOMEM;
644 		goto alloc_err;
645 	}
646 	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
647 	entry->reg_id = reg_id;
648 
649 	hlist_add_head_rcu(&entry->hlist,
650 			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
651 
652 	return 0;
653 
654 alloc_err:
655 	if (priv->tunnel_reg_id)
656 		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
657 tunnel_err:
658 	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
659 
660 steer_err:
661 	mlx4_qp_release_range(dev, *qpn, 1);
662 
663 qp_err:
664 	mlx4_unregister_mac(dev, priv->port, mac);
665 	return err;
666 }
667 
668 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
669 {
670 	struct mlx4_en_dev *mdev = priv->mdev;
671 	struct mlx4_dev *dev = mdev->dev;
672 	int qpn = priv->base_qpn;
673 	u64 mac;
674 
675 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
676 		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
677 		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
678 		       priv->dev->dev_addr);
679 		mlx4_unregister_mac(dev, priv->port, mac);
680 	} else {
681 		struct mlx4_mac_entry *entry;
682 		struct hlist_node *tmp;
683 		struct hlist_head *bucket;
684 		unsigned int i;
685 
686 		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
687 			bucket = &priv->mac_hash[i];
688 			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
689 				mac = mlx4_mac_to_u64(entry->mac);
690 				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
691 				       entry->mac);
692 				mlx4_en_uc_steer_release(priv, entry->mac,
693 							 qpn, entry->reg_id);
694 
695 				mlx4_unregister_mac(dev, priv->port, mac);
696 				hlist_del_rcu(&entry->hlist);
697 				kfree_rcu(entry, rcu);
698 			}
699 		}
700 
701 		if (priv->tunnel_reg_id) {
702 			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
703 			priv->tunnel_reg_id = 0;
704 		}
705 
706 		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
707 		       priv->port, qpn);
708 		mlx4_qp_release_range(dev, qpn, 1);
709 		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
710 	}
711 }
712 
713 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
714 			       unsigned char *new_mac, unsigned char *prev_mac)
715 {
716 	struct mlx4_en_dev *mdev = priv->mdev;
717 	struct mlx4_dev *dev = mdev->dev;
718 	int err = 0;
719 	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
720 
721 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
722 		struct hlist_head *bucket;
723 		unsigned int mac_hash;
724 		struct mlx4_mac_entry *entry;
725 		struct hlist_node *tmp;
726 		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
727 
728 		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
729 		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
730 			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
731 				mlx4_en_uc_steer_release(priv, entry->mac,
732 							 qpn, entry->reg_id);
733 				mlx4_unregister_mac(dev, priv->port,
734 						    prev_mac_u64);
735 				hlist_del_rcu(&entry->hlist);
736 				synchronize_rcu();
737 				memcpy(entry->mac, new_mac, ETH_ALEN);
738 				entry->reg_id = 0;
739 				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
740 				hlist_add_head_rcu(&entry->hlist,
741 						   &priv->mac_hash[mac_hash]);
742 				mlx4_register_mac(dev, priv->port, new_mac_u64);
743 				err = mlx4_en_uc_steer_add(priv, new_mac,
744 							   &qpn,
745 							   &entry->reg_id);
746 				if (err)
747 					return err;
748 				if (priv->tunnel_reg_id) {
749 					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
750 					priv->tunnel_reg_id = 0;
751 				}
752 				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
753 							       &priv->tunnel_reg_id);
754 				return err;
755 			}
756 		}
757 		return -EINVAL;
758 	}
759 
760 	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
761 }
762 
763 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
764 {
765 	int err = 0;
766 
767 	if (priv->port_up) {
768 		/* Remove old MAC and insert the new one */
769 		err = mlx4_en_replace_mac(priv, priv->base_qpn,
770 					  priv->dev->dev_addr, priv->prev_mac);
771 		if (err)
772 			en_err(priv, "Failed changing HW MAC address\n");
773 	} else
774 		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
775 
776 	memcpy(priv->prev_mac, priv->dev->dev_addr,
777 	       sizeof(priv->prev_mac));
778 
779 	return err;
780 }
781 
782 static int mlx4_en_set_mac(struct net_device *dev, void *addr)
783 {
784 	struct mlx4_en_priv *priv = netdev_priv(dev);
785 	struct mlx4_en_dev *mdev = priv->mdev;
786 	struct sockaddr *saddr = addr;
787 	int err;
788 
789 	if (!is_valid_ether_addr(saddr->sa_data))
790 		return -EADDRNOTAVAIL;
791 
792 	mutex_lock(&mdev->state_lock);
793 	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
794 	err = mlx4_en_do_set_mac(priv);
795 	mutex_unlock(&mdev->state_lock);
796 
797 	return err;
798 }
799 
800 static void mlx4_en_clear_list(struct net_device *dev)
801 {
802 	struct mlx4_en_priv *priv = netdev_priv(dev);
803 	struct mlx4_en_mc_list *tmp, *mc_to_del;
804 
805 	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
806 		list_del(&mc_to_del->list);
807 		kfree(mc_to_del);
808 	}
809 }
810 
811 static void mlx4_en_cache_mclist(struct net_device *dev)
812 {
813 	struct mlx4_en_priv *priv = netdev_priv(dev);
814 	struct netdev_hw_addr *ha;
815 	struct mlx4_en_mc_list *tmp;
816 
817 	mlx4_en_clear_list(dev);
818 	netdev_for_each_mc_addr(ha, dev) {
819 		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
820 		if (!tmp) {
821 			mlx4_en_clear_list(dev);
822 			return;
823 		}
824 		memcpy(tmp->addr, ha->addr, ETH_ALEN);
825 		list_add_tail(&tmp->list, &priv->mc_list);
826 	}
827 }
828 
829 static void update_mclist_flags(struct mlx4_en_priv *priv,
830 				struct list_head *dst,
831 				struct list_head *src)
832 {
833 	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
834 	bool found;
835 
836 	/* Find all the entries that should be removed from dst,
837 	 * These are the entries that are not found in src
838 	 */
839 	list_for_each_entry(dst_tmp, dst, list) {
840 		found = false;
841 		list_for_each_entry(src_tmp, src, list) {
842 			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
843 				found = true;
844 				break;
845 			}
846 		}
847 		if (!found)
848 			dst_tmp->action = MCLIST_REM;
849 	}
850 
851 	/* Add entries that exist in src but not in dst
852 	 * mark them as need to add
853 	 */
854 	list_for_each_entry(src_tmp, src, list) {
855 		found = false;
856 		list_for_each_entry(dst_tmp, dst, list) {
857 			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
858 				dst_tmp->action = MCLIST_NONE;
859 				found = true;
860 				break;
861 			}
862 		}
863 		if (!found) {
864 			new_mc = kmemdup(src_tmp,
865 					 sizeof(struct mlx4_en_mc_list),
866 					 GFP_KERNEL);
867 			if (!new_mc)
868 				return;
869 
870 			new_mc->action = MCLIST_ADD;
871 			list_add_tail(&new_mc->list, dst);
872 		}
873 	}
874 }
875 
876 static void mlx4_en_set_rx_mode(struct net_device *dev)
877 {
878 	struct mlx4_en_priv *priv = netdev_priv(dev);
879 
880 	if (!priv->port_up)
881 		return;
882 
883 	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
884 }
885 
886 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
887 				     struct mlx4_en_dev *mdev)
888 {
889 	int err = 0;
890 
891 	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
892 		if (netif_msg_rx_status(priv))
893 			en_warn(priv, "Entering promiscuous mode\n");
894 		priv->flags |= MLX4_EN_FLAG_PROMISC;
895 
896 		/* Enable promiscouos mode */
897 		switch (mdev->dev->caps.steering_mode) {
898 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
899 			err = mlx4_flow_steer_promisc_add(mdev->dev,
900 							  priv->port,
901 							  priv->base_qpn,
902 							  MLX4_FS_ALL_DEFAULT);
903 			if (err)
904 				en_err(priv, "Failed enabling promiscuous mode\n");
905 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
906 			break;
907 
908 		case MLX4_STEERING_MODE_B0:
909 			err = mlx4_unicast_promisc_add(mdev->dev,
910 						       priv->base_qpn,
911 						       priv->port);
912 			if (err)
913 				en_err(priv, "Failed enabling unicast promiscuous mode\n");
914 
915 			/* Add the default qp number as multicast
916 			 * promisc
917 			 */
918 			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
919 				err = mlx4_multicast_promisc_add(mdev->dev,
920 								 priv->base_qpn,
921 								 priv->port);
922 				if (err)
923 					en_err(priv, "Failed enabling multicast promiscuous mode\n");
924 				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
925 			}
926 			break;
927 
928 		case MLX4_STEERING_MODE_A0:
929 			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
930 						     priv->port,
931 						     priv->base_qpn,
932 						     1);
933 			if (err)
934 				en_err(priv, "Failed enabling promiscuous mode\n");
935 			break;
936 		}
937 
938 		/* Disable port multicast filter (unconditionally) */
939 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
940 					  0, MLX4_MCAST_DISABLE);
941 		if (err)
942 			en_err(priv, "Failed disabling multicast filter\n");
943 
944 		/* Disable port VLAN filter */
945 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
946 		if (err)
947 			en_err(priv, "Failed disabling VLAN filter\n");
948 	}
949 }
950 
951 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
952 				       struct mlx4_en_dev *mdev)
953 {
954 	int err = 0;
955 
956 	if (netif_msg_rx_status(priv))
957 		en_warn(priv, "Leaving promiscuous mode\n");
958 	priv->flags &= ~MLX4_EN_FLAG_PROMISC;
959 
960 	/* Disable promiscouos mode */
961 	switch (mdev->dev->caps.steering_mode) {
962 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
963 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
964 						     priv->port,
965 						     MLX4_FS_ALL_DEFAULT);
966 		if (err)
967 			en_err(priv, "Failed disabling promiscuous mode\n");
968 		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
969 		break;
970 
971 	case MLX4_STEERING_MODE_B0:
972 		err = mlx4_unicast_promisc_remove(mdev->dev,
973 						  priv->base_qpn,
974 						  priv->port);
975 		if (err)
976 			en_err(priv, "Failed disabling unicast promiscuous mode\n");
977 		/* Disable Multicast promisc */
978 		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
979 			err = mlx4_multicast_promisc_remove(mdev->dev,
980 							    priv->base_qpn,
981 							    priv->port);
982 			if (err)
983 				en_err(priv, "Failed disabling multicast promiscuous mode\n");
984 			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
985 		}
986 		break;
987 
988 	case MLX4_STEERING_MODE_A0:
989 		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
990 					     priv->port,
991 					     priv->base_qpn, 0);
992 		if (err)
993 			en_err(priv, "Failed disabling promiscuous mode\n");
994 		break;
995 	}
996 
997 	/* Enable port VLAN filter */
998 	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
999 	if (err)
1000 		en_err(priv, "Failed enabling VLAN filter\n");
1001 }
1002 
1003 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1004 				 struct net_device *dev,
1005 				 struct mlx4_en_dev *mdev)
1006 {
1007 	struct mlx4_en_mc_list *mclist, *tmp;
1008 	u64 mcast_addr = 0;
1009 	u8 mc_list[16] = {0};
1010 	int err = 0;
1011 
1012 	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
1013 	if (dev->flags & IFF_ALLMULTI) {
1014 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1015 					  0, MLX4_MCAST_DISABLE);
1016 		if (err)
1017 			en_err(priv, "Failed disabling multicast filter\n");
1018 
1019 		/* Add the default qp number as multicast promisc */
1020 		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
1021 			switch (mdev->dev->caps.steering_mode) {
1022 			case MLX4_STEERING_MODE_DEVICE_MANAGED:
1023 				err = mlx4_flow_steer_promisc_add(mdev->dev,
1024 								  priv->port,
1025 								  priv->base_qpn,
1026 								  MLX4_FS_MC_DEFAULT);
1027 				break;
1028 
1029 			case MLX4_STEERING_MODE_B0:
1030 				err = mlx4_multicast_promisc_add(mdev->dev,
1031 								 priv->base_qpn,
1032 								 priv->port);
1033 				break;
1034 
1035 			case MLX4_STEERING_MODE_A0:
1036 				break;
1037 			}
1038 			if (err)
1039 				en_err(priv, "Failed entering multicast promisc mode\n");
1040 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
1041 		}
1042 	} else {
1043 		/* Disable Multicast promisc */
1044 		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1045 			switch (mdev->dev->caps.steering_mode) {
1046 			case MLX4_STEERING_MODE_DEVICE_MANAGED:
1047 				err = mlx4_flow_steer_promisc_remove(mdev->dev,
1048 								     priv->port,
1049 								     MLX4_FS_MC_DEFAULT);
1050 				break;
1051 
1052 			case MLX4_STEERING_MODE_B0:
1053 				err = mlx4_multicast_promisc_remove(mdev->dev,
1054 								    priv->base_qpn,
1055 								    priv->port);
1056 				break;
1057 
1058 			case MLX4_STEERING_MODE_A0:
1059 				break;
1060 			}
1061 			if (err)
1062 				en_err(priv, "Failed disabling multicast promiscuous mode\n");
1063 			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1064 		}
1065 
1066 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1067 					  0, MLX4_MCAST_DISABLE);
1068 		if (err)
1069 			en_err(priv, "Failed disabling multicast filter\n");
1070 
1071 		/* Flush mcast filter and init it with broadcast address */
1072 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1073 				    1, MLX4_MCAST_CONFIG);
1074 
1075 		/* Update multicast list - we cache all addresses so they won't
1076 		 * change while HW is updated holding the command semaphor */
1077 		netif_addr_lock_bh(dev);
1078 		mlx4_en_cache_mclist(dev);
1079 		netif_addr_unlock_bh(dev);
1080 		list_for_each_entry(mclist, &priv->mc_list, list) {
1081 			mcast_addr = mlx4_mac_to_u64(mclist->addr);
1082 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
1083 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
1084 		}
1085 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
1086 					  0, MLX4_MCAST_ENABLE);
1087 		if (err)
1088 			en_err(priv, "Failed enabling multicast filter\n");
1089 
1090 		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
1091 		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1092 			if (mclist->action == MCLIST_REM) {
1093 				/* detach this address and delete from list */
1094 				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1095 				mc_list[5] = priv->port;
1096 				err = mlx4_multicast_detach(mdev->dev,
1097 							    &priv->rss_map.indir_qp,
1098 							    mc_list,
1099 							    MLX4_PROT_ETH,
1100 							    mclist->reg_id);
1101 				if (err)
1102 					en_err(priv, "Fail to detach multicast address\n");
1103 
1104 				if (mclist->tunnel_reg_id) {
1105 					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
1106 					if (err)
1107 						en_err(priv, "Failed to detach multicast address\n");
1108 				}
1109 
1110 				/* remove from list */
1111 				list_del(&mclist->list);
1112 				kfree(mclist);
1113 			} else if (mclist->action == MCLIST_ADD) {
1114 				/* attach the address */
1115 				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1116 				/* needed for B0 steering support */
1117 				mc_list[5] = priv->port;
1118 				err = mlx4_multicast_attach(mdev->dev,
1119 							    &priv->rss_map.indir_qp,
1120 							    mc_list,
1121 							    priv->port, 0,
1122 							    MLX4_PROT_ETH,
1123 							    &mclist->reg_id);
1124 				if (err)
1125 					en_err(priv, "Fail to attach multicast address\n");
1126 
1127 				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
1128 							       &mclist->tunnel_reg_id);
1129 				if (err)
1130 					en_err(priv, "Failed to attach multicast address\n");
1131 			}
1132 		}
1133 	}
1134 }
1135 
1136 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1137 				 struct net_device *dev,
1138 				 struct mlx4_en_dev *mdev)
1139 {
1140 	struct netdev_hw_addr *ha;
1141 	struct mlx4_mac_entry *entry;
1142 	struct hlist_node *tmp;
1143 	bool found;
1144 	u64 mac;
1145 	int err = 0;
1146 	struct hlist_head *bucket;
1147 	unsigned int i;
1148 	int removed = 0;
1149 	u32 prev_flags;
1150 
1151 	/* Note that we do not need to protect our mac_hash traversal with rcu,
1152 	 * since all modification code is protected by mdev->state_lock
1153 	 */
1154 
1155 	/* find what to remove */
1156 	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
1157 		bucket = &priv->mac_hash[i];
1158 		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
1159 			found = false;
1160 			netdev_for_each_uc_addr(ha, dev) {
1161 				if (ether_addr_equal_64bits(entry->mac,
1162 							    ha->addr)) {
1163 					found = true;
1164 					break;
1165 				}
1166 			}
1167 
1168 			/* MAC address of the port is not in uc list */
1169 			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
1170 				found = true;
1171 
1172 			if (!found) {
1173 				mac = mlx4_mac_to_u64(entry->mac);
1174 				mlx4_en_uc_steer_release(priv, entry->mac,
1175 							 priv->base_qpn,
1176 							 entry->reg_id);
1177 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
1178 
1179 				hlist_del_rcu(&entry->hlist);
1180 				kfree_rcu(entry, rcu);
1181 				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
1182 				       entry->mac, priv->port);
1183 				++removed;
1184 			}
1185 		}
1186 	}
1187 
1188 	/* if we didn't remove anything, there is no use in trying to add
1189 	 * again once we are in a forced promisc mode state
1190 	 */
1191 	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
1192 		return;
1193 
1194 	prev_flags = priv->flags;
1195 	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
1196 
1197 	/* find what to add */
1198 	netdev_for_each_uc_addr(ha, dev) {
1199 		found = false;
1200 		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
1201 		hlist_for_each_entry(entry, bucket, hlist) {
1202 			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
1203 				found = true;
1204 				break;
1205 			}
1206 		}
1207 
1208 		if (!found) {
1209 			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1210 			if (!entry) {
1211 				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
1212 				       ha->addr, priv->port);
1213 				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1214 				break;
1215 			}
1216 			mac = mlx4_mac_to_u64(ha->addr);
1217 			memcpy(entry->mac, ha->addr, ETH_ALEN);
1218 			err = mlx4_register_mac(mdev->dev, priv->port, mac);
1219 			if (err < 0) {
1220 				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
1221 				       ha->addr, priv->port, err);
1222 				kfree(entry);
1223 				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1224 				break;
1225 			}
1226 			err = mlx4_en_uc_steer_add(priv, ha->addr,
1227 						   &priv->base_qpn,
1228 						   &entry->reg_id);
1229 			if (err) {
1230 				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
1231 				       ha->addr, priv->port, err);
1232 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
1233 				kfree(entry);
1234 				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1235 				break;
1236 			} else {
1237 				unsigned int mac_hash;
1238 				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
1239 				       ha->addr, priv->port);
1240 				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
1241 				bucket = &priv->mac_hash[mac_hash];
1242 				hlist_add_head_rcu(&entry->hlist, bucket);
1243 			}
1244 		}
1245 	}
1246 
1247 	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1248 		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
1249 			priv->port);
1250 	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
1251 		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
1252 			priv->port);
1253 	}
1254 }
1255 
1256 static void mlx4_en_do_set_rx_mode(struct work_struct *work)
1257 {
1258 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1259 						 rx_mode_task);
1260 	struct mlx4_en_dev *mdev = priv->mdev;
1261 	struct net_device *dev = priv->dev;
1262 
1263 	mutex_lock(&mdev->state_lock);
1264 	if (!mdev->device_up) {
1265 		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
1266 		goto out;
1267 	}
1268 	if (!priv->port_up) {
1269 		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
1270 		goto out;
1271 	}
1272 
1273 	if (!netif_carrier_ok(dev)) {
1274 		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
1275 			if (priv->port_state.link_state) {
1276 				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
1277 				netif_carrier_on(dev);
1278 				en_dbg(LINK, priv, "Link Up\n");
1279 			}
1280 		}
1281 	}
1282 
1283 	if (dev->priv_flags & IFF_UNICAST_FLT)
1284 		mlx4_en_do_uc_filter(priv, dev, mdev);
1285 
1286 	/* Promsicuous mode: disable all filters */
1287 	if ((dev->flags & IFF_PROMISC) ||
1288 	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
1289 		mlx4_en_set_promisc_mode(priv, mdev);
1290 		goto out;
1291 	}
1292 
1293 	/* Not in promiscuous mode */
1294 	if (priv->flags & MLX4_EN_FLAG_PROMISC)
1295 		mlx4_en_clear_promisc_mode(priv, mdev);
1296 
1297 	mlx4_en_do_multicast(priv, dev, mdev);
1298 out:
1299 	mutex_unlock(&mdev->state_lock);
1300 }
1301 
1302 #ifdef CONFIG_NET_POLL_CONTROLLER
1303 static void mlx4_en_netpoll(struct net_device *dev)
1304 {
1305 	struct mlx4_en_priv *priv = netdev_priv(dev);
1306 	struct mlx4_en_cq *cq;
1307 	int i;
1308 
1309 	for (i = 0; i < priv->rx_ring_num; i++) {
1310 		cq = priv->rx_cq[i];
1311 		napi_schedule(&cq->napi);
1312 	}
1313 }
1314 #endif
1315 
1316 static void mlx4_en_tx_timeout(struct net_device *dev)
1317 {
1318 	struct mlx4_en_priv *priv = netdev_priv(dev);
1319 	struct mlx4_en_dev *mdev = priv->mdev;
1320 	int i;
1321 
1322 	if (netif_msg_timer(priv))
1323 		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
1324 
1325 	for (i = 0; i < priv->tx_ring_num; i++) {
1326 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
1327 			continue;
1328 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
1329 			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
1330 			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
1331 	}
1332 
1333 	priv->port_stats.tx_timeout++;
1334 	en_dbg(DRV, priv, "Scheduling watchdog\n");
1335 	queue_work(mdev->workqueue, &priv->watchdog_task);
1336 }
1337 
1338 
1339 static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
1340 {
1341 	struct mlx4_en_priv *priv = netdev_priv(dev);
1342 
1343 	spin_lock_bh(&priv->stats_lock);
1344 	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
1345 	spin_unlock_bh(&priv->stats_lock);
1346 
1347 	return &priv->ret_stats;
1348 }
1349 
1350 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1351 {
1352 	struct mlx4_en_cq *cq;
1353 	int i;
1354 
1355 	/* If we haven't received a specific coalescing setting
1356 	 * (module param), we set the moderation parameters as follows:
1357 	 * - moder_cnt is set to the number of mtu sized packets to
1358 	 *   satisfy our coalescing target.
1359 	 * - moder_time is set to a fixed value.
1360 	 */
1361 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1362 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1363 	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1364 	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1365 	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1366 	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
1367 
1368 	/* Setup cq moderation params */
1369 	for (i = 0; i < priv->rx_ring_num; i++) {
1370 		cq = priv->rx_cq[i];
1371 		cq->moder_cnt = priv->rx_frames;
1372 		cq->moder_time = priv->rx_usecs;
1373 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1374 		priv->last_moder_packets[i] = 0;
1375 		priv->last_moder_bytes[i] = 0;
1376 	}
1377 
1378 	for (i = 0; i < priv->tx_ring_num; i++) {
1379 		cq = priv->tx_cq[i];
1380 		cq->moder_cnt = priv->tx_frames;
1381 		cq->moder_time = priv->tx_usecs;
1382 	}
1383 
1384 	/* Reset auto-moderation params */
1385 	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1386 	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1387 	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1388 	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1389 	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1390 	priv->adaptive_rx_coal = 1;
1391 	priv->last_moder_jiffies = 0;
1392 	priv->last_moder_tx_packets = 0;
1393 }
1394 
1395 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1396 {
1397 	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1398 	struct mlx4_en_cq *cq;
1399 	unsigned long packets;
1400 	unsigned long rate;
1401 	unsigned long avg_pkt_size;
1402 	unsigned long rx_packets;
1403 	unsigned long rx_bytes;
1404 	unsigned long rx_pkt_diff;
1405 	int moder_time;
1406 	int ring, err;
1407 
1408 	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1409 		return;
1410 
1411 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
1412 		spin_lock_bh(&priv->stats_lock);
1413 		rx_packets = priv->rx_ring[ring]->packets;
1414 		rx_bytes = priv->rx_ring[ring]->bytes;
1415 		spin_unlock_bh(&priv->stats_lock);
1416 
1417 		rx_pkt_diff = ((unsigned long) (rx_packets -
1418 				priv->last_moder_packets[ring]));
1419 		packets = rx_pkt_diff;
1420 		rate = packets * HZ / period;
1421 		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1422 				priv->last_moder_bytes[ring])) / packets : 0;
1423 
1424 		/* Apply auto-moderation only when packet rate
1425 		 * exceeds a rate that it matters */
1426 		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1427 		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1428 			if (rate < priv->pkt_rate_low)
1429 				moder_time = priv->rx_usecs_low;
1430 			else if (rate > priv->pkt_rate_high)
1431 				moder_time = priv->rx_usecs_high;
1432 			else
1433 				moder_time = (rate - priv->pkt_rate_low) *
1434 					(priv->rx_usecs_high - priv->rx_usecs_low) /
1435 					(priv->pkt_rate_high - priv->pkt_rate_low) +
1436 					priv->rx_usecs_low;
1437 		} else {
1438 			moder_time = priv->rx_usecs_low;
1439 		}
1440 
1441 		if (moder_time != priv->last_moder_time[ring]) {
1442 			priv->last_moder_time[ring] = moder_time;
1443 			cq = priv->rx_cq[ring];
1444 			cq->moder_time = moder_time;
1445 			cq->moder_cnt = priv->rx_frames;
1446 			err = mlx4_en_set_cq_moder(priv, cq);
1447 			if (err)
1448 				en_err(priv, "Failed modifying moderation for cq:%d\n",
1449 				       ring);
1450 		}
1451 		priv->last_moder_packets[ring] = rx_packets;
1452 		priv->last_moder_bytes[ring] = rx_bytes;
1453 	}
1454 
1455 	priv->last_moder_jiffies = jiffies;
1456 }
1457 
1458 static void mlx4_en_do_get_stats(struct work_struct *work)
1459 {
1460 	struct delayed_work *delay = to_delayed_work(work);
1461 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1462 						 stats_task);
1463 	struct mlx4_en_dev *mdev = priv->mdev;
1464 	int err;
1465 
1466 	mutex_lock(&mdev->state_lock);
1467 	if (mdev->device_up) {
1468 		if (priv->port_up) {
1469 			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1470 			if (err)
1471 				en_dbg(HW, priv, "Could not update stats\n");
1472 
1473 			mlx4_en_auto_moderation(priv);
1474 		}
1475 
1476 		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1477 	}
1478 	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1479 		mlx4_en_do_set_mac(priv);
1480 		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1481 	}
1482 	mutex_unlock(&mdev->state_lock);
1483 }
1484 
1485 /* mlx4_en_service_task - Run service task for tasks that needed to be done
1486  * periodically
1487  */
1488 static void mlx4_en_service_task(struct work_struct *work)
1489 {
1490 	struct delayed_work *delay = to_delayed_work(work);
1491 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1492 						 service_task);
1493 	struct mlx4_en_dev *mdev = priv->mdev;
1494 
1495 	mutex_lock(&mdev->state_lock);
1496 	if (mdev->device_up) {
1497 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1498 			mlx4_en_ptp_overflow_check(mdev);
1499 
1500 		queue_delayed_work(mdev->workqueue, &priv->service_task,
1501 				   SERVICE_TASK_DELAY);
1502 	}
1503 	mutex_unlock(&mdev->state_lock);
1504 }
1505 
1506 static void mlx4_en_linkstate(struct work_struct *work)
1507 {
1508 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1509 						 linkstate_task);
1510 	struct mlx4_en_dev *mdev = priv->mdev;
1511 	int linkstate = priv->link_state;
1512 
1513 	mutex_lock(&mdev->state_lock);
1514 	/* If observable port state changed set carrier state and
1515 	 * report to system log */
1516 	if (priv->last_link_state != linkstate) {
1517 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1518 			en_info(priv, "Link Down\n");
1519 			netif_carrier_off(priv->dev);
1520 		} else {
1521 			en_info(priv, "Link Up\n");
1522 			netif_carrier_on(priv->dev);
1523 		}
1524 	}
1525 	priv->last_link_state = linkstate;
1526 	mutex_unlock(&mdev->state_lock);
1527 }
1528 
1529 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1530 {
1531 	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1532 	int numa_node = priv->mdev->dev->numa_node;
1533 	int ret = 0;
1534 
1535 	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1536 		return -ENOMEM;
1537 
1538 	ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
1539 					  ring->affinity_mask);
1540 	if (ret)
1541 		free_cpumask_var(ring->affinity_mask);
1542 
1543 	return ret;
1544 }
1545 
1546 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1547 {
1548 	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1549 }
1550 
1551 int mlx4_en_start_port(struct net_device *dev)
1552 {
1553 	struct mlx4_en_priv *priv = netdev_priv(dev);
1554 	struct mlx4_en_dev *mdev = priv->mdev;
1555 	struct mlx4_en_cq *cq;
1556 	struct mlx4_en_tx_ring *tx_ring;
1557 	int rx_index = 0;
1558 	int tx_index = 0;
1559 	int err = 0;
1560 	int i;
1561 	int j;
1562 	u8 mc_list[16] = {0};
1563 
1564 	if (priv->port_up) {
1565 		en_dbg(DRV, priv, "start port called while port already up\n");
1566 		return 0;
1567 	}
1568 
1569 	INIT_LIST_HEAD(&priv->mc_list);
1570 	INIT_LIST_HEAD(&priv->curr_list);
1571 	INIT_LIST_HEAD(&priv->ethtool_list);
1572 	memset(&priv->ethtool_rules[0], 0,
1573 	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
1574 
1575 	/* Calculate Rx buf size */
1576 	dev->mtu = min(dev->mtu, priv->max_mtu);
1577 	mlx4_en_calc_rx_buf(dev);
1578 	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
1579 
1580 	/* Configure rx cq's and rings */
1581 	err = mlx4_en_activate_rx_rings(priv);
1582 	if (err) {
1583 		en_err(priv, "Failed to activate RX rings\n");
1584 		return err;
1585 	}
1586 	for (i = 0; i < priv->rx_ring_num; i++) {
1587 		cq = priv->rx_cq[i];
1588 
1589 		mlx4_en_cq_init_lock(cq);
1590 
1591 		err = mlx4_en_init_affinity_hint(priv, i);
1592 		if (err) {
1593 			en_err(priv, "Failed preparing IRQ affinity hint\n");
1594 			goto cq_err;
1595 		}
1596 
1597 		err = mlx4_en_activate_cq(priv, cq, i);
1598 		if (err) {
1599 			en_err(priv, "Failed activating Rx CQ\n");
1600 			mlx4_en_free_affinity_hint(priv, i);
1601 			goto cq_err;
1602 		}
1603 		for (j = 0; j < cq->size; j++)
1604 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1605 		err = mlx4_en_set_cq_moder(priv, cq);
1606 		if (err) {
1607 			en_err(priv, "Failed setting cq moderation parameters\n");
1608 			mlx4_en_deactivate_cq(priv, cq);
1609 			mlx4_en_free_affinity_hint(priv, i);
1610 			goto cq_err;
1611 		}
1612 		mlx4_en_arm_cq(priv, cq);
1613 		priv->rx_ring[i]->cqn = cq->mcq.cqn;
1614 		++rx_index;
1615 	}
1616 
1617 	/* Set qp number */
1618 	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1619 	err = mlx4_en_get_qp(priv);
1620 	if (err) {
1621 		en_err(priv, "Failed getting eth qp\n");
1622 		goto cq_err;
1623 	}
1624 	mdev->mac_removed[priv->port] = 0;
1625 
1626 	err = mlx4_en_config_rss_steer(priv);
1627 	if (err) {
1628 		en_err(priv, "Failed configuring rss steering\n");
1629 		goto mac_err;
1630 	}
1631 
1632 	err = mlx4_en_create_drop_qp(priv);
1633 	if (err)
1634 		goto rss_err;
1635 
1636 	/* Configure tx cq's and rings */
1637 	for (i = 0; i < priv->tx_ring_num; i++) {
1638 		/* Configure cq */
1639 		cq = priv->tx_cq[i];
1640 		err = mlx4_en_activate_cq(priv, cq, i);
1641 		if (err) {
1642 			en_err(priv, "Failed allocating Tx CQ\n");
1643 			goto tx_err;
1644 		}
1645 		err = mlx4_en_set_cq_moder(priv, cq);
1646 		if (err) {
1647 			en_err(priv, "Failed setting cq moderation parameters\n");
1648 			mlx4_en_deactivate_cq(priv, cq);
1649 			goto tx_err;
1650 		}
1651 		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1652 		cq->buf->wqe_index = cpu_to_be16(0xffff);
1653 
1654 		/* Configure ring */
1655 		tx_ring = priv->tx_ring[i];
1656 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1657 			i / priv->num_tx_rings_p_up);
1658 		if (err) {
1659 			en_err(priv, "Failed allocating Tx ring\n");
1660 			mlx4_en_deactivate_cq(priv, cq);
1661 			goto tx_err;
1662 		}
1663 		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1664 
1665 		/* Arm CQ for TX completions */
1666 		mlx4_en_arm_cq(priv, cq);
1667 
1668 		/* Set initial ownership of all Tx TXBBs to SW (1) */
1669 		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1670 			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1671 		++tx_index;
1672 	}
1673 
1674 	/* Configure port */
1675 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1676 				    priv->rx_skb_size + ETH_FCS_LEN,
1677 				    priv->prof->tx_pause,
1678 				    priv->prof->tx_ppp,
1679 				    priv->prof->rx_pause,
1680 				    priv->prof->rx_ppp);
1681 	if (err) {
1682 		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1683 		       priv->port, err);
1684 		goto tx_err;
1685 	}
1686 	/* Set default qp number */
1687 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1688 	if (err) {
1689 		en_err(priv, "Failed setting default qp numbers\n");
1690 		goto tx_err;
1691 	}
1692 
1693 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1694 		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
1695 		if (err) {
1696 			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1697 			       err);
1698 			goto tx_err;
1699 		}
1700 	}
1701 
1702 	/* Init port */
1703 	en_dbg(HW, priv, "Initializing port\n");
1704 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
1705 	if (err) {
1706 		en_err(priv, "Failed Initializing port\n");
1707 		goto tx_err;
1708 	}
1709 
1710 	/* Attach rx QP to bradcast address */
1711 	memset(&mc_list[10], 0xff, ETH_ALEN);
1712 	mc_list[5] = priv->port; /* needed for B0 steering support */
1713 	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1714 				  priv->port, 0, MLX4_PROT_ETH,
1715 				  &priv->broadcast_id))
1716 		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1717 
1718 	/* Must redo promiscuous mode setup. */
1719 	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1720 
1721 	/* Schedule multicast task to populate multicast list */
1722 	queue_work(mdev->workqueue, &priv->rx_mode_task);
1723 
1724 	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1725 
1726 #ifdef CONFIG_MLX4_EN_VXLAN
1727 	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1728 		vxlan_get_rx_port(dev);
1729 #endif
1730 	priv->port_up = true;
1731 	netif_tx_start_all_queues(dev);
1732 	netif_device_attach(dev);
1733 
1734 	return 0;
1735 
1736 tx_err:
1737 	while (tx_index--) {
1738 		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1739 		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1740 	}
1741 	mlx4_en_destroy_drop_qp(priv);
1742 rss_err:
1743 	mlx4_en_release_rss_steer(priv);
1744 mac_err:
1745 	mlx4_en_put_qp(priv);
1746 cq_err:
1747 	while (rx_index--) {
1748 		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1749 		mlx4_en_free_affinity_hint(priv, i);
1750 	}
1751 	for (i = 0; i < priv->rx_ring_num; i++)
1752 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1753 
1754 	return err; /* need to close devices */
1755 }
1756 
1757 
1758 void mlx4_en_stop_port(struct net_device *dev, int detach)
1759 {
1760 	struct mlx4_en_priv *priv = netdev_priv(dev);
1761 	struct mlx4_en_dev *mdev = priv->mdev;
1762 	struct mlx4_en_mc_list *mclist, *tmp;
1763 	struct ethtool_flow_id *flow, *tmp_flow;
1764 	int i;
1765 	u8 mc_list[16] = {0};
1766 
1767 	if (!priv->port_up) {
1768 		en_dbg(DRV, priv, "stop port called while port already down\n");
1769 		return;
1770 	}
1771 
1772 	/* close port*/
1773 	mlx4_CLOSE_PORT(mdev->dev, priv->port);
1774 
1775 	/* Synchronize with tx routine */
1776 	netif_tx_lock_bh(dev);
1777 	if (detach)
1778 		netif_device_detach(dev);
1779 	netif_tx_stop_all_queues(dev);
1780 	netif_tx_unlock_bh(dev);
1781 
1782 	netif_tx_disable(dev);
1783 
1784 	/* Set port as not active */
1785 	priv->port_up = false;
1786 
1787 	/* Promsicuous mode */
1788 	if (mdev->dev->caps.steering_mode ==
1789 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1790 		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1791 				 MLX4_EN_FLAG_MC_PROMISC);
1792 		mlx4_flow_steer_promisc_remove(mdev->dev,
1793 					       priv->port,
1794 					       MLX4_FS_ALL_DEFAULT);
1795 		mlx4_flow_steer_promisc_remove(mdev->dev,
1796 					       priv->port,
1797 					       MLX4_FS_MC_DEFAULT);
1798 	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1799 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1800 
1801 		/* Disable promiscouos mode */
1802 		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1803 					    priv->port);
1804 
1805 		/* Disable Multicast promisc */
1806 		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1807 			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1808 						      priv->port);
1809 			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1810 		}
1811 	}
1812 
1813 	/* Detach All multicasts */
1814 	memset(&mc_list[10], 0xff, ETH_ALEN);
1815 	mc_list[5] = priv->port; /* needed for B0 steering support */
1816 	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1817 			      MLX4_PROT_ETH, priv->broadcast_id);
1818 	list_for_each_entry(mclist, &priv->curr_list, list) {
1819 		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1820 		mc_list[5] = priv->port;
1821 		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1822 				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
1823 		if (mclist->tunnel_reg_id)
1824 			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1825 	}
1826 	mlx4_en_clear_list(dev);
1827 	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1828 		list_del(&mclist->list);
1829 		kfree(mclist);
1830 	}
1831 
1832 	/* Flush multicast filter */
1833 	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1834 
1835 	/* Remove flow steering rules for the port*/
1836 	if (mdev->dev->caps.steering_mode ==
1837 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
1838 		ASSERT_RTNL();
1839 		list_for_each_entry_safe(flow, tmp_flow,
1840 					 &priv->ethtool_list, list) {
1841 			mlx4_flow_detach(mdev->dev, flow->id);
1842 			list_del(&flow->list);
1843 		}
1844 	}
1845 
1846 	mlx4_en_destroy_drop_qp(priv);
1847 
1848 	/* Free TX Rings */
1849 	for (i = 0; i < priv->tx_ring_num; i++) {
1850 		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1851 		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1852 	}
1853 	msleep(10);
1854 
1855 	for (i = 0; i < priv->tx_ring_num; i++)
1856 		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1857 
1858 	/* Free RSS qps */
1859 	mlx4_en_release_rss_steer(priv);
1860 
1861 	/* Unregister Mac address for the port */
1862 	mlx4_en_put_qp(priv);
1863 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
1864 		mdev->mac_removed[priv->port] = 1;
1865 
1866 	/* Free RX Rings */
1867 	for (i = 0; i < priv->rx_ring_num; i++) {
1868 		struct mlx4_en_cq *cq = priv->rx_cq[i];
1869 
1870 		local_bh_disable();
1871 		while (!mlx4_en_cq_lock_napi(cq)) {
1872 			pr_info("CQ %d locked\n", i);
1873 			mdelay(1);
1874 		}
1875 		local_bh_enable();
1876 
1877 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
1878 			msleep(1);
1879 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1880 		mlx4_en_deactivate_cq(priv, cq);
1881 
1882 		mlx4_en_free_affinity_hint(priv, i);
1883 	}
1884 }
1885 
1886 static void mlx4_en_restart(struct work_struct *work)
1887 {
1888 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1889 						 watchdog_task);
1890 	struct mlx4_en_dev *mdev = priv->mdev;
1891 	struct net_device *dev = priv->dev;
1892 
1893 	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1894 
1895 	mutex_lock(&mdev->state_lock);
1896 	if (priv->port_up) {
1897 		mlx4_en_stop_port(dev, 1);
1898 		if (mlx4_en_start_port(dev))
1899 			en_err(priv, "Failed restarting port %d\n", priv->port);
1900 	}
1901 	mutex_unlock(&mdev->state_lock);
1902 }
1903 
1904 static void mlx4_en_clear_stats(struct net_device *dev)
1905 {
1906 	struct mlx4_en_priv *priv = netdev_priv(dev);
1907 	struct mlx4_en_dev *mdev = priv->mdev;
1908 	int i;
1909 
1910 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1911 		en_dbg(HW, priv, "Failed dumping statistics\n");
1912 
1913 	memset(&priv->stats, 0, sizeof(priv->stats));
1914 	memset(&priv->pstats, 0, sizeof(priv->pstats));
1915 	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1916 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1917 
1918 	for (i = 0; i < priv->tx_ring_num; i++) {
1919 		priv->tx_ring[i]->bytes = 0;
1920 		priv->tx_ring[i]->packets = 0;
1921 		priv->tx_ring[i]->tx_csum = 0;
1922 	}
1923 	for (i = 0; i < priv->rx_ring_num; i++) {
1924 		priv->rx_ring[i]->bytes = 0;
1925 		priv->rx_ring[i]->packets = 0;
1926 		priv->rx_ring[i]->csum_ok = 0;
1927 		priv->rx_ring[i]->csum_none = 0;
1928 	}
1929 }
1930 
1931 static int mlx4_en_open(struct net_device *dev)
1932 {
1933 	struct mlx4_en_priv *priv = netdev_priv(dev);
1934 	struct mlx4_en_dev *mdev = priv->mdev;
1935 	int err = 0;
1936 
1937 	mutex_lock(&mdev->state_lock);
1938 
1939 	if (!mdev->device_up) {
1940 		en_err(priv, "Cannot open - device down/disabled\n");
1941 		err = -EBUSY;
1942 		goto out;
1943 	}
1944 
1945 	/* Reset HW statistics and SW counters */
1946 	mlx4_en_clear_stats(dev);
1947 
1948 	err = mlx4_en_start_port(dev);
1949 	if (err)
1950 		en_err(priv, "Failed starting port:%d\n", priv->port);
1951 
1952 out:
1953 	mutex_unlock(&mdev->state_lock);
1954 	return err;
1955 }
1956 
1957 
1958 static int mlx4_en_close(struct net_device *dev)
1959 {
1960 	struct mlx4_en_priv *priv = netdev_priv(dev);
1961 	struct mlx4_en_dev *mdev = priv->mdev;
1962 
1963 	en_dbg(IFDOWN, priv, "Close port called\n");
1964 
1965 	mutex_lock(&mdev->state_lock);
1966 
1967 	mlx4_en_stop_port(dev, 0);
1968 	netif_carrier_off(dev);
1969 
1970 	mutex_unlock(&mdev->state_lock);
1971 	return 0;
1972 }
1973 
1974 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1975 {
1976 	int i;
1977 
1978 #ifdef CONFIG_RFS_ACCEL
1979 	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1980 	priv->dev->rx_cpu_rmap = NULL;
1981 #endif
1982 
1983 	for (i = 0; i < priv->tx_ring_num; i++) {
1984 		if (priv->tx_ring && priv->tx_ring[i])
1985 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1986 		if (priv->tx_cq && priv->tx_cq[i])
1987 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1988 	}
1989 
1990 	for (i = 0; i < priv->rx_ring_num; i++) {
1991 		if (priv->rx_ring[i])
1992 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1993 				priv->prof->rx_ring_size, priv->stride);
1994 		if (priv->rx_cq[i])
1995 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1996 	}
1997 
1998 	if (priv->base_tx_qpn) {
1999 		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
2000 		priv->base_tx_qpn = 0;
2001 	}
2002 }
2003 
2004 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
2005 {
2006 	struct mlx4_en_port_profile *prof = priv->prof;
2007 	int i;
2008 	int err;
2009 	int node;
2010 
2011 	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
2012 	if (err) {
2013 		en_err(priv, "failed reserving range for TX rings\n");
2014 		return err;
2015 	}
2016 
2017 	/* Create tx Rings */
2018 	for (i = 0; i < priv->tx_ring_num; i++) {
2019 		node = cpu_to_node(i % num_online_cpus());
2020 		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
2021 				      prof->tx_ring_size, i, TX, node))
2022 			goto err;
2023 
2024 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
2025 					   priv->base_tx_qpn + i,
2026 					   prof->tx_ring_size, TXBB_SIZE,
2027 					   node, i))
2028 			goto err;
2029 	}
2030 
2031 	/* Create rx Rings */
2032 	for (i = 0; i < priv->rx_ring_num; i++) {
2033 		node = cpu_to_node(i % num_online_cpus());
2034 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
2035 				      prof->rx_ring_size, i, RX, node))
2036 			goto err;
2037 
2038 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2039 					   prof->rx_ring_size, priv->stride,
2040 					   node))
2041 			goto err;
2042 	}
2043 
2044 #ifdef CONFIG_RFS_ACCEL
2045 	if (priv->mdev->dev->caps.comp_pool) {
2046 		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
2047 		if (!priv->dev->rx_cpu_rmap)
2048 			goto err;
2049 	}
2050 #endif
2051 
2052 	return 0;
2053 
2054 err:
2055 	en_err(priv, "Failed to allocate NIC resources\n");
2056 	for (i = 0; i < priv->rx_ring_num; i++) {
2057 		if (priv->rx_ring[i])
2058 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2059 						prof->rx_ring_size,
2060 						priv->stride);
2061 		if (priv->rx_cq[i])
2062 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2063 	}
2064 	for (i = 0; i < priv->tx_ring_num; i++) {
2065 		if (priv->tx_ring[i])
2066 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2067 		if (priv->tx_cq[i])
2068 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2069 	}
2070 	return -ENOMEM;
2071 }
2072 
2073 
2074 void mlx4_en_destroy_netdev(struct net_device *dev)
2075 {
2076 	struct mlx4_en_priv *priv = netdev_priv(dev);
2077 	struct mlx4_en_dev *mdev = priv->mdev;
2078 
2079 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2080 
2081 	/* Unregister device - this will close the port if it was up */
2082 	if (priv->registered)
2083 		unregister_netdev(dev);
2084 
2085 	if (priv->allocated)
2086 		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2087 
2088 	cancel_delayed_work(&priv->stats_task);
2089 	cancel_delayed_work(&priv->service_task);
2090 	/* flush any pending task for this netdev */
2091 	flush_workqueue(mdev->workqueue);
2092 
2093 	/* Detach the netdev so tasks would not attempt to access it */
2094 	mutex_lock(&mdev->state_lock);
2095 	mdev->pndev[priv->port] = NULL;
2096 	mutex_unlock(&mdev->state_lock);
2097 
2098 	mlx4_en_free_resources(priv);
2099 
2100 	kfree(priv->tx_ring);
2101 	kfree(priv->tx_cq);
2102 
2103 	free_netdev(dev);
2104 }
2105 
2106 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2107 {
2108 	struct mlx4_en_priv *priv = netdev_priv(dev);
2109 	struct mlx4_en_dev *mdev = priv->mdev;
2110 	int err = 0;
2111 
2112 	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2113 		 dev->mtu, new_mtu);
2114 
2115 	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
2116 		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
2117 		return -EPERM;
2118 	}
2119 	dev->mtu = new_mtu;
2120 
2121 	if (netif_running(dev)) {
2122 		mutex_lock(&mdev->state_lock);
2123 		if (!mdev->device_up) {
2124 			/* NIC is probably restarting - let watchdog task reset
2125 			 * the port */
2126 			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2127 		} else {
2128 			mlx4_en_stop_port(dev, 1);
2129 			err = mlx4_en_start_port(dev);
2130 			if (err) {
2131 				en_err(priv, "Failed restarting port:%d\n",
2132 					 priv->port);
2133 				queue_work(mdev->workqueue, &priv->watchdog_task);
2134 			}
2135 		}
2136 		mutex_unlock(&mdev->state_lock);
2137 	}
2138 	return 0;
2139 }
2140 
2141 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2142 {
2143 	struct mlx4_en_priv *priv = netdev_priv(dev);
2144 	struct mlx4_en_dev *mdev = priv->mdev;
2145 	struct hwtstamp_config config;
2146 
2147 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2148 		return -EFAULT;
2149 
2150 	/* reserved for future extensions */
2151 	if (config.flags)
2152 		return -EINVAL;
2153 
2154 	/* device doesn't support time stamping */
2155 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2156 		return -EINVAL;
2157 
2158 	/* TX HW timestamp */
2159 	switch (config.tx_type) {
2160 	case HWTSTAMP_TX_OFF:
2161 	case HWTSTAMP_TX_ON:
2162 		break;
2163 	default:
2164 		return -ERANGE;
2165 	}
2166 
2167 	/* RX HW timestamp */
2168 	switch (config.rx_filter) {
2169 	case HWTSTAMP_FILTER_NONE:
2170 		break;
2171 	case HWTSTAMP_FILTER_ALL:
2172 	case HWTSTAMP_FILTER_SOME:
2173 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2174 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2175 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2176 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2177 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2178 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2179 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2180 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2181 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2182 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2183 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2184 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2185 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2186 		break;
2187 	default:
2188 		return -ERANGE;
2189 	}
2190 
2191 	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
2192 		config.tx_type = HWTSTAMP_TX_OFF;
2193 		config.rx_filter = HWTSTAMP_FILTER_NONE;
2194 	}
2195 
2196 	return copy_to_user(ifr->ifr_data, &config,
2197 			    sizeof(config)) ? -EFAULT : 0;
2198 }
2199 
2200 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2201 {
2202 	struct mlx4_en_priv *priv = netdev_priv(dev);
2203 
2204 	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2205 			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2206 }
2207 
2208 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2209 {
2210 	switch (cmd) {
2211 	case SIOCSHWTSTAMP:
2212 		return mlx4_en_hwtstamp_set(dev, ifr);
2213 	case SIOCGHWTSTAMP:
2214 		return mlx4_en_hwtstamp_get(dev, ifr);
2215 	default:
2216 		return -EOPNOTSUPP;
2217 	}
2218 }
2219 
2220 static int mlx4_en_set_features(struct net_device *netdev,
2221 		netdev_features_t features)
2222 {
2223 	struct mlx4_en_priv *priv = netdev_priv(netdev);
2224 
2225 	if (features & NETIF_F_LOOPBACK)
2226 		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2227 	else
2228 		priv->ctrl_flags &=
2229 			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
2230 
2231 	mlx4_en_update_loopback_state(netdev, features);
2232 
2233 	return 0;
2234 
2235 }
2236 
2237 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2238 {
2239 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2240 	struct mlx4_en_dev *mdev = en_priv->mdev;
2241 	u64 mac_u64 = mlx4_mac_to_u64(mac);
2242 
2243 	if (!is_valid_ether_addr(mac))
2244 		return -EINVAL;
2245 
2246 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2247 }
2248 
2249 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2250 {
2251 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2252 	struct mlx4_en_dev *mdev = en_priv->mdev;
2253 
2254 	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2255 }
2256 
2257 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2258 {
2259 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2260 	struct mlx4_en_dev *mdev = en_priv->mdev;
2261 
2262 	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2263 }
2264 
2265 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2266 {
2267 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2268 	struct mlx4_en_dev *mdev = en_priv->mdev;
2269 
2270 	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2271 }
2272 
2273 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2274 {
2275 	struct mlx4_en_priv *en_priv = netdev_priv(dev);
2276 	struct mlx4_en_dev *mdev = en_priv->mdev;
2277 
2278 	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2279 }
2280 
2281 #define PORT_ID_BYTE_LEN 8
2282 static int mlx4_en_get_phys_port_id(struct net_device *dev,
2283 				    struct netdev_phys_port_id *ppid)
2284 {
2285 	struct mlx4_en_priv *priv = netdev_priv(dev);
2286 	struct mlx4_dev *mdev = priv->mdev->dev;
2287 	int i;
2288 	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2289 
2290 	if (!phys_port_id)
2291 		return -EOPNOTSUPP;
2292 
2293 	ppid->id_len = sizeof(phys_port_id);
2294 	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2295 		ppid->id[i] =  phys_port_id & 0xff;
2296 		phys_port_id >>= 8;
2297 	}
2298 	return 0;
2299 }
2300 
2301 #ifdef CONFIG_MLX4_EN_VXLAN
2302 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2303 {
2304 	int ret;
2305 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2306 						 vxlan_add_task);
2307 
2308 	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2309 	if (ret)
2310 		goto out;
2311 
2312 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2313 				  VXLAN_STEER_BY_OUTER_MAC, 1);
2314 out:
2315 	if (ret)
2316 		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2317 }
2318 
2319 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2320 {
2321 	int ret;
2322 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2323 						 vxlan_del_task);
2324 
2325 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2326 				  VXLAN_STEER_BY_OUTER_MAC, 0);
2327 	if (ret)
2328 		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2329 
2330 	priv->vxlan_port = 0;
2331 }
2332 
2333 static void mlx4_en_add_vxlan_port(struct  net_device *dev,
2334 				   sa_family_t sa_family, __be16 port)
2335 {
2336 	struct mlx4_en_priv *priv = netdev_priv(dev);
2337 	__be16 current_port;
2338 
2339 	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
2340 		return;
2341 
2342 	if (sa_family == AF_INET6)
2343 		return;
2344 
2345 	current_port = priv->vxlan_port;
2346 	if (current_port && current_port != port) {
2347 		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2348 			ntohs(current_port), ntohs(port));
2349 		return;
2350 	}
2351 
2352 	priv->vxlan_port = port;
2353 	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2354 }
2355 
2356 static void mlx4_en_del_vxlan_port(struct  net_device *dev,
2357 				   sa_family_t sa_family, __be16 port)
2358 {
2359 	struct mlx4_en_priv *priv = netdev_priv(dev);
2360 	__be16 current_port;
2361 
2362 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2363 		return;
2364 
2365 	if (sa_family == AF_INET6)
2366 		return;
2367 
2368 	current_port = priv->vxlan_port;
2369 	if (current_port != port) {
2370 		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2371 		return;
2372 	}
2373 
2374 	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2375 }
2376 #endif
2377 
2378 static const struct net_device_ops mlx4_netdev_ops = {
2379 	.ndo_open		= mlx4_en_open,
2380 	.ndo_stop		= mlx4_en_close,
2381 	.ndo_start_xmit		= mlx4_en_xmit,
2382 	.ndo_select_queue	= mlx4_en_select_queue,
2383 	.ndo_get_stats		= mlx4_en_get_stats,
2384 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
2385 	.ndo_set_mac_address	= mlx4_en_set_mac,
2386 	.ndo_validate_addr	= eth_validate_addr,
2387 	.ndo_change_mtu		= mlx4_en_change_mtu,
2388 	.ndo_do_ioctl		= mlx4_en_ioctl,
2389 	.ndo_tx_timeout		= mlx4_en_tx_timeout,
2390 	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
2391 	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
2392 #ifdef CONFIG_NET_POLL_CONTROLLER
2393 	.ndo_poll_controller	= mlx4_en_netpoll,
2394 #endif
2395 	.ndo_set_features	= mlx4_en_set_features,
2396 	.ndo_setup_tc		= mlx4_en_setup_tc,
2397 #ifdef CONFIG_RFS_ACCEL
2398 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
2399 #endif
2400 #ifdef CONFIG_NET_RX_BUSY_POLL
2401 	.ndo_busy_poll		= mlx4_en_low_latency_recv,
2402 #endif
2403 	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
2404 #ifdef CONFIG_MLX4_EN_VXLAN
2405 	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
2406 	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
2407 #endif
2408 };
2409 
2410 static const struct net_device_ops mlx4_netdev_ops_master = {
2411 	.ndo_open		= mlx4_en_open,
2412 	.ndo_stop		= mlx4_en_close,
2413 	.ndo_start_xmit		= mlx4_en_xmit,
2414 	.ndo_select_queue	= mlx4_en_select_queue,
2415 	.ndo_get_stats		= mlx4_en_get_stats,
2416 	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
2417 	.ndo_set_mac_address	= mlx4_en_set_mac,
2418 	.ndo_validate_addr	= eth_validate_addr,
2419 	.ndo_change_mtu		= mlx4_en_change_mtu,
2420 	.ndo_tx_timeout		= mlx4_en_tx_timeout,
2421 	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
2422 	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
2423 	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
2424 	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
2425 	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
2426 	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
2427 	.ndo_get_vf_config	= mlx4_en_get_vf_config,
2428 #ifdef CONFIG_NET_POLL_CONTROLLER
2429 	.ndo_poll_controller	= mlx4_en_netpoll,
2430 #endif
2431 	.ndo_set_features	= mlx4_en_set_features,
2432 	.ndo_setup_tc		= mlx4_en_setup_tc,
2433 #ifdef CONFIG_RFS_ACCEL
2434 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
2435 #endif
2436 	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
2437 };
2438 
2439 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2440 			struct mlx4_en_port_profile *prof)
2441 {
2442 	struct net_device *dev;
2443 	struct mlx4_en_priv *priv;
2444 	int i;
2445 	int err;
2446 	u64 mac_u64;
2447 
2448 	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2449 				 MAX_TX_RINGS, MAX_RX_RINGS);
2450 	if (dev == NULL)
2451 		return -ENOMEM;
2452 
2453 	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2454 	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2455 
2456 	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
2457 	dev->dev_port = port - 1;
2458 
2459 	/*
2460 	 * Initialize driver private data
2461 	 */
2462 
2463 	priv = netdev_priv(dev);
2464 	memset(priv, 0, sizeof(struct mlx4_en_priv));
2465 	priv->dev = dev;
2466 	priv->mdev = mdev;
2467 	priv->ddev = &mdev->pdev->dev;
2468 	priv->prof = prof;
2469 	priv->port = port;
2470 	priv->port_up = false;
2471 	priv->flags = prof->flags;
2472 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2473 			MLX4_WQE_CTRL_SOLICITED);
2474 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2475 	priv->tx_ring_num = prof->tx_ring_num;
2476 
2477 	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2478 				GFP_KERNEL);
2479 	if (!priv->tx_ring) {
2480 		err = -ENOMEM;
2481 		goto out;
2482 	}
2483 	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
2484 			      GFP_KERNEL);
2485 	if (!priv->tx_cq) {
2486 		err = -ENOMEM;
2487 		goto out;
2488 	}
2489 	priv->rx_ring_num = prof->rx_ring_num;
2490 	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2491 	priv->mac_index = -1;
2492 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
2493 	spin_lock_init(&priv->stats_lock);
2494 	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2495 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2496 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2497 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2498 	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2499 #ifdef CONFIG_MLX4_EN_VXLAN
2500 	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2501 	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2502 #endif
2503 #ifdef CONFIG_MLX4_EN_DCB
2504 	if (!mlx4_is_slave(priv->mdev->dev)) {
2505 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
2506 			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2507 		} else {
2508 			en_info(priv, "enabling only PFC DCB ops\n");
2509 			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2510 		}
2511 	}
2512 #endif
2513 
2514 	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2515 		INIT_HLIST_HEAD(&priv->mac_hash[i]);
2516 
2517 	/* Query for default mac and max mtu */
2518 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2519 
2520 	/* Set default MAC */
2521 	dev->addr_len = ETH_ALEN;
2522 	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
2523 	if (!is_valid_ether_addr(dev->dev_addr)) {
2524 		if (mlx4_is_slave(priv->mdev->dev)) {
2525 			eth_hw_addr_random(dev);
2526 			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2527 			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
2528 			mdev->dev->caps.def_mac[priv->port] = mac_u64;
2529 		} else {
2530 			en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
2531 			       priv->port, dev->dev_addr);
2532 			err = -EINVAL;
2533 			goto out;
2534 		}
2535 	}
2536 
2537 	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
2538 
2539 	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2540 					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2541 	err = mlx4_en_alloc_resources(priv);
2542 	if (err)
2543 		goto out;
2544 
2545 #ifdef CONFIG_RFS_ACCEL
2546 	INIT_LIST_HEAD(&priv->filters);
2547 	spin_lock_init(&priv->filters_lock);
2548 #endif
2549 
2550 	/* Initialize time stamping config */
2551 	priv->hwtstamp_config.flags = 0;
2552 	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2553 	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2554 
2555 	/* Allocate page for receive rings */
2556 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2557 				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2558 	if (err) {
2559 		en_err(priv, "Failed to allocate page for rx qps\n");
2560 		goto out;
2561 	}
2562 	priv->allocated = 1;
2563 
2564 	/*
2565 	 * Initialize netdev entry points
2566 	 */
2567 	if (mlx4_is_master(priv->mdev->dev))
2568 		dev->netdev_ops = &mlx4_netdev_ops_master;
2569 	else
2570 		dev->netdev_ops = &mlx4_netdev_ops;
2571 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2572 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2573 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2574 
2575 	dev->ethtool_ops = &mlx4_en_ethtool_ops;
2576 
2577 	/*
2578 	 * Set driver features
2579 	 */
2580 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2581 	if (mdev->LSO_support)
2582 		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2583 
2584 	dev->vlan_features = dev->hw_features;
2585 
2586 	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2587 	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2588 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2589 			NETIF_F_HW_VLAN_CTAG_FILTER;
2590 	dev->hw_features |= NETIF_F_LOOPBACK;
2591 
2592 	if (mdev->dev->caps.steering_mode ==
2593 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
2594 		dev->hw_features |= NETIF_F_NTUPLE;
2595 
2596 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2597 		dev->priv_flags |= IFF_UNICAST_FLT;
2598 
2599 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2600 		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2601 					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2602 		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2603 		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
2604 	}
2605 
2606 	mdev->pndev[port] = dev;
2607 
2608 	netif_carrier_off(dev);
2609 	mlx4_en_set_default_moderation(priv);
2610 
2611 	err = register_netdev(dev);
2612 	if (err) {
2613 		en_err(priv, "Netdev registration failed for port %d\n", port);
2614 		goto out;
2615 	}
2616 	priv->registered = 1;
2617 
2618 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2619 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2620 
2621 	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
2622 
2623 	/* Configure port */
2624 	mlx4_en_calc_rx_buf(dev);
2625 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
2626 				    priv->rx_skb_size + ETH_FCS_LEN,
2627 				    prof->tx_pause, prof->tx_ppp,
2628 				    prof->rx_pause, prof->rx_ppp);
2629 	if (err) {
2630 		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
2631 		       priv->port, err);
2632 		goto out;
2633 	}
2634 
2635 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2636 		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
2637 		if (err) {
2638 			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
2639 			       err);
2640 			goto out;
2641 		}
2642 	}
2643 
2644 	/* Init port */
2645 	en_warn(priv, "Initializing port\n");
2646 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
2647 	if (err) {
2648 		en_err(priv, "Failed Initializing port\n");
2649 		goto out;
2650 	}
2651 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2652 
2653 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2654 		queue_delayed_work(mdev->workqueue, &priv->service_task,
2655 				   SERVICE_TASK_DELAY);
2656 
2657 	return 0;
2658 
2659 out:
2660 	mlx4_en_destroy_netdev(dev);
2661 	return err;
2662 }
2663 
2664