1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/bpf.h> 35 #include <linux/etherdevice.h> 36 #include <linux/tcp.h> 37 #include <linux/if_vlan.h> 38 #include <linux/delay.h> 39 #include <linux/slab.h> 40 #include <linux/hash.h> 41 #include <net/ip.h> 42 #include <net/busy_poll.h> 43 #include <net/vxlan.h> 44 #include <net/devlink.h> 45 46 #include <linux/mlx4/driver.h> 47 #include <linux/mlx4/device.h> 48 #include <linux/mlx4/cmd.h> 49 #include <linux/mlx4/cq.h> 50 51 #include "mlx4_en.h" 52 #include "en_port.h" 53 54 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \ 55 XDP_PACKET_HEADROOM)) 56 57 int mlx4_en_setup_tc(struct net_device *dev, u8 up) 58 { 59 struct mlx4_en_priv *priv = netdev_priv(dev); 60 int i; 61 unsigned int offset = 0; 62 63 if (up && up != MLX4_EN_NUM_UP) 64 return -EINVAL; 65 66 netdev_set_num_tc(dev, up); 67 68 /* Partition Tx queues evenly amongst UP's */ 69 for (i = 0; i < up; i++) { 70 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); 71 offset += priv->num_tx_rings_p_up; 72 } 73 74 #ifdef CONFIG_MLX4_EN_DCB 75 if (!mlx4_is_slave(priv->mdev->dev)) { 76 if (up) { 77 if (priv->dcbx_cap) 78 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 79 } else { 80 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 81 priv->cee_config.pfc_state = false; 82 } 83 } 84 #endif /* CONFIG_MLX4_EN_DCB */ 85 86 return 0; 87 } 88 89 static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 90 struct tc_to_netdev *tc) 91 { 92 if (tc->type != TC_SETUP_MQPRIO) 93 return -EINVAL; 94 95 return mlx4_en_setup_tc(dev, tc->tc); 96 } 97 98 #ifdef CONFIG_RFS_ACCEL 99 100 struct mlx4_en_filter { 101 struct list_head next; 102 struct work_struct work; 103 104 u8 ip_proto; 105 __be32 src_ip; 106 __be32 dst_ip; 107 __be16 src_port; 108 __be16 dst_port; 109 110 int rxq_index; 111 struct mlx4_en_priv *priv; 112 u32 flow_id; /* RFS infrastructure id */ 113 int id; /* mlx4_en driver id */ 114 u64 reg_id; /* Flow steering API id */ 115 u8 activated; /* Used to prevent expiry before filter 116 * is attached 117 */ 118 struct hlist_node filter_chain; 119 }; 120 121 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 122 123 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 124 { 125 switch (ip_proto) { 126 case IPPROTO_UDP: 127 return MLX4_NET_TRANS_RULE_ID_UDP; 128 case IPPROTO_TCP: 129 return MLX4_NET_TRANS_RULE_ID_TCP; 130 default: 131 return MLX4_NET_TRANS_RULE_NUM; 132 } 133 }; 134 135 /* Must not acquire state_lock, as its corresponding work_sync 136 * is done under it. 137 */ 138 static void mlx4_en_filter_work(struct work_struct *work) 139 { 140 struct mlx4_en_filter *filter = container_of(work, 141 struct mlx4_en_filter, 142 work); 143 struct mlx4_en_priv *priv = filter->priv; 144 struct mlx4_spec_list spec_tcp_udp = { 145 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 146 { 147 .tcp_udp = { 148 .dst_port = filter->dst_port, 149 .dst_port_msk = (__force __be16)-1, 150 .src_port = filter->src_port, 151 .src_port_msk = (__force __be16)-1, 152 }, 153 }, 154 }; 155 struct mlx4_spec_list spec_ip = { 156 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 157 { 158 .ipv4 = { 159 .dst_ip = filter->dst_ip, 160 .dst_ip_msk = (__force __be32)-1, 161 .src_ip = filter->src_ip, 162 .src_ip_msk = (__force __be32)-1, 163 }, 164 }, 165 }; 166 struct mlx4_spec_list spec_eth = { 167 .id = MLX4_NET_TRANS_RULE_ID_ETH, 168 }; 169 struct mlx4_net_trans_rule rule = { 170 .list = LIST_HEAD_INIT(rule.list), 171 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 172 .exclusive = 1, 173 .allow_loopback = 1, 174 .promisc_mode = MLX4_FS_REGULAR, 175 .port = priv->port, 176 .priority = MLX4_DOMAIN_RFS, 177 }; 178 int rc; 179 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 180 181 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 182 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 183 filter->ip_proto); 184 goto ignore; 185 } 186 list_add_tail(&spec_eth.list, &rule.list); 187 list_add_tail(&spec_ip.list, &rule.list); 188 list_add_tail(&spec_tcp_udp.list, &rule.list); 189 190 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 191 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 192 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 193 194 filter->activated = 0; 195 196 if (filter->reg_id) { 197 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 198 if (rc && rc != -ENOENT) 199 en_err(priv, "Error detaching flow. rc = %d\n", rc); 200 } 201 202 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 203 if (rc) 204 en_err(priv, "Error attaching flow. err = %d\n", rc); 205 206 ignore: 207 mlx4_en_filter_rfs_expire(priv); 208 209 filter->activated = 1; 210 } 211 212 static inline struct hlist_head * 213 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 214 __be16 src_port, __be16 dst_port) 215 { 216 unsigned long l; 217 int bucket_idx; 218 219 l = (__force unsigned long)src_port | 220 ((__force unsigned long)dst_port << 2); 221 l ^= (__force unsigned long)(src_ip ^ dst_ip); 222 223 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 224 225 return &priv->filter_hash[bucket_idx]; 226 } 227 228 static struct mlx4_en_filter * 229 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 230 __be32 dst_ip, u8 ip_proto, __be16 src_port, 231 __be16 dst_port, u32 flow_id) 232 { 233 struct mlx4_en_filter *filter = NULL; 234 235 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 236 if (!filter) 237 return NULL; 238 239 filter->priv = priv; 240 filter->rxq_index = rxq_index; 241 INIT_WORK(&filter->work, mlx4_en_filter_work); 242 243 filter->src_ip = src_ip; 244 filter->dst_ip = dst_ip; 245 filter->ip_proto = ip_proto; 246 filter->src_port = src_port; 247 filter->dst_port = dst_port; 248 249 filter->flow_id = flow_id; 250 251 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 252 253 list_add_tail(&filter->next, &priv->filters); 254 hlist_add_head(&filter->filter_chain, 255 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 256 dst_port)); 257 258 return filter; 259 } 260 261 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 262 { 263 struct mlx4_en_priv *priv = filter->priv; 264 int rc; 265 266 list_del(&filter->next); 267 268 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 269 if (rc && rc != -ENOENT) 270 en_err(priv, "Error detaching flow. rc = %d\n", rc); 271 272 kfree(filter); 273 } 274 275 static inline struct mlx4_en_filter * 276 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 277 u8 ip_proto, __be16 src_port, __be16 dst_port) 278 { 279 struct mlx4_en_filter *filter; 280 struct mlx4_en_filter *ret = NULL; 281 282 hlist_for_each_entry(filter, 283 filter_hash_bucket(priv, src_ip, dst_ip, 284 src_port, dst_port), 285 filter_chain) { 286 if (filter->src_ip == src_ip && 287 filter->dst_ip == dst_ip && 288 filter->ip_proto == ip_proto && 289 filter->src_port == src_port && 290 filter->dst_port == dst_port) { 291 ret = filter; 292 break; 293 } 294 } 295 296 return ret; 297 } 298 299 static int 300 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 301 u16 rxq_index, u32 flow_id) 302 { 303 struct mlx4_en_priv *priv = netdev_priv(net_dev); 304 struct mlx4_en_filter *filter; 305 const struct iphdr *ip; 306 const __be16 *ports; 307 u8 ip_proto; 308 __be32 src_ip; 309 __be32 dst_ip; 310 __be16 src_port; 311 __be16 dst_port; 312 int nhoff = skb_network_offset(skb); 313 int ret = 0; 314 315 if (skb->protocol != htons(ETH_P_IP)) 316 return -EPROTONOSUPPORT; 317 318 ip = (const struct iphdr *)(skb->data + nhoff); 319 if (ip_is_fragment(ip)) 320 return -EPROTONOSUPPORT; 321 322 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 323 return -EPROTONOSUPPORT; 324 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 325 326 ip_proto = ip->protocol; 327 src_ip = ip->saddr; 328 dst_ip = ip->daddr; 329 src_port = ports[0]; 330 dst_port = ports[1]; 331 332 spin_lock_bh(&priv->filters_lock); 333 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 334 src_port, dst_port); 335 if (filter) { 336 if (filter->rxq_index == rxq_index) 337 goto out; 338 339 filter->rxq_index = rxq_index; 340 } else { 341 filter = mlx4_en_filter_alloc(priv, rxq_index, 342 src_ip, dst_ip, ip_proto, 343 src_port, dst_port, flow_id); 344 if (!filter) { 345 ret = -ENOMEM; 346 goto err; 347 } 348 } 349 350 queue_work(priv->mdev->workqueue, &filter->work); 351 352 out: 353 ret = filter->id; 354 err: 355 spin_unlock_bh(&priv->filters_lock); 356 357 return ret; 358 } 359 360 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 361 { 362 struct mlx4_en_filter *filter, *tmp; 363 LIST_HEAD(del_list); 364 365 spin_lock_bh(&priv->filters_lock); 366 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 367 list_move(&filter->next, &del_list); 368 hlist_del(&filter->filter_chain); 369 } 370 spin_unlock_bh(&priv->filters_lock); 371 372 list_for_each_entry_safe(filter, tmp, &del_list, next) { 373 cancel_work_sync(&filter->work); 374 mlx4_en_filter_free(filter); 375 } 376 } 377 378 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 379 { 380 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 381 LIST_HEAD(del_list); 382 int i = 0; 383 384 spin_lock_bh(&priv->filters_lock); 385 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 386 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 387 break; 388 389 if (filter->activated && 390 !work_pending(&filter->work) && 391 rps_may_expire_flow(priv->dev, 392 filter->rxq_index, filter->flow_id, 393 filter->id)) { 394 list_move(&filter->next, &del_list); 395 hlist_del(&filter->filter_chain); 396 } else 397 last_filter = filter; 398 399 i++; 400 } 401 402 if (last_filter && (&last_filter->next != priv->filters.next)) 403 list_move(&priv->filters, &last_filter->next); 404 405 spin_unlock_bh(&priv->filters_lock); 406 407 list_for_each_entry_safe(filter, tmp, &del_list, next) 408 mlx4_en_filter_free(filter); 409 } 410 #endif 411 412 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 413 __be16 proto, u16 vid) 414 { 415 struct mlx4_en_priv *priv = netdev_priv(dev); 416 struct mlx4_en_dev *mdev = priv->mdev; 417 int err; 418 int idx; 419 420 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 421 422 set_bit(vid, priv->active_vlans); 423 424 /* Add VID to port VLAN filter */ 425 mutex_lock(&mdev->state_lock); 426 if (mdev->device_up && priv->port_up) { 427 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 428 if (err) { 429 en_err(priv, "Failed configuring VLAN filter\n"); 430 goto out; 431 } 432 } 433 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); 434 if (err) 435 en_dbg(HW, priv, "Failed adding vlan %d\n", vid); 436 437 out: 438 mutex_unlock(&mdev->state_lock); 439 return err; 440 } 441 442 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 443 __be16 proto, u16 vid) 444 { 445 struct mlx4_en_priv *priv = netdev_priv(dev); 446 struct mlx4_en_dev *mdev = priv->mdev; 447 int err = 0; 448 449 en_dbg(HW, priv, "Killing VID:%d\n", vid); 450 451 clear_bit(vid, priv->active_vlans); 452 453 /* Remove VID from port VLAN filter */ 454 mutex_lock(&mdev->state_lock); 455 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 456 457 if (mdev->device_up && priv->port_up) { 458 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 459 if (err) 460 en_err(priv, "Failed configuring VLAN filter\n"); 461 } 462 mutex_unlock(&mdev->state_lock); 463 464 return err; 465 } 466 467 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 468 { 469 int i; 470 for (i = ETH_ALEN - 1; i >= 0; --i) { 471 dst_mac[i] = src_mac & 0xff; 472 src_mac >>= 8; 473 } 474 memset(&dst_mac[ETH_ALEN], 0, 2); 475 } 476 477 478 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 479 int qpn, u64 *reg_id) 480 { 481 int err; 482 483 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 484 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 485 return 0; /* do nothing */ 486 487 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 488 MLX4_DOMAIN_NIC, reg_id); 489 if (err) { 490 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 491 return err; 492 } 493 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); 494 return 0; 495 } 496 497 498 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 499 unsigned char *mac, int *qpn, u64 *reg_id) 500 { 501 struct mlx4_en_dev *mdev = priv->mdev; 502 struct mlx4_dev *dev = mdev->dev; 503 int err; 504 505 switch (dev->caps.steering_mode) { 506 case MLX4_STEERING_MODE_B0: { 507 struct mlx4_qp qp; 508 u8 gid[16] = {0}; 509 510 qp.qpn = *qpn; 511 memcpy(&gid[10], mac, ETH_ALEN); 512 gid[5] = priv->port; 513 514 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 515 break; 516 } 517 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 518 struct mlx4_spec_list spec_eth = { {NULL} }; 519 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 520 521 struct mlx4_net_trans_rule rule = { 522 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 523 .exclusive = 0, 524 .allow_loopback = 1, 525 .promisc_mode = MLX4_FS_REGULAR, 526 .priority = MLX4_DOMAIN_NIC, 527 }; 528 529 rule.port = priv->port; 530 rule.qpn = *qpn; 531 INIT_LIST_HEAD(&rule.list); 532 533 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 534 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 535 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 536 list_add_tail(&spec_eth.list, &rule.list); 537 538 err = mlx4_flow_attach(dev, &rule, reg_id); 539 break; 540 } 541 default: 542 return -EINVAL; 543 } 544 if (err) 545 en_warn(priv, "Failed Attaching Unicast\n"); 546 547 return err; 548 } 549 550 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 551 unsigned char *mac, int qpn, u64 reg_id) 552 { 553 struct mlx4_en_dev *mdev = priv->mdev; 554 struct mlx4_dev *dev = mdev->dev; 555 556 switch (dev->caps.steering_mode) { 557 case MLX4_STEERING_MODE_B0: { 558 struct mlx4_qp qp; 559 u8 gid[16] = {0}; 560 561 qp.qpn = qpn; 562 memcpy(&gid[10], mac, ETH_ALEN); 563 gid[5] = priv->port; 564 565 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 566 break; 567 } 568 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 569 mlx4_flow_detach(dev, reg_id); 570 break; 571 } 572 default: 573 en_err(priv, "Invalid steering mode.\n"); 574 } 575 } 576 577 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 578 { 579 struct mlx4_en_dev *mdev = priv->mdev; 580 struct mlx4_dev *dev = mdev->dev; 581 int index = 0; 582 int err = 0; 583 int *qpn = &priv->base_qpn; 584 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 585 586 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 587 priv->dev->dev_addr); 588 index = mlx4_register_mac(dev, priv->port, mac); 589 if (index < 0) { 590 err = index; 591 en_err(priv, "Failed adding MAC: %pM\n", 592 priv->dev->dev_addr); 593 return err; 594 } 595 596 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 597 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 598 *qpn = base_qpn + index; 599 return 0; 600 } 601 602 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); 603 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 604 if (err) { 605 en_err(priv, "Failed to reserve qp for mac registration\n"); 606 mlx4_unregister_mac(dev, priv->port, mac); 607 return err; 608 } 609 610 return 0; 611 } 612 613 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 614 { 615 struct mlx4_en_dev *mdev = priv->mdev; 616 struct mlx4_dev *dev = mdev->dev; 617 int qpn = priv->base_qpn; 618 619 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 620 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 621 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 622 priv->dev->dev_addr); 623 mlx4_unregister_mac(dev, priv->port, mac); 624 } else { 625 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 626 priv->port, qpn); 627 mlx4_qp_release_range(dev, qpn, 1); 628 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 629 } 630 } 631 632 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 633 unsigned char *new_mac, unsigned char *prev_mac) 634 { 635 struct mlx4_en_dev *mdev = priv->mdev; 636 struct mlx4_dev *dev = mdev->dev; 637 int err = 0; 638 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); 639 640 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 641 struct hlist_head *bucket; 642 unsigned int mac_hash; 643 struct mlx4_mac_entry *entry; 644 struct hlist_node *tmp; 645 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); 646 647 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 648 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 649 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 650 mlx4_en_uc_steer_release(priv, entry->mac, 651 qpn, entry->reg_id); 652 mlx4_unregister_mac(dev, priv->port, 653 prev_mac_u64); 654 hlist_del_rcu(&entry->hlist); 655 synchronize_rcu(); 656 memcpy(entry->mac, new_mac, ETH_ALEN); 657 entry->reg_id = 0; 658 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 659 hlist_add_head_rcu(&entry->hlist, 660 &priv->mac_hash[mac_hash]); 661 mlx4_register_mac(dev, priv->port, new_mac_u64); 662 err = mlx4_en_uc_steer_add(priv, new_mac, 663 &qpn, 664 &entry->reg_id); 665 if (err) 666 return err; 667 if (priv->tunnel_reg_id) { 668 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 669 priv->tunnel_reg_id = 0; 670 } 671 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, 672 &priv->tunnel_reg_id); 673 return err; 674 } 675 } 676 return -EINVAL; 677 } 678 679 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 680 } 681 682 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, 683 unsigned char new_mac[ETH_ALEN + 2]) 684 { 685 int err = 0; 686 687 if (priv->port_up) { 688 /* Remove old MAC and insert the new one */ 689 err = mlx4_en_replace_mac(priv, priv->base_qpn, 690 new_mac, priv->current_mac); 691 if (err) 692 en_err(priv, "Failed changing HW MAC address\n"); 693 } else 694 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 695 696 if (!err) 697 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); 698 699 return err; 700 } 701 702 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 703 { 704 struct mlx4_en_priv *priv = netdev_priv(dev); 705 struct mlx4_en_dev *mdev = priv->mdev; 706 struct sockaddr *saddr = addr; 707 unsigned char new_mac[ETH_ALEN + 2]; 708 int err; 709 710 if (!is_valid_ether_addr(saddr->sa_data)) 711 return -EADDRNOTAVAIL; 712 713 mutex_lock(&mdev->state_lock); 714 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 715 err = mlx4_en_do_set_mac(priv, new_mac); 716 if (!err) 717 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 718 mutex_unlock(&mdev->state_lock); 719 720 return err; 721 } 722 723 static void mlx4_en_clear_list(struct net_device *dev) 724 { 725 struct mlx4_en_priv *priv = netdev_priv(dev); 726 struct mlx4_en_mc_list *tmp, *mc_to_del; 727 728 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 729 list_del(&mc_to_del->list); 730 kfree(mc_to_del); 731 } 732 } 733 734 static void mlx4_en_cache_mclist(struct net_device *dev) 735 { 736 struct mlx4_en_priv *priv = netdev_priv(dev); 737 struct netdev_hw_addr *ha; 738 struct mlx4_en_mc_list *tmp; 739 740 mlx4_en_clear_list(dev); 741 netdev_for_each_mc_addr(ha, dev) { 742 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 743 if (!tmp) { 744 mlx4_en_clear_list(dev); 745 return; 746 } 747 memcpy(tmp->addr, ha->addr, ETH_ALEN); 748 list_add_tail(&tmp->list, &priv->mc_list); 749 } 750 } 751 752 static void update_mclist_flags(struct mlx4_en_priv *priv, 753 struct list_head *dst, 754 struct list_head *src) 755 { 756 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 757 bool found; 758 759 /* Find all the entries that should be removed from dst, 760 * These are the entries that are not found in src 761 */ 762 list_for_each_entry(dst_tmp, dst, list) { 763 found = false; 764 list_for_each_entry(src_tmp, src, list) { 765 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 766 found = true; 767 break; 768 } 769 } 770 if (!found) 771 dst_tmp->action = MCLIST_REM; 772 } 773 774 /* Add entries that exist in src but not in dst 775 * mark them as need to add 776 */ 777 list_for_each_entry(src_tmp, src, list) { 778 found = false; 779 list_for_each_entry(dst_tmp, dst, list) { 780 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 781 dst_tmp->action = MCLIST_NONE; 782 found = true; 783 break; 784 } 785 } 786 if (!found) { 787 new_mc = kmemdup(src_tmp, 788 sizeof(struct mlx4_en_mc_list), 789 GFP_KERNEL); 790 if (!new_mc) 791 return; 792 793 new_mc->action = MCLIST_ADD; 794 list_add_tail(&new_mc->list, dst); 795 } 796 } 797 } 798 799 static void mlx4_en_set_rx_mode(struct net_device *dev) 800 { 801 struct mlx4_en_priv *priv = netdev_priv(dev); 802 803 if (!priv->port_up) 804 return; 805 806 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 807 } 808 809 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 810 struct mlx4_en_dev *mdev) 811 { 812 int err = 0; 813 814 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 815 if (netif_msg_rx_status(priv)) 816 en_warn(priv, "Entering promiscuous mode\n"); 817 priv->flags |= MLX4_EN_FLAG_PROMISC; 818 819 /* Enable promiscouos mode */ 820 switch (mdev->dev->caps.steering_mode) { 821 case MLX4_STEERING_MODE_DEVICE_MANAGED: 822 err = mlx4_flow_steer_promisc_add(mdev->dev, 823 priv->port, 824 priv->base_qpn, 825 MLX4_FS_ALL_DEFAULT); 826 if (err) 827 en_err(priv, "Failed enabling promiscuous mode\n"); 828 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 829 break; 830 831 case MLX4_STEERING_MODE_B0: 832 err = mlx4_unicast_promisc_add(mdev->dev, 833 priv->base_qpn, 834 priv->port); 835 if (err) 836 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 837 838 /* Add the default qp number as multicast 839 * promisc 840 */ 841 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 842 err = mlx4_multicast_promisc_add(mdev->dev, 843 priv->base_qpn, 844 priv->port); 845 if (err) 846 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 847 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 848 } 849 break; 850 851 case MLX4_STEERING_MODE_A0: 852 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 853 priv->port, 854 priv->base_qpn, 855 1); 856 if (err) 857 en_err(priv, "Failed enabling promiscuous mode\n"); 858 break; 859 } 860 861 /* Disable port multicast filter (unconditionally) */ 862 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 863 0, MLX4_MCAST_DISABLE); 864 if (err) 865 en_err(priv, "Failed disabling multicast filter\n"); 866 } 867 } 868 869 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 870 struct mlx4_en_dev *mdev) 871 { 872 int err = 0; 873 874 if (netif_msg_rx_status(priv)) 875 en_warn(priv, "Leaving promiscuous mode\n"); 876 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 877 878 /* Disable promiscouos mode */ 879 switch (mdev->dev->caps.steering_mode) { 880 case MLX4_STEERING_MODE_DEVICE_MANAGED: 881 err = mlx4_flow_steer_promisc_remove(mdev->dev, 882 priv->port, 883 MLX4_FS_ALL_DEFAULT); 884 if (err) 885 en_err(priv, "Failed disabling promiscuous mode\n"); 886 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 887 break; 888 889 case MLX4_STEERING_MODE_B0: 890 err = mlx4_unicast_promisc_remove(mdev->dev, 891 priv->base_qpn, 892 priv->port); 893 if (err) 894 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 895 /* Disable Multicast promisc */ 896 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 897 err = mlx4_multicast_promisc_remove(mdev->dev, 898 priv->base_qpn, 899 priv->port); 900 if (err) 901 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 902 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 903 } 904 break; 905 906 case MLX4_STEERING_MODE_A0: 907 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 908 priv->port, 909 priv->base_qpn, 0); 910 if (err) 911 en_err(priv, "Failed disabling promiscuous mode\n"); 912 break; 913 } 914 } 915 916 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 917 struct net_device *dev, 918 struct mlx4_en_dev *mdev) 919 { 920 struct mlx4_en_mc_list *mclist, *tmp; 921 u64 mcast_addr = 0; 922 u8 mc_list[16] = {0}; 923 int err = 0; 924 925 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 926 if (dev->flags & IFF_ALLMULTI) { 927 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 928 0, MLX4_MCAST_DISABLE); 929 if (err) 930 en_err(priv, "Failed disabling multicast filter\n"); 931 932 /* Add the default qp number as multicast promisc */ 933 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 934 switch (mdev->dev->caps.steering_mode) { 935 case MLX4_STEERING_MODE_DEVICE_MANAGED: 936 err = mlx4_flow_steer_promisc_add(mdev->dev, 937 priv->port, 938 priv->base_qpn, 939 MLX4_FS_MC_DEFAULT); 940 break; 941 942 case MLX4_STEERING_MODE_B0: 943 err = mlx4_multicast_promisc_add(mdev->dev, 944 priv->base_qpn, 945 priv->port); 946 break; 947 948 case MLX4_STEERING_MODE_A0: 949 break; 950 } 951 if (err) 952 en_err(priv, "Failed entering multicast promisc mode\n"); 953 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 954 } 955 } else { 956 /* Disable Multicast promisc */ 957 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 958 switch (mdev->dev->caps.steering_mode) { 959 case MLX4_STEERING_MODE_DEVICE_MANAGED: 960 err = mlx4_flow_steer_promisc_remove(mdev->dev, 961 priv->port, 962 MLX4_FS_MC_DEFAULT); 963 break; 964 965 case MLX4_STEERING_MODE_B0: 966 err = mlx4_multicast_promisc_remove(mdev->dev, 967 priv->base_qpn, 968 priv->port); 969 break; 970 971 case MLX4_STEERING_MODE_A0: 972 break; 973 } 974 if (err) 975 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 976 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 977 } 978 979 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 980 0, MLX4_MCAST_DISABLE); 981 if (err) 982 en_err(priv, "Failed disabling multicast filter\n"); 983 984 /* Flush mcast filter and init it with broadcast address */ 985 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 986 1, MLX4_MCAST_CONFIG); 987 988 /* Update multicast list - we cache all addresses so they won't 989 * change while HW is updated holding the command semaphor */ 990 netif_addr_lock_bh(dev); 991 mlx4_en_cache_mclist(dev); 992 netif_addr_unlock_bh(dev); 993 list_for_each_entry(mclist, &priv->mc_list, list) { 994 mcast_addr = mlx4_mac_to_u64(mclist->addr); 995 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 996 mcast_addr, 0, MLX4_MCAST_CONFIG); 997 } 998 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 999 0, MLX4_MCAST_ENABLE); 1000 if (err) 1001 en_err(priv, "Failed enabling multicast filter\n"); 1002 1003 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 1004 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1005 if (mclist->action == MCLIST_REM) { 1006 /* detach this address and delete from list */ 1007 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1008 mc_list[5] = priv->port; 1009 err = mlx4_multicast_detach(mdev->dev, 1010 &priv->rss_map.indir_qp, 1011 mc_list, 1012 MLX4_PROT_ETH, 1013 mclist->reg_id); 1014 if (err) 1015 en_err(priv, "Fail to detach multicast address\n"); 1016 1017 if (mclist->tunnel_reg_id) { 1018 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); 1019 if (err) 1020 en_err(priv, "Failed to detach multicast address\n"); 1021 } 1022 1023 /* remove from list */ 1024 list_del(&mclist->list); 1025 kfree(mclist); 1026 } else if (mclist->action == MCLIST_ADD) { 1027 /* attach the address */ 1028 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1029 /* needed for B0 steering support */ 1030 mc_list[5] = priv->port; 1031 err = mlx4_multicast_attach(mdev->dev, 1032 &priv->rss_map.indir_qp, 1033 mc_list, 1034 priv->port, 0, 1035 MLX4_PROT_ETH, 1036 &mclist->reg_id); 1037 if (err) 1038 en_err(priv, "Fail to attach multicast address\n"); 1039 1040 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 1041 &mclist->tunnel_reg_id); 1042 if (err) 1043 en_err(priv, "Failed to attach multicast address\n"); 1044 } 1045 } 1046 } 1047 } 1048 1049 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1050 struct net_device *dev, 1051 struct mlx4_en_dev *mdev) 1052 { 1053 struct netdev_hw_addr *ha; 1054 struct mlx4_mac_entry *entry; 1055 struct hlist_node *tmp; 1056 bool found; 1057 u64 mac; 1058 int err = 0; 1059 struct hlist_head *bucket; 1060 unsigned int i; 1061 int removed = 0; 1062 u32 prev_flags; 1063 1064 /* Note that we do not need to protect our mac_hash traversal with rcu, 1065 * since all modification code is protected by mdev->state_lock 1066 */ 1067 1068 /* find what to remove */ 1069 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1070 bucket = &priv->mac_hash[i]; 1071 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1072 found = false; 1073 netdev_for_each_uc_addr(ha, dev) { 1074 if (ether_addr_equal_64bits(entry->mac, 1075 ha->addr)) { 1076 found = true; 1077 break; 1078 } 1079 } 1080 1081 /* MAC address of the port is not in uc list */ 1082 if (ether_addr_equal_64bits(entry->mac, 1083 priv->current_mac)) 1084 found = true; 1085 1086 if (!found) { 1087 mac = mlx4_mac_to_u64(entry->mac); 1088 mlx4_en_uc_steer_release(priv, entry->mac, 1089 priv->base_qpn, 1090 entry->reg_id); 1091 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1092 1093 hlist_del_rcu(&entry->hlist); 1094 kfree_rcu(entry, rcu); 1095 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1096 entry->mac, priv->port); 1097 ++removed; 1098 } 1099 } 1100 } 1101 1102 /* if we didn't remove anything, there is no use in trying to add 1103 * again once we are in a forced promisc mode state 1104 */ 1105 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1106 return; 1107 1108 prev_flags = priv->flags; 1109 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1110 1111 /* find what to add */ 1112 netdev_for_each_uc_addr(ha, dev) { 1113 found = false; 1114 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1115 hlist_for_each_entry(entry, bucket, hlist) { 1116 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1117 found = true; 1118 break; 1119 } 1120 } 1121 1122 if (!found) { 1123 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1124 if (!entry) { 1125 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1126 ha->addr, priv->port); 1127 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1128 break; 1129 } 1130 mac = mlx4_mac_to_u64(ha->addr); 1131 memcpy(entry->mac, ha->addr, ETH_ALEN); 1132 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1133 if (err < 0) { 1134 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1135 ha->addr, priv->port, err); 1136 kfree(entry); 1137 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1138 break; 1139 } 1140 err = mlx4_en_uc_steer_add(priv, ha->addr, 1141 &priv->base_qpn, 1142 &entry->reg_id); 1143 if (err) { 1144 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1145 ha->addr, priv->port, err); 1146 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1147 kfree(entry); 1148 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1149 break; 1150 } else { 1151 unsigned int mac_hash; 1152 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1153 ha->addr, priv->port); 1154 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1155 bucket = &priv->mac_hash[mac_hash]; 1156 hlist_add_head_rcu(&entry->hlist, bucket); 1157 } 1158 } 1159 } 1160 1161 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1162 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1163 priv->port); 1164 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1165 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1166 priv->port); 1167 } 1168 } 1169 1170 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1171 { 1172 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1173 rx_mode_task); 1174 struct mlx4_en_dev *mdev = priv->mdev; 1175 struct net_device *dev = priv->dev; 1176 1177 mutex_lock(&mdev->state_lock); 1178 if (!mdev->device_up) { 1179 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1180 goto out; 1181 } 1182 if (!priv->port_up) { 1183 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1184 goto out; 1185 } 1186 1187 if (!netif_carrier_ok(dev)) { 1188 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1189 if (priv->port_state.link_state) { 1190 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1191 netif_carrier_on(dev); 1192 en_dbg(LINK, priv, "Link Up\n"); 1193 } 1194 } 1195 } 1196 1197 if (dev->priv_flags & IFF_UNICAST_FLT) 1198 mlx4_en_do_uc_filter(priv, dev, mdev); 1199 1200 /* Promsicuous mode: disable all filters */ 1201 if ((dev->flags & IFF_PROMISC) || 1202 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1203 mlx4_en_set_promisc_mode(priv, mdev); 1204 goto out; 1205 } 1206 1207 /* Not in promiscuous mode */ 1208 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1209 mlx4_en_clear_promisc_mode(priv, mdev); 1210 1211 mlx4_en_do_multicast(priv, dev, mdev); 1212 out: 1213 mutex_unlock(&mdev->state_lock); 1214 } 1215 1216 #ifdef CONFIG_NET_POLL_CONTROLLER 1217 static void mlx4_en_netpoll(struct net_device *dev) 1218 { 1219 struct mlx4_en_priv *priv = netdev_priv(dev); 1220 struct mlx4_en_cq *cq; 1221 int i; 1222 1223 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1224 cq = priv->tx_cq[TX][i]; 1225 napi_schedule(&cq->napi); 1226 } 1227 } 1228 #endif 1229 1230 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1231 { 1232 u64 reg_id; 1233 int err = 0; 1234 int *qpn = &priv->base_qpn; 1235 struct mlx4_mac_entry *entry; 1236 1237 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 1238 if (err) 1239 return err; 1240 1241 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, 1242 &priv->tunnel_reg_id); 1243 if (err) 1244 goto tunnel_err; 1245 1246 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1247 if (!entry) { 1248 err = -ENOMEM; 1249 goto alloc_err; 1250 } 1251 1252 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 1253 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); 1254 entry->reg_id = reg_id; 1255 hlist_add_head_rcu(&entry->hlist, 1256 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 1257 1258 return 0; 1259 1260 alloc_err: 1261 if (priv->tunnel_reg_id) 1262 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1263 1264 tunnel_err: 1265 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 1266 return err; 1267 } 1268 1269 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) 1270 { 1271 u64 mac; 1272 unsigned int i; 1273 int qpn = priv->base_qpn; 1274 struct hlist_head *bucket; 1275 struct hlist_node *tmp; 1276 struct mlx4_mac_entry *entry; 1277 1278 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1279 bucket = &priv->mac_hash[i]; 1280 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1281 mac = mlx4_mac_to_u64(entry->mac); 1282 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", 1283 entry->mac); 1284 mlx4_en_uc_steer_release(priv, entry->mac, 1285 qpn, entry->reg_id); 1286 1287 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); 1288 hlist_del_rcu(&entry->hlist); 1289 kfree_rcu(entry, rcu); 1290 } 1291 } 1292 1293 if (priv->tunnel_reg_id) { 1294 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1295 priv->tunnel_reg_id = 0; 1296 } 1297 } 1298 1299 static void mlx4_en_tx_timeout(struct net_device *dev) 1300 { 1301 struct mlx4_en_priv *priv = netdev_priv(dev); 1302 struct mlx4_en_dev *mdev = priv->mdev; 1303 int i; 1304 1305 if (netif_msg_timer(priv)) 1306 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1307 1308 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1309 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i]; 1310 1311 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 1312 continue; 1313 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1314 i, tx_ring->qpn, tx_ring->sp_cqn, 1315 tx_ring->cons, tx_ring->prod); 1316 } 1317 1318 priv->port_stats.tx_timeout++; 1319 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1320 queue_work(mdev->workqueue, &priv->watchdog_task); 1321 } 1322 1323 1324 static struct rtnl_link_stats64 * 1325 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1326 { 1327 struct mlx4_en_priv *priv = netdev_priv(dev); 1328 1329 spin_lock_bh(&priv->stats_lock); 1330 mlx4_en_fold_software_stats(dev); 1331 netdev_stats_to_stats64(stats, &dev->stats); 1332 spin_unlock_bh(&priv->stats_lock); 1333 1334 return stats; 1335 } 1336 1337 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1338 { 1339 struct mlx4_en_cq *cq; 1340 int i, t; 1341 1342 /* If we haven't received a specific coalescing setting 1343 * (module param), we set the moderation parameters as follows: 1344 * - moder_cnt is set to the number of mtu sized packets to 1345 * satisfy our coalescing target. 1346 * - moder_time is set to a fixed value. 1347 */ 1348 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1349 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1350 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1351 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1352 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1353 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1354 1355 /* Setup cq moderation params */ 1356 for (i = 0; i < priv->rx_ring_num; i++) { 1357 cq = priv->rx_cq[i]; 1358 cq->moder_cnt = priv->rx_frames; 1359 cq->moder_time = priv->rx_usecs; 1360 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1361 priv->last_moder_packets[i] = 0; 1362 priv->last_moder_bytes[i] = 0; 1363 } 1364 1365 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1366 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1367 cq = priv->tx_cq[t][i]; 1368 cq->moder_cnt = priv->tx_frames; 1369 cq->moder_time = priv->tx_usecs; 1370 } 1371 } 1372 1373 /* Reset auto-moderation params */ 1374 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1375 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1376 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1377 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1378 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1379 priv->adaptive_rx_coal = 1; 1380 priv->last_moder_jiffies = 0; 1381 priv->last_moder_tx_packets = 0; 1382 } 1383 1384 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1385 { 1386 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1387 struct mlx4_en_cq *cq; 1388 unsigned long packets; 1389 unsigned long rate; 1390 unsigned long avg_pkt_size; 1391 unsigned long rx_packets; 1392 unsigned long rx_bytes; 1393 unsigned long rx_pkt_diff; 1394 int moder_time; 1395 int ring, err; 1396 1397 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1398 return; 1399 1400 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1401 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); 1402 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); 1403 1404 rx_pkt_diff = ((unsigned long) (rx_packets - 1405 priv->last_moder_packets[ring])); 1406 packets = rx_pkt_diff; 1407 rate = packets * HZ / period; 1408 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 1409 priv->last_moder_bytes[ring])) / packets : 0; 1410 1411 /* Apply auto-moderation only when packet rate 1412 * exceeds a rate that it matters */ 1413 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1414 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1415 if (rate < priv->pkt_rate_low) 1416 moder_time = priv->rx_usecs_low; 1417 else if (rate > priv->pkt_rate_high) 1418 moder_time = priv->rx_usecs_high; 1419 else 1420 moder_time = (rate - priv->pkt_rate_low) * 1421 (priv->rx_usecs_high - priv->rx_usecs_low) / 1422 (priv->pkt_rate_high - priv->pkt_rate_low) + 1423 priv->rx_usecs_low; 1424 } else { 1425 moder_time = priv->rx_usecs_low; 1426 } 1427 1428 if (moder_time != priv->last_moder_time[ring]) { 1429 priv->last_moder_time[ring] = moder_time; 1430 cq = priv->rx_cq[ring]; 1431 cq->moder_time = moder_time; 1432 cq->moder_cnt = priv->rx_frames; 1433 err = mlx4_en_set_cq_moder(priv, cq); 1434 if (err) 1435 en_err(priv, "Failed modifying moderation for cq:%d\n", 1436 ring); 1437 } 1438 priv->last_moder_packets[ring] = rx_packets; 1439 priv->last_moder_bytes[ring] = rx_bytes; 1440 } 1441 1442 priv->last_moder_jiffies = jiffies; 1443 } 1444 1445 static void mlx4_en_do_get_stats(struct work_struct *work) 1446 { 1447 struct delayed_work *delay = to_delayed_work(work); 1448 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1449 stats_task); 1450 struct mlx4_en_dev *mdev = priv->mdev; 1451 int err; 1452 1453 mutex_lock(&mdev->state_lock); 1454 if (mdev->device_up) { 1455 if (priv->port_up) { 1456 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1457 if (err) 1458 en_dbg(HW, priv, "Could not update stats\n"); 1459 1460 mlx4_en_auto_moderation(priv); 1461 } 1462 1463 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1464 } 1465 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1466 mlx4_en_do_set_mac(priv, priv->current_mac); 1467 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1468 } 1469 mutex_unlock(&mdev->state_lock); 1470 } 1471 1472 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1473 * periodically 1474 */ 1475 static void mlx4_en_service_task(struct work_struct *work) 1476 { 1477 struct delayed_work *delay = to_delayed_work(work); 1478 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1479 service_task); 1480 struct mlx4_en_dev *mdev = priv->mdev; 1481 1482 mutex_lock(&mdev->state_lock); 1483 if (mdev->device_up) { 1484 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1485 mlx4_en_ptp_overflow_check(mdev); 1486 1487 mlx4_en_recover_from_oom(priv); 1488 queue_delayed_work(mdev->workqueue, &priv->service_task, 1489 SERVICE_TASK_DELAY); 1490 } 1491 mutex_unlock(&mdev->state_lock); 1492 } 1493 1494 static void mlx4_en_linkstate(struct work_struct *work) 1495 { 1496 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1497 linkstate_task); 1498 struct mlx4_en_dev *mdev = priv->mdev; 1499 int linkstate = priv->link_state; 1500 1501 mutex_lock(&mdev->state_lock); 1502 /* If observable port state changed set carrier state and 1503 * report to system log */ 1504 if (priv->last_link_state != linkstate) { 1505 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1506 en_info(priv, "Link Down\n"); 1507 netif_carrier_off(priv->dev); 1508 } else { 1509 en_info(priv, "Link Up\n"); 1510 netif_carrier_on(priv->dev); 1511 } 1512 } 1513 priv->last_link_state = linkstate; 1514 mutex_unlock(&mdev->state_lock); 1515 } 1516 1517 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1518 { 1519 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1520 int numa_node = priv->mdev->dev->numa_node; 1521 1522 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1523 return -ENOMEM; 1524 1525 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1526 ring->affinity_mask); 1527 return 0; 1528 } 1529 1530 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1531 { 1532 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); 1533 } 1534 1535 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, 1536 int tx_ring_idx) 1537 { 1538 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx]; 1539 int rr_index = tx_ring_idx; 1540 1541 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; 1542 tx_ring->recycle_ring = priv->rx_ring[rr_index]; 1543 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n", 1544 TX_XDP, tx_ring_idx, rr_index); 1545 } 1546 1547 int mlx4_en_start_port(struct net_device *dev) 1548 { 1549 struct mlx4_en_priv *priv = netdev_priv(dev); 1550 struct mlx4_en_dev *mdev = priv->mdev; 1551 struct mlx4_en_cq *cq; 1552 struct mlx4_en_tx_ring *tx_ring; 1553 int rx_index = 0; 1554 int err = 0; 1555 int i, t; 1556 int j; 1557 u8 mc_list[16] = {0}; 1558 1559 if (priv->port_up) { 1560 en_dbg(DRV, priv, "start port called while port already up\n"); 1561 return 0; 1562 } 1563 1564 INIT_LIST_HEAD(&priv->mc_list); 1565 INIT_LIST_HEAD(&priv->curr_list); 1566 INIT_LIST_HEAD(&priv->ethtool_list); 1567 memset(&priv->ethtool_rules[0], 0, 1568 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1569 1570 /* Calculate Rx buf size */ 1571 dev->mtu = min(dev->mtu, priv->max_mtu); 1572 mlx4_en_calc_rx_buf(dev); 1573 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1574 1575 /* Configure rx cq's and rings */ 1576 err = mlx4_en_activate_rx_rings(priv); 1577 if (err) { 1578 en_err(priv, "Failed to activate RX rings\n"); 1579 return err; 1580 } 1581 for (i = 0; i < priv->rx_ring_num; i++) { 1582 cq = priv->rx_cq[i]; 1583 1584 err = mlx4_en_init_affinity_hint(priv, i); 1585 if (err) { 1586 en_err(priv, "Failed preparing IRQ affinity hint\n"); 1587 goto cq_err; 1588 } 1589 1590 err = mlx4_en_activate_cq(priv, cq, i); 1591 if (err) { 1592 en_err(priv, "Failed activating Rx CQ\n"); 1593 mlx4_en_free_affinity_hint(priv, i); 1594 goto cq_err; 1595 } 1596 1597 for (j = 0; j < cq->size; j++) { 1598 struct mlx4_cqe *cqe = NULL; 1599 1600 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + 1601 priv->cqe_factor; 1602 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1603 } 1604 1605 err = mlx4_en_set_cq_moder(priv, cq); 1606 if (err) { 1607 en_err(priv, "Failed setting cq moderation parameters\n"); 1608 mlx4_en_deactivate_cq(priv, cq); 1609 mlx4_en_free_affinity_hint(priv, i); 1610 goto cq_err; 1611 } 1612 mlx4_en_arm_cq(priv, cq); 1613 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1614 ++rx_index; 1615 } 1616 1617 /* Set qp number */ 1618 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1619 err = mlx4_en_get_qp(priv); 1620 if (err) { 1621 en_err(priv, "Failed getting eth qp\n"); 1622 goto cq_err; 1623 } 1624 mdev->mac_removed[priv->port] = 0; 1625 1626 priv->counter_index = 1627 mlx4_get_default_counter_index(mdev->dev, priv->port); 1628 1629 err = mlx4_en_config_rss_steer(priv); 1630 if (err) { 1631 en_err(priv, "Failed configuring rss steering\n"); 1632 goto mac_err; 1633 } 1634 1635 err = mlx4_en_create_drop_qp(priv); 1636 if (err) 1637 goto rss_err; 1638 1639 /* Configure tx cq's and rings */ 1640 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1641 u8 num_tx_rings_p_up = t == TX ? 1642 priv->num_tx_rings_p_up : priv->tx_ring_num[t]; 1643 1644 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1645 /* Configure cq */ 1646 cq = priv->tx_cq[t][i]; 1647 err = mlx4_en_activate_cq(priv, cq, i); 1648 if (err) { 1649 en_err(priv, "Failed allocating Tx CQ\n"); 1650 goto tx_err; 1651 } 1652 err = mlx4_en_set_cq_moder(priv, cq); 1653 if (err) { 1654 en_err(priv, "Failed setting cq moderation parameters\n"); 1655 mlx4_en_deactivate_cq(priv, cq); 1656 goto tx_err; 1657 } 1658 en_dbg(DRV, priv, 1659 "Resetting index of collapsed CQ:%d to -1\n", i); 1660 cq->buf->wqe_index = cpu_to_be16(0xffff); 1661 1662 /* Configure ring */ 1663 tx_ring = priv->tx_ring[t][i]; 1664 err = mlx4_en_activate_tx_ring(priv, tx_ring, 1665 cq->mcq.cqn, 1666 i / num_tx_rings_p_up); 1667 if (err) { 1668 en_err(priv, "Failed allocating Tx ring\n"); 1669 mlx4_en_deactivate_cq(priv, cq); 1670 goto tx_err; 1671 } 1672 if (t != TX_XDP) { 1673 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1674 tx_ring->recycle_ring = NULL; 1675 } else { 1676 mlx4_en_init_recycle_ring(priv, i); 1677 } 1678 1679 /* Arm CQ for TX completions */ 1680 mlx4_en_arm_cq(priv, cq); 1681 1682 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1683 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1684 *((u32 *)(tx_ring->buf + j)) = 0xffffffff; 1685 } 1686 } 1687 1688 /* Configure port */ 1689 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1690 priv->rx_skb_size + ETH_FCS_LEN, 1691 priv->prof->tx_pause, 1692 priv->prof->tx_ppp, 1693 priv->prof->rx_pause, 1694 priv->prof->rx_ppp); 1695 if (err) { 1696 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1697 priv->port, err); 1698 goto tx_err; 1699 } 1700 /* Set default qp number */ 1701 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1702 if (err) { 1703 en_err(priv, "Failed setting default qp numbers\n"); 1704 goto tx_err; 1705 } 1706 1707 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1708 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 1709 if (err) { 1710 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1711 err); 1712 goto tx_err; 1713 } 1714 } 1715 1716 /* Init port */ 1717 en_dbg(HW, priv, "Initializing port\n"); 1718 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1719 if (err) { 1720 en_err(priv, "Failed Initializing port\n"); 1721 goto tx_err; 1722 } 1723 1724 /* Set Unicast and VXLAN steering rules */ 1725 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && 1726 mlx4_en_set_rss_steer_rules(priv)) 1727 mlx4_warn(mdev, "Failed setting steering rules\n"); 1728 1729 /* Attach rx QP to bradcast address */ 1730 eth_broadcast_addr(&mc_list[10]); 1731 mc_list[5] = priv->port; /* needed for B0 steering support */ 1732 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1733 priv->port, 0, MLX4_PROT_ETH, 1734 &priv->broadcast_id)) 1735 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1736 1737 /* Must redo promiscuous mode setup. */ 1738 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1739 1740 /* Schedule multicast task to populate multicast list */ 1741 queue_work(mdev->workqueue, &priv->rx_mode_task); 1742 1743 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1744 udp_tunnel_get_rx_info(dev); 1745 1746 priv->port_up = true; 1747 1748 /* Process all completions if exist to prevent 1749 * the queues freezing if they are full 1750 */ 1751 for (i = 0; i < priv->rx_ring_num; i++) { 1752 local_bh_disable(); 1753 napi_schedule(&priv->rx_cq[i]->napi); 1754 local_bh_enable(); 1755 } 1756 1757 netif_tx_start_all_queues(dev); 1758 netif_device_attach(dev); 1759 1760 return 0; 1761 1762 tx_err: 1763 if (t == MLX4_EN_NUM_TX_TYPES) { 1764 t--; 1765 i = priv->tx_ring_num[t]; 1766 } 1767 while (t >= 0) { 1768 while (i--) { 1769 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1770 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1771 } 1772 if (!t--) 1773 break; 1774 i = priv->tx_ring_num[t]; 1775 } 1776 mlx4_en_destroy_drop_qp(priv); 1777 rss_err: 1778 mlx4_en_release_rss_steer(priv); 1779 mac_err: 1780 mlx4_en_put_qp(priv); 1781 cq_err: 1782 while (rx_index--) { 1783 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1784 mlx4_en_free_affinity_hint(priv, rx_index); 1785 } 1786 for (i = 0; i < priv->rx_ring_num; i++) 1787 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1788 1789 return err; /* need to close devices */ 1790 } 1791 1792 1793 void mlx4_en_stop_port(struct net_device *dev, int detach) 1794 { 1795 struct mlx4_en_priv *priv = netdev_priv(dev); 1796 struct mlx4_en_dev *mdev = priv->mdev; 1797 struct mlx4_en_mc_list *mclist, *tmp; 1798 struct ethtool_flow_id *flow, *tmp_flow; 1799 int i, t; 1800 u8 mc_list[16] = {0}; 1801 1802 if (!priv->port_up) { 1803 en_dbg(DRV, priv, "stop port called while port already down\n"); 1804 return; 1805 } 1806 1807 /* close port*/ 1808 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1809 1810 /* Synchronize with tx routine */ 1811 netif_tx_lock_bh(dev); 1812 if (detach) 1813 netif_device_detach(dev); 1814 netif_tx_stop_all_queues(dev); 1815 netif_tx_unlock_bh(dev); 1816 1817 netif_tx_disable(dev); 1818 1819 spin_lock_bh(&priv->stats_lock); 1820 mlx4_en_fold_software_stats(dev); 1821 /* Set port as not active */ 1822 priv->port_up = false; 1823 spin_unlock_bh(&priv->stats_lock); 1824 1825 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1826 1827 /* Promsicuous mode */ 1828 if (mdev->dev->caps.steering_mode == 1829 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1830 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1831 MLX4_EN_FLAG_MC_PROMISC); 1832 mlx4_flow_steer_promisc_remove(mdev->dev, 1833 priv->port, 1834 MLX4_FS_ALL_DEFAULT); 1835 mlx4_flow_steer_promisc_remove(mdev->dev, 1836 priv->port, 1837 MLX4_FS_MC_DEFAULT); 1838 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1839 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1840 1841 /* Disable promiscouos mode */ 1842 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1843 priv->port); 1844 1845 /* Disable Multicast promisc */ 1846 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1847 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1848 priv->port); 1849 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1850 } 1851 } 1852 1853 /* Detach All multicasts */ 1854 eth_broadcast_addr(&mc_list[10]); 1855 mc_list[5] = priv->port; /* needed for B0 steering support */ 1856 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1857 MLX4_PROT_ETH, priv->broadcast_id); 1858 list_for_each_entry(mclist, &priv->curr_list, list) { 1859 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1860 mc_list[5] = priv->port; 1861 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1862 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1863 if (mclist->tunnel_reg_id) 1864 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); 1865 } 1866 mlx4_en_clear_list(dev); 1867 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1868 list_del(&mclist->list); 1869 kfree(mclist); 1870 } 1871 1872 /* Flush multicast filter */ 1873 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1874 1875 /* Remove flow steering rules for the port*/ 1876 if (mdev->dev->caps.steering_mode == 1877 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1878 ASSERT_RTNL(); 1879 list_for_each_entry_safe(flow, tmp_flow, 1880 &priv->ethtool_list, list) { 1881 mlx4_flow_detach(mdev->dev, flow->id); 1882 list_del(&flow->list); 1883 } 1884 } 1885 1886 mlx4_en_destroy_drop_qp(priv); 1887 1888 /* Free TX Rings */ 1889 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 1890 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1891 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1892 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1893 } 1894 } 1895 msleep(10); 1896 1897 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) 1898 for (i = 0; i < priv->tx_ring_num[t]; i++) 1899 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]); 1900 1901 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1902 mlx4_en_delete_rss_steer_rules(priv); 1903 1904 /* Free RSS qps */ 1905 mlx4_en_release_rss_steer(priv); 1906 1907 /* Unregister Mac address for the port */ 1908 mlx4_en_put_qp(priv); 1909 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1910 mdev->mac_removed[priv->port] = 1; 1911 1912 /* Free RX Rings */ 1913 for (i = 0; i < priv->rx_ring_num; i++) { 1914 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1915 1916 napi_synchronize(&cq->napi); 1917 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1918 mlx4_en_deactivate_cq(priv, cq); 1919 1920 mlx4_en_free_affinity_hint(priv, i); 1921 } 1922 } 1923 1924 static void mlx4_en_restart(struct work_struct *work) 1925 { 1926 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1927 watchdog_task); 1928 struct mlx4_en_dev *mdev = priv->mdev; 1929 struct net_device *dev = priv->dev; 1930 1931 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1932 1933 rtnl_lock(); 1934 mutex_lock(&mdev->state_lock); 1935 if (priv->port_up) { 1936 mlx4_en_stop_port(dev, 1); 1937 if (mlx4_en_start_port(dev)) 1938 en_err(priv, "Failed restarting port %d\n", priv->port); 1939 } 1940 mutex_unlock(&mdev->state_lock); 1941 rtnl_unlock(); 1942 } 1943 1944 static void mlx4_en_clear_stats(struct net_device *dev) 1945 { 1946 struct mlx4_en_priv *priv = netdev_priv(dev); 1947 struct mlx4_en_dev *mdev = priv->mdev; 1948 struct mlx4_en_tx_ring **tx_ring; 1949 int i; 1950 1951 if (!mlx4_is_slave(mdev->dev)) 1952 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1953 en_dbg(HW, priv, "Failed dumping statistics\n"); 1954 1955 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1956 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1957 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1958 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); 1959 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); 1960 memset(&priv->rx_priority_flowstats, 0, 1961 sizeof(priv->rx_priority_flowstats)); 1962 memset(&priv->tx_priority_flowstats, 0, 1963 sizeof(priv->tx_priority_flowstats)); 1964 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); 1965 1966 tx_ring = priv->tx_ring[TX]; 1967 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1968 tx_ring[i]->bytes = 0; 1969 tx_ring[i]->packets = 0; 1970 tx_ring[i]->tx_csum = 0; 1971 tx_ring[i]->tx_dropped = 0; 1972 tx_ring[i]->queue_stopped = 0; 1973 tx_ring[i]->wake_queue = 0; 1974 tx_ring[i]->tso_packets = 0; 1975 tx_ring[i]->xmit_more = 0; 1976 } 1977 for (i = 0; i < priv->rx_ring_num; i++) { 1978 priv->rx_ring[i]->bytes = 0; 1979 priv->rx_ring[i]->packets = 0; 1980 priv->rx_ring[i]->csum_ok = 0; 1981 priv->rx_ring[i]->csum_none = 0; 1982 priv->rx_ring[i]->csum_complete = 0; 1983 } 1984 } 1985 1986 static int mlx4_en_open(struct net_device *dev) 1987 { 1988 struct mlx4_en_priv *priv = netdev_priv(dev); 1989 struct mlx4_en_dev *mdev = priv->mdev; 1990 int err = 0; 1991 1992 mutex_lock(&mdev->state_lock); 1993 1994 if (!mdev->device_up) { 1995 en_err(priv, "Cannot open - device down/disabled\n"); 1996 err = -EBUSY; 1997 goto out; 1998 } 1999 2000 /* Reset HW statistics and SW counters */ 2001 mlx4_en_clear_stats(dev); 2002 2003 err = mlx4_en_start_port(dev); 2004 if (err) 2005 en_err(priv, "Failed starting port:%d\n", priv->port); 2006 2007 out: 2008 mutex_unlock(&mdev->state_lock); 2009 return err; 2010 } 2011 2012 2013 static int mlx4_en_close(struct net_device *dev) 2014 { 2015 struct mlx4_en_priv *priv = netdev_priv(dev); 2016 struct mlx4_en_dev *mdev = priv->mdev; 2017 2018 en_dbg(IFDOWN, priv, "Close port called\n"); 2019 2020 mutex_lock(&mdev->state_lock); 2021 2022 mlx4_en_stop_port(dev, 0); 2023 netif_carrier_off(dev); 2024 2025 mutex_unlock(&mdev->state_lock); 2026 return 0; 2027 } 2028 2029 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 2030 { 2031 int i, t; 2032 2033 #ifdef CONFIG_RFS_ACCEL 2034 priv->dev->rx_cpu_rmap = NULL; 2035 #endif 2036 2037 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2038 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2039 if (priv->tx_ring[t] && priv->tx_ring[t][i]) 2040 mlx4_en_destroy_tx_ring(priv, 2041 &priv->tx_ring[t][i]); 2042 if (priv->tx_cq[t] && priv->tx_cq[t][i]) 2043 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2044 } 2045 } 2046 2047 for (i = 0; i < priv->rx_ring_num; i++) { 2048 if (priv->rx_ring[i]) 2049 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2050 priv->prof->rx_ring_size, priv->stride); 2051 if (priv->rx_cq[i]) 2052 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2053 } 2054 2055 } 2056 2057 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 2058 { 2059 struct mlx4_en_port_profile *prof = priv->prof; 2060 int i, t; 2061 int node; 2062 2063 /* Create tx Rings */ 2064 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2065 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2066 node = cpu_to_node(i % num_online_cpus()); 2067 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i], 2068 prof->tx_ring_size, i, t, node)) 2069 goto err; 2070 2071 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i], 2072 prof->tx_ring_size, 2073 TXBB_SIZE, node, i)) 2074 goto err; 2075 } 2076 } 2077 2078 /* Create rx Rings */ 2079 for (i = 0; i < priv->rx_ring_num; i++) { 2080 node = cpu_to_node(i % num_online_cpus()); 2081 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2082 prof->rx_ring_size, i, RX, node)) 2083 goto err; 2084 2085 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2086 prof->rx_ring_size, priv->stride, 2087 node)) 2088 goto err; 2089 } 2090 2091 #ifdef CONFIG_RFS_ACCEL 2092 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2093 #endif 2094 2095 return 0; 2096 2097 err: 2098 en_err(priv, "Failed to allocate NIC resources\n"); 2099 for (i = 0; i < priv->rx_ring_num; i++) { 2100 if (priv->rx_ring[i]) 2101 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2102 prof->rx_ring_size, 2103 priv->stride); 2104 if (priv->rx_cq[i]) 2105 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2106 } 2107 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2108 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2109 if (priv->tx_ring[t][i]) 2110 mlx4_en_destroy_tx_ring(priv, 2111 &priv->tx_ring[t][i]); 2112 if (priv->tx_cq[t][i]) 2113 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2114 } 2115 } 2116 return -ENOMEM; 2117 } 2118 2119 2120 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2121 struct mlx4_en_priv *src, 2122 struct mlx4_en_port_profile *prof) 2123 { 2124 int t; 2125 2126 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2127 sizeof(dst->hwtstamp_config)); 2128 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; 2129 dst->rx_ring_num = prof->rx_ring_num; 2130 dst->flags = prof->flags; 2131 dst->mdev = src->mdev; 2132 dst->port = src->port; 2133 dst->dev = src->dev; 2134 dst->prof = prof; 2135 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2136 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2137 2138 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2139 dst->tx_ring_num[t] = prof->tx_ring_num[t]; 2140 if (!dst->tx_ring_num[t]) 2141 continue; 2142 2143 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * 2144 MAX_TX_RINGS, GFP_KERNEL); 2145 if (!dst->tx_ring[t]) 2146 goto err_free_tx; 2147 2148 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * 2149 MAX_TX_RINGS, GFP_KERNEL); 2150 if (!dst->tx_cq[t]) { 2151 kfree(dst->tx_ring[t]); 2152 goto err_free_tx; 2153 } 2154 } 2155 2156 return 0; 2157 2158 err_free_tx: 2159 while (t--) { 2160 kfree(dst->tx_ring[t]); 2161 kfree(dst->tx_cq[t]); 2162 } 2163 return -ENOMEM; 2164 } 2165 2166 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2167 struct mlx4_en_priv *src) 2168 { 2169 int t; 2170 memcpy(dst->rx_ring, src->rx_ring, 2171 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2172 memcpy(dst->rx_cq, src->rx_cq, 2173 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2174 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2175 sizeof(dst->hwtstamp_config)); 2176 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2177 dst->tx_ring_num[t] = src->tx_ring_num[t]; 2178 dst->tx_ring[t] = src->tx_ring[t]; 2179 dst->tx_cq[t] = src->tx_cq[t]; 2180 } 2181 dst->rx_ring_num = src->rx_ring_num; 2182 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2183 } 2184 2185 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2186 struct mlx4_en_priv *tmp, 2187 struct mlx4_en_port_profile *prof) 2188 { 2189 int t; 2190 2191 mlx4_en_copy_priv(tmp, priv, prof); 2192 2193 if (mlx4_en_alloc_resources(tmp)) { 2194 en_warn(priv, 2195 "%s: Resource allocation failed, using previous configuration\n", 2196 __func__); 2197 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2198 kfree(tmp->tx_ring[t]); 2199 kfree(tmp->tx_cq[t]); 2200 } 2201 return -ENOMEM; 2202 } 2203 return 0; 2204 } 2205 2206 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2207 struct mlx4_en_priv *tmp) 2208 { 2209 mlx4_en_free_resources(priv); 2210 mlx4_en_update_priv(priv, tmp); 2211 } 2212 2213 void mlx4_en_destroy_netdev(struct net_device *dev) 2214 { 2215 struct mlx4_en_priv *priv = netdev_priv(dev); 2216 struct mlx4_en_dev *mdev = priv->mdev; 2217 int t; 2218 2219 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2220 2221 /* Unregister device - this will close the port if it was up */ 2222 if (priv->registered) { 2223 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2224 priv->port)); 2225 unregister_netdev(dev); 2226 } 2227 2228 if (priv->allocated) 2229 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2230 2231 cancel_delayed_work(&priv->stats_task); 2232 cancel_delayed_work(&priv->service_task); 2233 /* flush any pending task for this netdev */ 2234 flush_workqueue(mdev->workqueue); 2235 2236 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2237 mlx4_en_remove_timestamp(mdev); 2238 2239 /* Detach the netdev so tasks would not attempt to access it */ 2240 mutex_lock(&mdev->state_lock); 2241 mdev->pndev[priv->port] = NULL; 2242 mdev->upper[priv->port] = NULL; 2243 2244 #ifdef CONFIG_RFS_ACCEL 2245 mlx4_en_cleanup_filters(priv); 2246 #endif 2247 2248 mlx4_en_free_resources(priv); 2249 mutex_unlock(&mdev->state_lock); 2250 2251 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2252 kfree(priv->tx_ring[t]); 2253 kfree(priv->tx_cq[t]); 2254 } 2255 2256 free_netdev(dev); 2257 } 2258 2259 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu) 2260 { 2261 struct mlx4_en_priv *priv = netdev_priv(dev); 2262 2263 if (mtu > MLX4_EN_MAX_XDP_MTU) { 2264 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n", 2265 mtu, MLX4_EN_MAX_XDP_MTU); 2266 return false; 2267 } 2268 2269 return true; 2270 } 2271 2272 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2273 { 2274 struct mlx4_en_priv *priv = netdev_priv(dev); 2275 struct mlx4_en_dev *mdev = priv->mdev; 2276 int err = 0; 2277 2278 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2279 dev->mtu, new_mtu); 2280 2281 if (priv->tx_ring_num[TX_XDP] && 2282 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2283 return -EOPNOTSUPP; 2284 2285 dev->mtu = new_mtu; 2286 2287 if (netif_running(dev)) { 2288 mutex_lock(&mdev->state_lock); 2289 if (!mdev->device_up) { 2290 /* NIC is probably restarting - let watchdog task reset 2291 * the port */ 2292 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2293 } else { 2294 mlx4_en_stop_port(dev, 1); 2295 err = mlx4_en_start_port(dev); 2296 if (err) { 2297 en_err(priv, "Failed restarting port:%d\n", 2298 priv->port); 2299 queue_work(mdev->workqueue, &priv->watchdog_task); 2300 } 2301 } 2302 mutex_unlock(&mdev->state_lock); 2303 } 2304 return 0; 2305 } 2306 2307 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2308 { 2309 struct mlx4_en_priv *priv = netdev_priv(dev); 2310 struct mlx4_en_dev *mdev = priv->mdev; 2311 struct hwtstamp_config config; 2312 2313 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2314 return -EFAULT; 2315 2316 /* reserved for future extensions */ 2317 if (config.flags) 2318 return -EINVAL; 2319 2320 /* device doesn't support time stamping */ 2321 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2322 return -EINVAL; 2323 2324 /* TX HW timestamp */ 2325 switch (config.tx_type) { 2326 case HWTSTAMP_TX_OFF: 2327 case HWTSTAMP_TX_ON: 2328 break; 2329 default: 2330 return -ERANGE; 2331 } 2332 2333 /* RX HW timestamp */ 2334 switch (config.rx_filter) { 2335 case HWTSTAMP_FILTER_NONE: 2336 break; 2337 case HWTSTAMP_FILTER_ALL: 2338 case HWTSTAMP_FILTER_SOME: 2339 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2340 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2341 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2342 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2343 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2344 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2345 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2346 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2347 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2348 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2349 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2350 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2351 config.rx_filter = HWTSTAMP_FILTER_ALL; 2352 break; 2353 default: 2354 return -ERANGE; 2355 } 2356 2357 if (mlx4_en_reset_config(dev, config, dev->features)) { 2358 config.tx_type = HWTSTAMP_TX_OFF; 2359 config.rx_filter = HWTSTAMP_FILTER_NONE; 2360 } 2361 2362 return copy_to_user(ifr->ifr_data, &config, 2363 sizeof(config)) ? -EFAULT : 0; 2364 } 2365 2366 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2367 { 2368 struct mlx4_en_priv *priv = netdev_priv(dev); 2369 2370 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2371 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; 2372 } 2373 2374 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2375 { 2376 switch (cmd) { 2377 case SIOCSHWTSTAMP: 2378 return mlx4_en_hwtstamp_set(dev, ifr); 2379 case SIOCGHWTSTAMP: 2380 return mlx4_en_hwtstamp_get(dev, ifr); 2381 default: 2382 return -EOPNOTSUPP; 2383 } 2384 } 2385 2386 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2387 netdev_features_t features) 2388 { 2389 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2390 struct mlx4_en_dev *mdev = en_priv->mdev; 2391 2392 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2393 * enable/disable make sure S-TAG flag is always in same state as 2394 * C-TAG. 2395 */ 2396 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2397 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2398 features |= NETIF_F_HW_VLAN_STAG_RX; 2399 else 2400 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2401 2402 return features; 2403 } 2404 2405 static int mlx4_en_set_features(struct net_device *netdev, 2406 netdev_features_t features) 2407 { 2408 struct mlx4_en_priv *priv = netdev_priv(netdev); 2409 bool reset = false; 2410 int ret = 0; 2411 2412 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2413 en_info(priv, "Turn %s RX-FCS\n", 2414 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2415 reset = true; 2416 } 2417 2418 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2419 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2420 2421 en_info(priv, "Turn %s RX-ALL\n", 2422 ignore_fcs_value ? "ON" : "OFF"); 2423 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2424 priv->port, ignore_fcs_value); 2425 if (ret) 2426 return ret; 2427 } 2428 2429 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2430 en_info(priv, "Turn %s RX vlan strip offload\n", 2431 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2432 reset = true; 2433 } 2434 2435 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2436 en_info(priv, "Turn %s TX vlan strip offload\n", 2437 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2438 2439 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2440 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2441 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2442 2443 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2444 en_info(priv, "Turn %s loopback\n", 2445 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); 2446 mlx4_en_update_loopback_state(netdev, features); 2447 } 2448 2449 if (reset) { 2450 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2451 features); 2452 if (ret) 2453 return ret; 2454 } 2455 2456 return 0; 2457 } 2458 2459 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2460 { 2461 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2462 struct mlx4_en_dev *mdev = en_priv->mdev; 2463 u64 mac_u64 = mlx4_mac_to_u64(mac); 2464 2465 if (is_multicast_ether_addr(mac)) 2466 return -EINVAL; 2467 2468 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2469 } 2470 2471 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 2472 __be16 vlan_proto) 2473 { 2474 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2475 struct mlx4_en_dev *mdev = en_priv->mdev; 2476 2477 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos, 2478 vlan_proto); 2479 } 2480 2481 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2482 int max_tx_rate) 2483 { 2484 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2485 struct mlx4_en_dev *mdev = en_priv->mdev; 2486 2487 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2488 max_tx_rate); 2489 } 2490 2491 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2492 { 2493 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2494 struct mlx4_en_dev *mdev = en_priv->mdev; 2495 2496 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2497 } 2498 2499 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2500 { 2501 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2502 struct mlx4_en_dev *mdev = en_priv->mdev; 2503 2504 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2505 } 2506 2507 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2508 { 2509 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2510 struct mlx4_en_dev *mdev = en_priv->mdev; 2511 2512 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2513 } 2514 2515 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2516 struct ifla_vf_stats *vf_stats) 2517 { 2518 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2519 struct mlx4_en_dev *mdev = en_priv->mdev; 2520 2521 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2522 } 2523 2524 #define PORT_ID_BYTE_LEN 8 2525 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2526 struct netdev_phys_item_id *ppid) 2527 { 2528 struct mlx4_en_priv *priv = netdev_priv(dev); 2529 struct mlx4_dev *mdev = priv->mdev->dev; 2530 int i; 2531 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2532 2533 if (!phys_port_id) 2534 return -EOPNOTSUPP; 2535 2536 ppid->id_len = sizeof(phys_port_id); 2537 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2538 ppid->id[i] = phys_port_id & 0xff; 2539 phys_port_id >>= 8; 2540 } 2541 return 0; 2542 } 2543 2544 static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2545 { 2546 int ret; 2547 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2548 vxlan_add_task); 2549 2550 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2551 if (ret) 2552 goto out; 2553 2554 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2555 VXLAN_STEER_BY_OUTER_MAC, 1); 2556 out: 2557 if (ret) { 2558 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2559 return; 2560 } 2561 2562 /* set offloads */ 2563 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2564 NETIF_F_RXCSUM | 2565 NETIF_F_TSO | NETIF_F_TSO6 | 2566 NETIF_F_GSO_UDP_TUNNEL | 2567 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2568 NETIF_F_GSO_PARTIAL; 2569 } 2570 2571 static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2572 { 2573 int ret; 2574 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2575 vxlan_del_task); 2576 /* unset offloads */ 2577 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2578 NETIF_F_RXCSUM | 2579 NETIF_F_TSO | NETIF_F_TSO6 | 2580 NETIF_F_GSO_UDP_TUNNEL | 2581 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2582 NETIF_F_GSO_PARTIAL); 2583 2584 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2585 VXLAN_STEER_BY_OUTER_MAC, 0); 2586 if (ret) 2587 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2588 2589 priv->vxlan_port = 0; 2590 } 2591 2592 static void mlx4_en_add_vxlan_port(struct net_device *dev, 2593 struct udp_tunnel_info *ti) 2594 { 2595 struct mlx4_en_priv *priv = netdev_priv(dev); 2596 __be16 port = ti->port; 2597 __be16 current_port; 2598 2599 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2600 return; 2601 2602 if (ti->sa_family != AF_INET) 2603 return; 2604 2605 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2606 return; 2607 2608 current_port = priv->vxlan_port; 2609 if (current_port && current_port != port) { 2610 en_warn(priv, "vxlan port %d configured, can't add port %d\n", 2611 ntohs(current_port), ntohs(port)); 2612 return; 2613 } 2614 2615 priv->vxlan_port = port; 2616 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); 2617 } 2618 2619 static void mlx4_en_del_vxlan_port(struct net_device *dev, 2620 struct udp_tunnel_info *ti) 2621 { 2622 struct mlx4_en_priv *priv = netdev_priv(dev); 2623 __be16 port = ti->port; 2624 __be16 current_port; 2625 2626 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2627 return; 2628 2629 if (ti->sa_family != AF_INET) 2630 return; 2631 2632 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2633 return; 2634 2635 current_port = priv->vxlan_port; 2636 if (current_port != port) { 2637 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); 2638 return; 2639 } 2640 2641 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2642 } 2643 2644 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2645 struct net_device *dev, 2646 netdev_features_t features) 2647 { 2648 features = vlan_features_check(skb, features); 2649 features = vxlan_features_check(skb, features); 2650 2651 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2652 * support inner IPv6 checksums and segmentation so we need to 2653 * strip that feature if this is an IPv6 encapsulated frame. 2654 */ 2655 if (skb->encapsulation && 2656 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2657 struct mlx4_en_priv *priv = netdev_priv(dev); 2658 2659 if (!priv->vxlan_port || 2660 (ip_hdr(skb)->version != 4) || 2661 (udp_hdr(skb)->dest != priv->vxlan_port)) 2662 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2663 } 2664 2665 return features; 2666 } 2667 2668 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2669 { 2670 struct mlx4_en_priv *priv = netdev_priv(dev); 2671 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index]; 2672 struct mlx4_update_qp_params params; 2673 int err; 2674 2675 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2676 return -EOPNOTSUPP; 2677 2678 /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ 2679 if (maxrate >> 12) { 2680 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2681 params.rate_val = maxrate / 1000; 2682 } else if (maxrate) { 2683 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2684 params.rate_val = maxrate; 2685 } else { /* zero serves to revoke the QP rate-limitation */ 2686 params.rate_unit = 0; 2687 params.rate_val = 0; 2688 } 2689 2690 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2691 ¶ms); 2692 return err; 2693 } 2694 2695 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) 2696 { 2697 struct mlx4_en_priv *priv = netdev_priv(dev); 2698 struct mlx4_en_dev *mdev = priv->mdev; 2699 struct mlx4_en_port_profile new_prof; 2700 struct bpf_prog *old_prog; 2701 struct mlx4_en_priv *tmp; 2702 int tx_changed = 0; 2703 int xdp_ring_num; 2704 int port_up = 0; 2705 int err; 2706 int i; 2707 2708 xdp_ring_num = prog ? priv->rx_ring_num : 0; 2709 2710 /* No need to reconfigure buffers when simply swapping the 2711 * program for a new one. 2712 */ 2713 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { 2714 if (prog) { 2715 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2716 if (IS_ERR(prog)) 2717 return PTR_ERR(prog); 2718 } 2719 mutex_lock(&mdev->state_lock); 2720 for (i = 0; i < priv->rx_ring_num; i++) { 2721 old_prog = rcu_dereference_protected( 2722 priv->rx_ring[i]->xdp_prog, 2723 lockdep_is_held(&mdev->state_lock)); 2724 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2725 if (old_prog) 2726 bpf_prog_put(old_prog); 2727 } 2728 mutex_unlock(&mdev->state_lock); 2729 return 0; 2730 } 2731 2732 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu)) 2733 return -EOPNOTSUPP; 2734 2735 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2736 if (!tmp) 2737 return -ENOMEM; 2738 2739 if (prog) { 2740 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2741 if (IS_ERR(prog)) { 2742 err = PTR_ERR(prog); 2743 goto out; 2744 } 2745 } 2746 2747 mutex_lock(&mdev->state_lock); 2748 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 2749 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num; 2750 2751 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) { 2752 tx_changed = 1; 2753 new_prof.tx_ring_num[TX] = 2754 MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP); 2755 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); 2756 } 2757 2758 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 2759 if (err) { 2760 if (prog) 2761 bpf_prog_sub(prog, priv->rx_ring_num - 1); 2762 goto unlock_out; 2763 } 2764 2765 if (priv->port_up) { 2766 port_up = 1; 2767 mlx4_en_stop_port(dev, 1); 2768 } 2769 2770 mlx4_en_safe_replace_resources(priv, tmp); 2771 if (tx_changed) 2772 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 2773 2774 for (i = 0; i < priv->rx_ring_num; i++) { 2775 old_prog = rcu_dereference_protected( 2776 priv->rx_ring[i]->xdp_prog, 2777 lockdep_is_held(&mdev->state_lock)); 2778 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2779 if (old_prog) 2780 bpf_prog_put(old_prog); 2781 } 2782 2783 if (port_up) { 2784 err = mlx4_en_start_port(dev); 2785 if (err) { 2786 en_err(priv, "Failed starting port %d for XDP change\n", 2787 priv->port); 2788 queue_work(mdev->workqueue, &priv->watchdog_task); 2789 } 2790 } 2791 2792 unlock_out: 2793 mutex_unlock(&mdev->state_lock); 2794 out: 2795 kfree(tmp); 2796 return err; 2797 } 2798 2799 static bool mlx4_xdp_attached(struct net_device *dev) 2800 { 2801 struct mlx4_en_priv *priv = netdev_priv(dev); 2802 2803 return !!priv->tx_ring_num[TX_XDP]; 2804 } 2805 2806 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) 2807 { 2808 switch (xdp->command) { 2809 case XDP_SETUP_PROG: 2810 return mlx4_xdp_set(dev, xdp->prog); 2811 case XDP_QUERY_PROG: 2812 xdp->prog_attached = mlx4_xdp_attached(dev); 2813 return 0; 2814 default: 2815 return -EINVAL; 2816 } 2817 } 2818 2819 static const struct net_device_ops mlx4_netdev_ops = { 2820 .ndo_open = mlx4_en_open, 2821 .ndo_stop = mlx4_en_close, 2822 .ndo_start_xmit = mlx4_en_xmit, 2823 .ndo_select_queue = mlx4_en_select_queue, 2824 .ndo_get_stats64 = mlx4_en_get_stats64, 2825 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2826 .ndo_set_mac_address = mlx4_en_set_mac, 2827 .ndo_validate_addr = eth_validate_addr, 2828 .ndo_change_mtu = mlx4_en_change_mtu, 2829 .ndo_do_ioctl = mlx4_en_ioctl, 2830 .ndo_tx_timeout = mlx4_en_tx_timeout, 2831 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2832 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2833 #ifdef CONFIG_NET_POLL_CONTROLLER 2834 .ndo_poll_controller = mlx4_en_netpoll, 2835 #endif 2836 .ndo_set_features = mlx4_en_set_features, 2837 .ndo_fix_features = mlx4_en_fix_features, 2838 .ndo_setup_tc = __mlx4_en_setup_tc, 2839 #ifdef CONFIG_RFS_ACCEL 2840 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2841 #endif 2842 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2843 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2844 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2845 .ndo_features_check = mlx4_en_features_check, 2846 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2847 .ndo_xdp = mlx4_xdp, 2848 }; 2849 2850 static const struct net_device_ops mlx4_netdev_ops_master = { 2851 .ndo_open = mlx4_en_open, 2852 .ndo_stop = mlx4_en_close, 2853 .ndo_start_xmit = mlx4_en_xmit, 2854 .ndo_select_queue = mlx4_en_select_queue, 2855 .ndo_get_stats64 = mlx4_en_get_stats64, 2856 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2857 .ndo_set_mac_address = mlx4_en_set_mac, 2858 .ndo_validate_addr = eth_validate_addr, 2859 .ndo_change_mtu = mlx4_en_change_mtu, 2860 .ndo_tx_timeout = mlx4_en_tx_timeout, 2861 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2862 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2863 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2864 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2865 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2866 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2867 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2868 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2869 .ndo_get_vf_config = mlx4_en_get_vf_config, 2870 #ifdef CONFIG_NET_POLL_CONTROLLER 2871 .ndo_poll_controller = mlx4_en_netpoll, 2872 #endif 2873 .ndo_set_features = mlx4_en_set_features, 2874 .ndo_fix_features = mlx4_en_fix_features, 2875 .ndo_setup_tc = __mlx4_en_setup_tc, 2876 #ifdef CONFIG_RFS_ACCEL 2877 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2878 #endif 2879 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2880 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2881 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2882 .ndo_features_check = mlx4_en_features_check, 2883 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2884 .ndo_xdp = mlx4_xdp, 2885 }; 2886 2887 struct mlx4_en_bond { 2888 struct work_struct work; 2889 struct mlx4_en_priv *priv; 2890 int is_bonded; 2891 struct mlx4_port_map port_map; 2892 }; 2893 2894 static void mlx4_en_bond_work(struct work_struct *work) 2895 { 2896 struct mlx4_en_bond *bond = container_of(work, 2897 struct mlx4_en_bond, 2898 work); 2899 int err = 0; 2900 struct mlx4_dev *dev = bond->priv->mdev->dev; 2901 2902 if (bond->is_bonded) { 2903 if (!mlx4_is_bonded(dev)) { 2904 err = mlx4_bond(dev); 2905 if (err) 2906 en_err(bond->priv, "Fail to bond device\n"); 2907 } 2908 if (!err) { 2909 err = mlx4_port_map_set(dev, &bond->port_map); 2910 if (err) 2911 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", 2912 bond->port_map.port1, 2913 bond->port_map.port2, 2914 err); 2915 } 2916 } else if (mlx4_is_bonded(dev)) { 2917 err = mlx4_unbond(dev); 2918 if (err) 2919 en_err(bond->priv, "Fail to unbond device\n"); 2920 } 2921 dev_put(bond->priv->dev); 2922 kfree(bond); 2923 } 2924 2925 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 2926 u8 v2p_p1, u8 v2p_p2) 2927 { 2928 struct mlx4_en_bond *bond = NULL; 2929 2930 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 2931 if (!bond) 2932 return -ENOMEM; 2933 2934 INIT_WORK(&bond->work, mlx4_en_bond_work); 2935 bond->priv = priv; 2936 bond->is_bonded = is_bonded; 2937 bond->port_map.port1 = v2p_p1; 2938 bond->port_map.port2 = v2p_p2; 2939 dev_hold(priv->dev); 2940 queue_work(priv->mdev->workqueue, &bond->work); 2941 return 0; 2942 } 2943 2944 int mlx4_en_netdev_event(struct notifier_block *this, 2945 unsigned long event, void *ptr) 2946 { 2947 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2948 u8 port = 0; 2949 struct mlx4_en_dev *mdev; 2950 struct mlx4_dev *dev; 2951 int i, num_eth_ports = 0; 2952 bool do_bond = true; 2953 struct mlx4_en_priv *priv; 2954 u8 v2p_port1 = 0; 2955 u8 v2p_port2 = 0; 2956 2957 if (!net_eq(dev_net(ndev), &init_net)) 2958 return NOTIFY_DONE; 2959 2960 mdev = container_of(this, struct mlx4_en_dev, nb); 2961 dev = mdev->dev; 2962 2963 /* Go into this mode only when two network devices set on two ports 2964 * of the same mlx4 device are slaves of the same bonding master 2965 */ 2966 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 2967 ++num_eth_ports; 2968 if (!port && (mdev->pndev[i] == ndev)) 2969 port = i; 2970 mdev->upper[i] = mdev->pndev[i] ? 2971 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 2972 /* condition not met: network device is a slave */ 2973 if (!mdev->upper[i]) 2974 do_bond = false; 2975 if (num_eth_ports < 2) 2976 continue; 2977 /* condition not met: same master */ 2978 if (mdev->upper[i] != mdev->upper[i-1]) 2979 do_bond = false; 2980 } 2981 /* condition not met: 2 salves */ 2982 do_bond = (num_eth_ports == 2) ? do_bond : false; 2983 2984 /* handle only events that come with enough info */ 2985 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 2986 return NOTIFY_DONE; 2987 2988 priv = netdev_priv(ndev); 2989 if (do_bond) { 2990 struct netdev_notifier_bonding_info *notifier_info = ptr; 2991 struct netdev_bonding_info *bonding_info = 2992 ¬ifier_info->bonding_info; 2993 2994 /* required mode 1, 2 or 4 */ 2995 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 2996 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 2997 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 2998 do_bond = false; 2999 3000 /* require exactly 2 slaves */ 3001 if (bonding_info->master.num_slaves != 2) 3002 do_bond = false; 3003 3004 /* calc v2p */ 3005 if (do_bond) { 3006 if (bonding_info->master.bond_mode == 3007 BOND_MODE_ACTIVEBACKUP) { 3008 /* in active-backup mode virtual ports are 3009 * mapped to the physical port of the active 3010 * slave */ 3011 if (bonding_info->slave.state == 3012 BOND_STATE_BACKUP) { 3013 if (port == 1) { 3014 v2p_port1 = 2; 3015 v2p_port2 = 2; 3016 } else { 3017 v2p_port1 = 1; 3018 v2p_port2 = 1; 3019 } 3020 } else { /* BOND_STATE_ACTIVE */ 3021 if (port == 1) { 3022 v2p_port1 = 1; 3023 v2p_port2 = 1; 3024 } else { 3025 v2p_port1 = 2; 3026 v2p_port2 = 2; 3027 } 3028 } 3029 } else { /* Active-Active */ 3030 /* in active-active mode a virtual port is 3031 * mapped to the native physical port if and only 3032 * if the physical port is up */ 3033 __s8 link = bonding_info->slave.link; 3034 3035 if (port == 1) 3036 v2p_port2 = 2; 3037 else 3038 v2p_port1 = 1; 3039 if ((link == BOND_LINK_UP) || 3040 (link == BOND_LINK_FAIL)) { 3041 if (port == 1) 3042 v2p_port1 = 1; 3043 else 3044 v2p_port2 = 2; 3045 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 3046 if (port == 1) 3047 v2p_port1 = 2; 3048 else 3049 v2p_port2 = 1; 3050 } 3051 } 3052 } 3053 } 3054 3055 mlx4_en_queue_bond_work(priv, do_bond, 3056 v2p_port1, v2p_port2); 3057 3058 return NOTIFY_DONE; 3059 } 3060 3061 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 3062 struct mlx4_en_stats_bitmap *stats_bitmap, 3063 u8 rx_ppp, u8 rx_pause, 3064 u8 tx_ppp, u8 tx_pause) 3065 { 3066 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 3067 3068 if (!mlx4_is_slave(dev) && 3069 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 3070 mutex_lock(&stats_bitmap->mutex); 3071 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 3072 3073 if (rx_ppp) 3074 bitmap_set(stats_bitmap->bitmap, last_i, 3075 NUM_FLOW_PRIORITY_STATS_RX); 3076 last_i += NUM_FLOW_PRIORITY_STATS_RX; 3077 3078 if (rx_pause && !(rx_ppp)) 3079 bitmap_set(stats_bitmap->bitmap, last_i, 3080 NUM_FLOW_STATS_RX); 3081 last_i += NUM_FLOW_STATS_RX; 3082 3083 if (tx_ppp) 3084 bitmap_set(stats_bitmap->bitmap, last_i, 3085 NUM_FLOW_PRIORITY_STATS_TX); 3086 last_i += NUM_FLOW_PRIORITY_STATS_TX; 3087 3088 if (tx_pause && !(tx_ppp)) 3089 bitmap_set(stats_bitmap->bitmap, last_i, 3090 NUM_FLOW_STATS_TX); 3091 last_i += NUM_FLOW_STATS_TX; 3092 3093 mutex_unlock(&stats_bitmap->mutex); 3094 } 3095 } 3096 3097 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 3098 struct mlx4_en_stats_bitmap *stats_bitmap, 3099 u8 rx_ppp, u8 rx_pause, 3100 u8 tx_ppp, u8 tx_pause) 3101 { 3102 int last_i = 0; 3103 3104 mutex_init(&stats_bitmap->mutex); 3105 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 3106 3107 if (mlx4_is_slave(dev)) { 3108 bitmap_set(stats_bitmap->bitmap, last_i + 3109 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 3110 bitmap_set(stats_bitmap->bitmap, last_i + 3111 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 3112 bitmap_set(stats_bitmap->bitmap, last_i + 3113 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 3114 bitmap_set(stats_bitmap->bitmap, last_i + 3115 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 3116 bitmap_set(stats_bitmap->bitmap, last_i + 3117 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 3118 bitmap_set(stats_bitmap->bitmap, last_i + 3119 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 3120 } else { 3121 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 3122 } 3123 last_i += NUM_MAIN_STATS; 3124 3125 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 3126 last_i += NUM_PORT_STATS; 3127 3128 if (mlx4_is_master(dev)) 3129 bitmap_set(stats_bitmap->bitmap, last_i, 3130 NUM_PF_STATS); 3131 last_i += NUM_PF_STATS; 3132 3133 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 3134 rx_ppp, rx_pause, 3135 tx_ppp, tx_pause); 3136 last_i += NUM_FLOW_STATS; 3137 3138 if (!mlx4_is_slave(dev)) 3139 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 3140 last_i += NUM_PKT_STATS; 3141 3142 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); 3143 last_i += NUM_XDP_STATS; 3144 } 3145 3146 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 3147 struct mlx4_en_port_profile *prof) 3148 { 3149 struct net_device *dev; 3150 struct mlx4_en_priv *priv; 3151 int i, t; 3152 int err; 3153 3154 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3155 MAX_TX_RINGS, MAX_RX_RINGS); 3156 if (dev == NULL) 3157 return -ENOMEM; 3158 3159 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]); 3160 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 3161 3162 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 3163 dev->dev_port = port - 1; 3164 3165 /* 3166 * Initialize driver private data 3167 */ 3168 3169 priv = netdev_priv(dev); 3170 memset(priv, 0, sizeof(struct mlx4_en_priv)); 3171 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3172 spin_lock_init(&priv->stats_lock); 3173 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3174 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3175 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3176 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3177 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3178 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 3179 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 3180 #ifdef CONFIG_RFS_ACCEL 3181 INIT_LIST_HEAD(&priv->filters); 3182 spin_lock_init(&priv->filters_lock); 3183 #endif 3184 3185 priv->dev = dev; 3186 priv->mdev = mdev; 3187 priv->ddev = &mdev->pdev->dev; 3188 priv->prof = prof; 3189 priv->port = port; 3190 priv->port_up = false; 3191 priv->flags = prof->flags; 3192 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 3193 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 3194 MLX4_WQE_CTRL_SOLICITED); 3195 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 3196 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 3197 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 3198 3199 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 3200 priv->tx_ring_num[t] = prof->tx_ring_num[t]; 3201 if (!priv->tx_ring_num[t]) 3202 continue; 3203 3204 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * 3205 MAX_TX_RINGS, GFP_KERNEL); 3206 if (!priv->tx_ring[t]) { 3207 err = -ENOMEM; 3208 goto err_free_tx; 3209 } 3210 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * 3211 MAX_TX_RINGS, GFP_KERNEL); 3212 if (!priv->tx_cq[t]) { 3213 kfree(priv->tx_ring[t]); 3214 err = -ENOMEM; 3215 goto out; 3216 } 3217 } 3218 priv->rx_ring_num = prof->rx_ring_num; 3219 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 3220 priv->cqe_size = mdev->dev->caps.cqe_size; 3221 priv->mac_index = -1; 3222 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3223 #ifdef CONFIG_MLX4_EN_DCB 3224 if (!mlx4_is_slave(priv->mdev->dev)) { 3225 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | 3226 DCB_CAP_DCBX_VER_IEEE; 3227 priv->flags |= MLX4_EN_DCB_ENABLED; 3228 priv->cee_config.pfc_state = false; 3229 3230 for (i = 0; i < MLX4_EN_NUM_UP; i++) 3231 priv->cee_config.dcb_pfc[i] = pfc_disabled; 3232 3233 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3234 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3235 } else { 3236 en_info(priv, "enabling only PFC DCB ops\n"); 3237 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 3238 } 3239 } 3240 #endif 3241 3242 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 3243 INIT_HLIST_HEAD(&priv->mac_hash[i]); 3244 3245 /* Query for default mac and max mtu */ 3246 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 3247 3248 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 3249 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 3250 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3251 3252 /* Set default MAC */ 3253 dev->addr_len = ETH_ALEN; 3254 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3255 if (!is_valid_ether_addr(dev->dev_addr)) { 3256 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 3257 priv->port, dev->dev_addr); 3258 err = -EINVAL; 3259 goto out; 3260 } else if (mlx4_is_slave(priv->mdev->dev) && 3261 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3262 /* Random MAC was assigned in mlx4_slave_cap 3263 * in mlx4_core module 3264 */ 3265 dev->addr_assign_type |= NET_ADDR_RANDOM; 3266 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3267 } 3268 3269 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3270 3271 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3272 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3273 err = mlx4_en_alloc_resources(priv); 3274 if (err) 3275 goto out; 3276 3277 /* Initialize time stamping config */ 3278 priv->hwtstamp_config.flags = 0; 3279 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3280 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3281 3282 /* Allocate page for receive rings */ 3283 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3284 MLX4_EN_PAGE_SIZE); 3285 if (err) { 3286 en_err(priv, "Failed to allocate page for rx qps\n"); 3287 goto out; 3288 } 3289 priv->allocated = 1; 3290 3291 /* 3292 * Initialize netdev entry points 3293 */ 3294 if (mlx4_is_master(priv->mdev->dev)) 3295 dev->netdev_ops = &mlx4_netdev_ops_master; 3296 else 3297 dev->netdev_ops = &mlx4_netdev_ops; 3298 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3299 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 3300 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3301 3302 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3303 3304 /* 3305 * Set driver features 3306 */ 3307 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3308 if (mdev->LSO_support) 3309 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3310 3311 dev->vlan_features = dev->hw_features; 3312 3313 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3314 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3315 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3316 NETIF_F_HW_VLAN_CTAG_FILTER; 3317 dev->hw_features |= NETIF_F_LOOPBACK | 3318 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3319 3320 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3321 dev->features |= NETIF_F_HW_VLAN_STAG_RX | 3322 NETIF_F_HW_VLAN_STAG_FILTER; 3323 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3324 } 3325 3326 if (mlx4_is_slave(mdev->dev)) { 3327 bool vlan_offload_disabled; 3328 int phv; 3329 3330 err = get_phv_bit(mdev->dev, port, &phv); 3331 if (!err && phv) { 3332 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3333 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3334 } 3335 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port, 3336 &vlan_offload_disabled); 3337 if (!err && vlan_offload_disabled) { 3338 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3339 NETIF_F_HW_VLAN_CTAG_RX | 3340 NETIF_F_HW_VLAN_STAG_TX | 3341 NETIF_F_HW_VLAN_STAG_RX); 3342 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3343 NETIF_F_HW_VLAN_CTAG_RX | 3344 NETIF_F_HW_VLAN_STAG_TX | 3345 NETIF_F_HW_VLAN_STAG_RX); 3346 } 3347 } else { 3348 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3349 !(mdev->dev->caps.flags2 & 3350 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3351 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3352 } 3353 3354 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3355 dev->hw_features |= NETIF_F_RXFCS; 3356 3357 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3358 dev->hw_features |= NETIF_F_RXALL; 3359 3360 if (mdev->dev->caps.steering_mode == 3361 MLX4_STEERING_MODE_DEVICE_MANAGED && 3362 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3363 dev->hw_features |= NETIF_F_NTUPLE; 3364 3365 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3366 dev->priv_flags |= IFF_UNICAST_FLT; 3367 3368 /* Setting a default hash function value */ 3369 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3370 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3371 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3372 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3373 } else { 3374 en_warn(priv, 3375 "No RSS hash capabilities exposed, using Toeplitz\n"); 3376 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3377 } 3378 3379 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3380 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3381 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3382 NETIF_F_GSO_PARTIAL; 3383 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3384 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3385 NETIF_F_GSO_PARTIAL; 3386 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3387 } 3388 3389 /* MTU range: 46 - hw-specific max */ 3390 dev->min_mtu = MLX4_EN_MIN_MTU; 3391 dev->max_mtu = priv->max_mtu; 3392 3393 mdev->pndev[port] = dev; 3394 mdev->upper[port] = NULL; 3395 3396 netif_carrier_off(dev); 3397 mlx4_en_set_default_moderation(priv); 3398 3399 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]); 3400 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3401 3402 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3403 3404 /* Configure port */ 3405 mlx4_en_calc_rx_buf(dev); 3406 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3407 priv->rx_skb_size + ETH_FCS_LEN, 3408 prof->tx_pause, prof->tx_ppp, 3409 prof->rx_pause, prof->rx_ppp); 3410 if (err) { 3411 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3412 priv->port, err); 3413 goto out; 3414 } 3415 3416 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3417 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3418 if (err) { 3419 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3420 err); 3421 goto out; 3422 } 3423 } 3424 3425 /* Init port */ 3426 en_warn(priv, "Initializing port\n"); 3427 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3428 if (err) { 3429 en_err(priv, "Failed Initializing port\n"); 3430 goto out; 3431 } 3432 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3433 3434 /* Initialize time stamp mechanism */ 3435 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3436 mlx4_en_init_timestamp(mdev); 3437 3438 queue_delayed_work(mdev->workqueue, &priv->service_task, 3439 SERVICE_TASK_DELAY); 3440 3441 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3442 mdev->profile.prof[priv->port].rx_ppp, 3443 mdev->profile.prof[priv->port].rx_pause, 3444 mdev->profile.prof[priv->port].tx_ppp, 3445 mdev->profile.prof[priv->port].tx_pause); 3446 3447 err = register_netdev(dev); 3448 if (err) { 3449 en_err(priv, "Netdev registration failed for port %d\n", port); 3450 goto out; 3451 } 3452 3453 priv->registered = 1; 3454 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3455 dev); 3456 3457 return 0; 3458 3459 err_free_tx: 3460 while (t--) { 3461 kfree(priv->tx_ring[t]); 3462 kfree(priv->tx_cq[t]); 3463 } 3464 out: 3465 mlx4_en_destroy_netdev(dev); 3466 return err; 3467 } 3468 3469 int mlx4_en_reset_config(struct net_device *dev, 3470 struct hwtstamp_config ts_config, 3471 netdev_features_t features) 3472 { 3473 struct mlx4_en_priv *priv = netdev_priv(dev); 3474 struct mlx4_en_dev *mdev = priv->mdev; 3475 struct mlx4_en_port_profile new_prof; 3476 struct mlx4_en_priv *tmp; 3477 int port_up = 0; 3478 int err = 0; 3479 3480 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3481 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3482 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3483 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3484 return 0; /* Nothing to change */ 3485 3486 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3487 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3488 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3489 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3490 return -EINVAL; 3491 } 3492 3493 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3494 if (!tmp) 3495 return -ENOMEM; 3496 3497 mutex_lock(&mdev->state_lock); 3498 3499 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3500 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3501 3502 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 3503 if (err) 3504 goto out; 3505 3506 if (priv->port_up) { 3507 port_up = 1; 3508 mlx4_en_stop_port(dev, 1); 3509 } 3510 3511 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", 3512 ts_config.rx_filter, 3513 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3514 3515 mlx4_en_safe_replace_resources(priv, tmp); 3516 3517 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3518 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3519 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3520 else 3521 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3522 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3523 /* RX time-stamping is OFF, update the RX vlan offload 3524 * to the latest wanted state 3525 */ 3526 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3527 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3528 else 3529 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3530 } 3531 3532 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3533 if (features & NETIF_F_RXFCS) 3534 dev->features |= NETIF_F_RXFCS; 3535 else 3536 dev->features &= ~NETIF_F_RXFCS; 3537 } 3538 3539 /* RX vlan offload and RX time-stamping can't co-exist ! 3540 * Regardless of the caller's choice, 3541 * Turn Off RX vlan offload in case of time-stamping is ON 3542 */ 3543 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3544 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3545 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3546 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3547 } 3548 3549 if (port_up) { 3550 err = mlx4_en_start_port(dev); 3551 if (err) 3552 en_err(priv, "Failed starting port\n"); 3553 } 3554 3555 out: 3556 mutex_unlock(&mdev->state_lock); 3557 kfree(tmp); 3558 if (!err) 3559 netdev_features_change(dev); 3560 return err; 3561 } 3562