1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/bpf.h> 35 #include <linux/etherdevice.h> 36 #include <linux/tcp.h> 37 #include <linux/if_vlan.h> 38 #include <linux/delay.h> 39 #include <linux/slab.h> 40 #include <linux/hash.h> 41 #include <net/ip.h> 42 #include <net/busy_poll.h> 43 #include <net/vxlan.h> 44 #include <net/devlink.h> 45 46 #include <linux/mlx4/driver.h> 47 #include <linux/mlx4/device.h> 48 #include <linux/mlx4/cmd.h> 49 #include <linux/mlx4/cq.h> 50 51 #include "mlx4_en.h" 52 #include "en_port.h" 53 54 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \ 55 XDP_PACKET_HEADROOM)) 56 57 int mlx4_en_setup_tc(struct net_device *dev, u8 up) 58 { 59 struct mlx4_en_priv *priv = netdev_priv(dev); 60 int i; 61 unsigned int offset = 0; 62 63 if (up && up != MLX4_EN_NUM_UP) 64 return -EINVAL; 65 66 netdev_set_num_tc(dev, up); 67 68 /* Partition Tx queues evenly amongst UP's */ 69 for (i = 0; i < up; i++) { 70 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); 71 offset += priv->num_tx_rings_p_up; 72 } 73 74 #ifdef CONFIG_MLX4_EN_DCB 75 if (!mlx4_is_slave(priv->mdev->dev)) { 76 if (up) { 77 if (priv->dcbx_cap) 78 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 79 } else { 80 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 81 priv->cee_config.pfc_state = false; 82 } 83 } 84 #endif /* CONFIG_MLX4_EN_DCB */ 85 86 return 0; 87 } 88 89 static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 90 struct tc_to_netdev *tc) 91 { 92 if (tc->type != TC_SETUP_MQPRIO) 93 return -EINVAL; 94 95 return mlx4_en_setup_tc(dev, tc->tc); 96 } 97 98 #ifdef CONFIG_RFS_ACCEL 99 100 struct mlx4_en_filter { 101 struct list_head next; 102 struct work_struct work; 103 104 u8 ip_proto; 105 __be32 src_ip; 106 __be32 dst_ip; 107 __be16 src_port; 108 __be16 dst_port; 109 110 int rxq_index; 111 struct mlx4_en_priv *priv; 112 u32 flow_id; /* RFS infrastructure id */ 113 int id; /* mlx4_en driver id */ 114 u64 reg_id; /* Flow steering API id */ 115 u8 activated; /* Used to prevent expiry before filter 116 * is attached 117 */ 118 struct hlist_node filter_chain; 119 }; 120 121 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 122 123 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 124 { 125 switch (ip_proto) { 126 case IPPROTO_UDP: 127 return MLX4_NET_TRANS_RULE_ID_UDP; 128 case IPPROTO_TCP: 129 return MLX4_NET_TRANS_RULE_ID_TCP; 130 default: 131 return MLX4_NET_TRANS_RULE_NUM; 132 } 133 }; 134 135 /* Must not acquire state_lock, as its corresponding work_sync 136 * is done under it. 137 */ 138 static void mlx4_en_filter_work(struct work_struct *work) 139 { 140 struct mlx4_en_filter *filter = container_of(work, 141 struct mlx4_en_filter, 142 work); 143 struct mlx4_en_priv *priv = filter->priv; 144 struct mlx4_spec_list spec_tcp_udp = { 145 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 146 { 147 .tcp_udp = { 148 .dst_port = filter->dst_port, 149 .dst_port_msk = (__force __be16)-1, 150 .src_port = filter->src_port, 151 .src_port_msk = (__force __be16)-1, 152 }, 153 }, 154 }; 155 struct mlx4_spec_list spec_ip = { 156 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 157 { 158 .ipv4 = { 159 .dst_ip = filter->dst_ip, 160 .dst_ip_msk = (__force __be32)-1, 161 .src_ip = filter->src_ip, 162 .src_ip_msk = (__force __be32)-1, 163 }, 164 }, 165 }; 166 struct mlx4_spec_list spec_eth = { 167 .id = MLX4_NET_TRANS_RULE_ID_ETH, 168 }; 169 struct mlx4_net_trans_rule rule = { 170 .list = LIST_HEAD_INIT(rule.list), 171 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 172 .exclusive = 1, 173 .allow_loopback = 1, 174 .promisc_mode = MLX4_FS_REGULAR, 175 .port = priv->port, 176 .priority = MLX4_DOMAIN_RFS, 177 }; 178 int rc; 179 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 180 181 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 182 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 183 filter->ip_proto); 184 goto ignore; 185 } 186 list_add_tail(&spec_eth.list, &rule.list); 187 list_add_tail(&spec_ip.list, &rule.list); 188 list_add_tail(&spec_tcp_udp.list, &rule.list); 189 190 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 191 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 192 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 193 194 filter->activated = 0; 195 196 if (filter->reg_id) { 197 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 198 if (rc && rc != -ENOENT) 199 en_err(priv, "Error detaching flow. rc = %d\n", rc); 200 } 201 202 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 203 if (rc) 204 en_err(priv, "Error attaching flow. err = %d\n", rc); 205 206 ignore: 207 mlx4_en_filter_rfs_expire(priv); 208 209 filter->activated = 1; 210 } 211 212 static inline struct hlist_head * 213 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 214 __be16 src_port, __be16 dst_port) 215 { 216 unsigned long l; 217 int bucket_idx; 218 219 l = (__force unsigned long)src_port | 220 ((__force unsigned long)dst_port << 2); 221 l ^= (__force unsigned long)(src_ip ^ dst_ip); 222 223 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 224 225 return &priv->filter_hash[bucket_idx]; 226 } 227 228 static struct mlx4_en_filter * 229 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 230 __be32 dst_ip, u8 ip_proto, __be16 src_port, 231 __be16 dst_port, u32 flow_id) 232 { 233 struct mlx4_en_filter *filter = NULL; 234 235 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 236 if (!filter) 237 return NULL; 238 239 filter->priv = priv; 240 filter->rxq_index = rxq_index; 241 INIT_WORK(&filter->work, mlx4_en_filter_work); 242 243 filter->src_ip = src_ip; 244 filter->dst_ip = dst_ip; 245 filter->ip_proto = ip_proto; 246 filter->src_port = src_port; 247 filter->dst_port = dst_port; 248 249 filter->flow_id = flow_id; 250 251 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 252 253 list_add_tail(&filter->next, &priv->filters); 254 hlist_add_head(&filter->filter_chain, 255 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 256 dst_port)); 257 258 return filter; 259 } 260 261 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 262 { 263 struct mlx4_en_priv *priv = filter->priv; 264 int rc; 265 266 list_del(&filter->next); 267 268 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 269 if (rc && rc != -ENOENT) 270 en_err(priv, "Error detaching flow. rc = %d\n", rc); 271 272 kfree(filter); 273 } 274 275 static inline struct mlx4_en_filter * 276 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 277 u8 ip_proto, __be16 src_port, __be16 dst_port) 278 { 279 struct mlx4_en_filter *filter; 280 struct mlx4_en_filter *ret = NULL; 281 282 hlist_for_each_entry(filter, 283 filter_hash_bucket(priv, src_ip, dst_ip, 284 src_port, dst_port), 285 filter_chain) { 286 if (filter->src_ip == src_ip && 287 filter->dst_ip == dst_ip && 288 filter->ip_proto == ip_proto && 289 filter->src_port == src_port && 290 filter->dst_port == dst_port) { 291 ret = filter; 292 break; 293 } 294 } 295 296 return ret; 297 } 298 299 static int 300 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 301 u16 rxq_index, u32 flow_id) 302 { 303 struct mlx4_en_priv *priv = netdev_priv(net_dev); 304 struct mlx4_en_filter *filter; 305 const struct iphdr *ip; 306 const __be16 *ports; 307 u8 ip_proto; 308 __be32 src_ip; 309 __be32 dst_ip; 310 __be16 src_port; 311 __be16 dst_port; 312 int nhoff = skb_network_offset(skb); 313 int ret = 0; 314 315 if (skb->protocol != htons(ETH_P_IP)) 316 return -EPROTONOSUPPORT; 317 318 ip = (const struct iphdr *)(skb->data + nhoff); 319 if (ip_is_fragment(ip)) 320 return -EPROTONOSUPPORT; 321 322 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 323 return -EPROTONOSUPPORT; 324 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 325 326 ip_proto = ip->protocol; 327 src_ip = ip->saddr; 328 dst_ip = ip->daddr; 329 src_port = ports[0]; 330 dst_port = ports[1]; 331 332 spin_lock_bh(&priv->filters_lock); 333 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 334 src_port, dst_port); 335 if (filter) { 336 if (filter->rxq_index == rxq_index) 337 goto out; 338 339 filter->rxq_index = rxq_index; 340 } else { 341 filter = mlx4_en_filter_alloc(priv, rxq_index, 342 src_ip, dst_ip, ip_proto, 343 src_port, dst_port, flow_id); 344 if (!filter) { 345 ret = -ENOMEM; 346 goto err; 347 } 348 } 349 350 queue_work(priv->mdev->workqueue, &filter->work); 351 352 out: 353 ret = filter->id; 354 err: 355 spin_unlock_bh(&priv->filters_lock); 356 357 return ret; 358 } 359 360 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 361 { 362 struct mlx4_en_filter *filter, *tmp; 363 LIST_HEAD(del_list); 364 365 spin_lock_bh(&priv->filters_lock); 366 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 367 list_move(&filter->next, &del_list); 368 hlist_del(&filter->filter_chain); 369 } 370 spin_unlock_bh(&priv->filters_lock); 371 372 list_for_each_entry_safe(filter, tmp, &del_list, next) { 373 cancel_work_sync(&filter->work); 374 mlx4_en_filter_free(filter); 375 } 376 } 377 378 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 379 { 380 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 381 LIST_HEAD(del_list); 382 int i = 0; 383 384 spin_lock_bh(&priv->filters_lock); 385 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 386 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 387 break; 388 389 if (filter->activated && 390 !work_pending(&filter->work) && 391 rps_may_expire_flow(priv->dev, 392 filter->rxq_index, filter->flow_id, 393 filter->id)) { 394 list_move(&filter->next, &del_list); 395 hlist_del(&filter->filter_chain); 396 } else 397 last_filter = filter; 398 399 i++; 400 } 401 402 if (last_filter && (&last_filter->next != priv->filters.next)) 403 list_move(&priv->filters, &last_filter->next); 404 405 spin_unlock_bh(&priv->filters_lock); 406 407 list_for_each_entry_safe(filter, tmp, &del_list, next) 408 mlx4_en_filter_free(filter); 409 } 410 #endif 411 412 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 413 __be16 proto, u16 vid) 414 { 415 struct mlx4_en_priv *priv = netdev_priv(dev); 416 struct mlx4_en_dev *mdev = priv->mdev; 417 int err; 418 int idx; 419 420 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 421 422 set_bit(vid, priv->active_vlans); 423 424 /* Add VID to port VLAN filter */ 425 mutex_lock(&mdev->state_lock); 426 if (mdev->device_up && priv->port_up) { 427 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 428 if (err) { 429 en_err(priv, "Failed configuring VLAN filter\n"); 430 goto out; 431 } 432 } 433 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); 434 if (err) 435 en_dbg(HW, priv, "Failed adding vlan %d\n", vid); 436 437 out: 438 mutex_unlock(&mdev->state_lock); 439 return err; 440 } 441 442 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 443 __be16 proto, u16 vid) 444 { 445 struct mlx4_en_priv *priv = netdev_priv(dev); 446 struct mlx4_en_dev *mdev = priv->mdev; 447 int err = 0; 448 449 en_dbg(HW, priv, "Killing VID:%d\n", vid); 450 451 clear_bit(vid, priv->active_vlans); 452 453 /* Remove VID from port VLAN filter */ 454 mutex_lock(&mdev->state_lock); 455 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 456 457 if (mdev->device_up && priv->port_up) { 458 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 459 if (err) 460 en_err(priv, "Failed configuring VLAN filter\n"); 461 } 462 mutex_unlock(&mdev->state_lock); 463 464 return err; 465 } 466 467 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 468 { 469 int i; 470 for (i = ETH_ALEN - 1; i >= 0; --i) { 471 dst_mac[i] = src_mac & 0xff; 472 src_mac >>= 8; 473 } 474 memset(&dst_mac[ETH_ALEN], 0, 2); 475 } 476 477 478 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 479 int qpn, u64 *reg_id) 480 { 481 int err; 482 483 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 484 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 485 return 0; /* do nothing */ 486 487 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 488 MLX4_DOMAIN_NIC, reg_id); 489 if (err) { 490 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 491 return err; 492 } 493 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); 494 return 0; 495 } 496 497 498 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 499 unsigned char *mac, int *qpn, u64 *reg_id) 500 { 501 struct mlx4_en_dev *mdev = priv->mdev; 502 struct mlx4_dev *dev = mdev->dev; 503 int err; 504 505 switch (dev->caps.steering_mode) { 506 case MLX4_STEERING_MODE_B0: { 507 struct mlx4_qp qp; 508 u8 gid[16] = {0}; 509 510 qp.qpn = *qpn; 511 memcpy(&gid[10], mac, ETH_ALEN); 512 gid[5] = priv->port; 513 514 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 515 break; 516 } 517 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 518 struct mlx4_spec_list spec_eth = { {NULL} }; 519 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 520 521 struct mlx4_net_trans_rule rule = { 522 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 523 .exclusive = 0, 524 .allow_loopback = 1, 525 .promisc_mode = MLX4_FS_REGULAR, 526 .priority = MLX4_DOMAIN_NIC, 527 }; 528 529 rule.port = priv->port; 530 rule.qpn = *qpn; 531 INIT_LIST_HEAD(&rule.list); 532 533 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 534 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 535 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 536 list_add_tail(&spec_eth.list, &rule.list); 537 538 err = mlx4_flow_attach(dev, &rule, reg_id); 539 break; 540 } 541 default: 542 return -EINVAL; 543 } 544 if (err) 545 en_warn(priv, "Failed Attaching Unicast\n"); 546 547 return err; 548 } 549 550 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 551 unsigned char *mac, int qpn, u64 reg_id) 552 { 553 struct mlx4_en_dev *mdev = priv->mdev; 554 struct mlx4_dev *dev = mdev->dev; 555 556 switch (dev->caps.steering_mode) { 557 case MLX4_STEERING_MODE_B0: { 558 struct mlx4_qp qp; 559 u8 gid[16] = {0}; 560 561 qp.qpn = qpn; 562 memcpy(&gid[10], mac, ETH_ALEN); 563 gid[5] = priv->port; 564 565 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 566 break; 567 } 568 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 569 mlx4_flow_detach(dev, reg_id); 570 break; 571 } 572 default: 573 en_err(priv, "Invalid steering mode.\n"); 574 } 575 } 576 577 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 578 { 579 struct mlx4_en_dev *mdev = priv->mdev; 580 struct mlx4_dev *dev = mdev->dev; 581 int index = 0; 582 int err = 0; 583 int *qpn = &priv->base_qpn; 584 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 585 586 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 587 priv->dev->dev_addr); 588 index = mlx4_register_mac(dev, priv->port, mac); 589 if (index < 0) { 590 err = index; 591 en_err(priv, "Failed adding MAC: %pM\n", 592 priv->dev->dev_addr); 593 return err; 594 } 595 596 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 597 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 598 *qpn = base_qpn + index; 599 return 0; 600 } 601 602 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); 603 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 604 if (err) { 605 en_err(priv, "Failed to reserve qp for mac registration\n"); 606 mlx4_unregister_mac(dev, priv->port, mac); 607 return err; 608 } 609 610 return 0; 611 } 612 613 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 614 { 615 struct mlx4_en_dev *mdev = priv->mdev; 616 struct mlx4_dev *dev = mdev->dev; 617 int qpn = priv->base_qpn; 618 619 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 620 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 621 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 622 priv->dev->dev_addr); 623 mlx4_unregister_mac(dev, priv->port, mac); 624 } else { 625 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 626 priv->port, qpn); 627 mlx4_qp_release_range(dev, qpn, 1); 628 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 629 } 630 } 631 632 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 633 unsigned char *new_mac, unsigned char *prev_mac) 634 { 635 struct mlx4_en_dev *mdev = priv->mdev; 636 struct mlx4_dev *dev = mdev->dev; 637 int err = 0; 638 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); 639 640 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 641 struct hlist_head *bucket; 642 unsigned int mac_hash; 643 struct mlx4_mac_entry *entry; 644 struct hlist_node *tmp; 645 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); 646 647 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 648 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 649 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 650 mlx4_en_uc_steer_release(priv, entry->mac, 651 qpn, entry->reg_id); 652 mlx4_unregister_mac(dev, priv->port, 653 prev_mac_u64); 654 hlist_del_rcu(&entry->hlist); 655 synchronize_rcu(); 656 memcpy(entry->mac, new_mac, ETH_ALEN); 657 entry->reg_id = 0; 658 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 659 hlist_add_head_rcu(&entry->hlist, 660 &priv->mac_hash[mac_hash]); 661 mlx4_register_mac(dev, priv->port, new_mac_u64); 662 err = mlx4_en_uc_steer_add(priv, new_mac, 663 &qpn, 664 &entry->reg_id); 665 if (err) 666 return err; 667 if (priv->tunnel_reg_id) { 668 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 669 priv->tunnel_reg_id = 0; 670 } 671 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, 672 &priv->tunnel_reg_id); 673 return err; 674 } 675 } 676 return -EINVAL; 677 } 678 679 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 680 } 681 682 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, 683 unsigned char new_mac[ETH_ALEN + 2]) 684 { 685 int err = 0; 686 687 if (priv->port_up) { 688 /* Remove old MAC and insert the new one */ 689 err = mlx4_en_replace_mac(priv, priv->base_qpn, 690 new_mac, priv->current_mac); 691 if (err) 692 en_err(priv, "Failed changing HW MAC address\n"); 693 } else 694 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 695 696 if (!err) 697 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); 698 699 return err; 700 } 701 702 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 703 { 704 struct mlx4_en_priv *priv = netdev_priv(dev); 705 struct mlx4_en_dev *mdev = priv->mdev; 706 struct sockaddr *saddr = addr; 707 unsigned char new_mac[ETH_ALEN + 2]; 708 int err; 709 710 if (!is_valid_ether_addr(saddr->sa_data)) 711 return -EADDRNOTAVAIL; 712 713 mutex_lock(&mdev->state_lock); 714 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 715 err = mlx4_en_do_set_mac(priv, new_mac); 716 if (!err) 717 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 718 mutex_unlock(&mdev->state_lock); 719 720 return err; 721 } 722 723 static void mlx4_en_clear_list(struct net_device *dev) 724 { 725 struct mlx4_en_priv *priv = netdev_priv(dev); 726 struct mlx4_en_mc_list *tmp, *mc_to_del; 727 728 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 729 list_del(&mc_to_del->list); 730 kfree(mc_to_del); 731 } 732 } 733 734 static void mlx4_en_cache_mclist(struct net_device *dev) 735 { 736 struct mlx4_en_priv *priv = netdev_priv(dev); 737 struct netdev_hw_addr *ha; 738 struct mlx4_en_mc_list *tmp; 739 740 mlx4_en_clear_list(dev); 741 netdev_for_each_mc_addr(ha, dev) { 742 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 743 if (!tmp) { 744 mlx4_en_clear_list(dev); 745 return; 746 } 747 memcpy(tmp->addr, ha->addr, ETH_ALEN); 748 list_add_tail(&tmp->list, &priv->mc_list); 749 } 750 } 751 752 static void update_mclist_flags(struct mlx4_en_priv *priv, 753 struct list_head *dst, 754 struct list_head *src) 755 { 756 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 757 bool found; 758 759 /* Find all the entries that should be removed from dst, 760 * These are the entries that are not found in src 761 */ 762 list_for_each_entry(dst_tmp, dst, list) { 763 found = false; 764 list_for_each_entry(src_tmp, src, list) { 765 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 766 found = true; 767 break; 768 } 769 } 770 if (!found) 771 dst_tmp->action = MCLIST_REM; 772 } 773 774 /* Add entries that exist in src but not in dst 775 * mark them as need to add 776 */ 777 list_for_each_entry(src_tmp, src, list) { 778 found = false; 779 list_for_each_entry(dst_tmp, dst, list) { 780 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 781 dst_tmp->action = MCLIST_NONE; 782 found = true; 783 break; 784 } 785 } 786 if (!found) { 787 new_mc = kmemdup(src_tmp, 788 sizeof(struct mlx4_en_mc_list), 789 GFP_KERNEL); 790 if (!new_mc) 791 return; 792 793 new_mc->action = MCLIST_ADD; 794 list_add_tail(&new_mc->list, dst); 795 } 796 } 797 } 798 799 static void mlx4_en_set_rx_mode(struct net_device *dev) 800 { 801 struct mlx4_en_priv *priv = netdev_priv(dev); 802 803 if (!priv->port_up) 804 return; 805 806 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 807 } 808 809 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 810 struct mlx4_en_dev *mdev) 811 { 812 int err = 0; 813 814 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 815 if (netif_msg_rx_status(priv)) 816 en_warn(priv, "Entering promiscuous mode\n"); 817 priv->flags |= MLX4_EN_FLAG_PROMISC; 818 819 /* Enable promiscouos mode */ 820 switch (mdev->dev->caps.steering_mode) { 821 case MLX4_STEERING_MODE_DEVICE_MANAGED: 822 err = mlx4_flow_steer_promisc_add(mdev->dev, 823 priv->port, 824 priv->base_qpn, 825 MLX4_FS_ALL_DEFAULT); 826 if (err) 827 en_err(priv, "Failed enabling promiscuous mode\n"); 828 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 829 break; 830 831 case MLX4_STEERING_MODE_B0: 832 err = mlx4_unicast_promisc_add(mdev->dev, 833 priv->base_qpn, 834 priv->port); 835 if (err) 836 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 837 838 /* Add the default qp number as multicast 839 * promisc 840 */ 841 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 842 err = mlx4_multicast_promisc_add(mdev->dev, 843 priv->base_qpn, 844 priv->port); 845 if (err) 846 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 847 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 848 } 849 break; 850 851 case MLX4_STEERING_MODE_A0: 852 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 853 priv->port, 854 priv->base_qpn, 855 1); 856 if (err) 857 en_err(priv, "Failed enabling promiscuous mode\n"); 858 break; 859 } 860 861 /* Disable port multicast filter (unconditionally) */ 862 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 863 0, MLX4_MCAST_DISABLE); 864 if (err) 865 en_err(priv, "Failed disabling multicast filter\n"); 866 } 867 } 868 869 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 870 struct mlx4_en_dev *mdev) 871 { 872 int err = 0; 873 874 if (netif_msg_rx_status(priv)) 875 en_warn(priv, "Leaving promiscuous mode\n"); 876 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 877 878 /* Disable promiscouos mode */ 879 switch (mdev->dev->caps.steering_mode) { 880 case MLX4_STEERING_MODE_DEVICE_MANAGED: 881 err = mlx4_flow_steer_promisc_remove(mdev->dev, 882 priv->port, 883 MLX4_FS_ALL_DEFAULT); 884 if (err) 885 en_err(priv, "Failed disabling promiscuous mode\n"); 886 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 887 break; 888 889 case MLX4_STEERING_MODE_B0: 890 err = mlx4_unicast_promisc_remove(mdev->dev, 891 priv->base_qpn, 892 priv->port); 893 if (err) 894 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 895 /* Disable Multicast promisc */ 896 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 897 err = mlx4_multicast_promisc_remove(mdev->dev, 898 priv->base_qpn, 899 priv->port); 900 if (err) 901 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 902 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 903 } 904 break; 905 906 case MLX4_STEERING_MODE_A0: 907 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 908 priv->port, 909 priv->base_qpn, 0); 910 if (err) 911 en_err(priv, "Failed disabling promiscuous mode\n"); 912 break; 913 } 914 } 915 916 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 917 struct net_device *dev, 918 struct mlx4_en_dev *mdev) 919 { 920 struct mlx4_en_mc_list *mclist, *tmp; 921 u64 mcast_addr = 0; 922 u8 mc_list[16] = {0}; 923 int err = 0; 924 925 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 926 if (dev->flags & IFF_ALLMULTI) { 927 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 928 0, MLX4_MCAST_DISABLE); 929 if (err) 930 en_err(priv, "Failed disabling multicast filter\n"); 931 932 /* Add the default qp number as multicast promisc */ 933 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 934 switch (mdev->dev->caps.steering_mode) { 935 case MLX4_STEERING_MODE_DEVICE_MANAGED: 936 err = mlx4_flow_steer_promisc_add(mdev->dev, 937 priv->port, 938 priv->base_qpn, 939 MLX4_FS_MC_DEFAULT); 940 break; 941 942 case MLX4_STEERING_MODE_B0: 943 err = mlx4_multicast_promisc_add(mdev->dev, 944 priv->base_qpn, 945 priv->port); 946 break; 947 948 case MLX4_STEERING_MODE_A0: 949 break; 950 } 951 if (err) 952 en_err(priv, "Failed entering multicast promisc mode\n"); 953 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 954 } 955 } else { 956 /* Disable Multicast promisc */ 957 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 958 switch (mdev->dev->caps.steering_mode) { 959 case MLX4_STEERING_MODE_DEVICE_MANAGED: 960 err = mlx4_flow_steer_promisc_remove(mdev->dev, 961 priv->port, 962 MLX4_FS_MC_DEFAULT); 963 break; 964 965 case MLX4_STEERING_MODE_B0: 966 err = mlx4_multicast_promisc_remove(mdev->dev, 967 priv->base_qpn, 968 priv->port); 969 break; 970 971 case MLX4_STEERING_MODE_A0: 972 break; 973 } 974 if (err) 975 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 976 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 977 } 978 979 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 980 0, MLX4_MCAST_DISABLE); 981 if (err) 982 en_err(priv, "Failed disabling multicast filter\n"); 983 984 /* Flush mcast filter and init it with broadcast address */ 985 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 986 1, MLX4_MCAST_CONFIG); 987 988 /* Update multicast list - we cache all addresses so they won't 989 * change while HW is updated holding the command semaphor */ 990 netif_addr_lock_bh(dev); 991 mlx4_en_cache_mclist(dev); 992 netif_addr_unlock_bh(dev); 993 list_for_each_entry(mclist, &priv->mc_list, list) { 994 mcast_addr = mlx4_mac_to_u64(mclist->addr); 995 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 996 mcast_addr, 0, MLX4_MCAST_CONFIG); 997 } 998 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 999 0, MLX4_MCAST_ENABLE); 1000 if (err) 1001 en_err(priv, "Failed enabling multicast filter\n"); 1002 1003 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 1004 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1005 if (mclist->action == MCLIST_REM) { 1006 /* detach this address and delete from list */ 1007 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1008 mc_list[5] = priv->port; 1009 err = mlx4_multicast_detach(mdev->dev, 1010 &priv->rss_map.indir_qp, 1011 mc_list, 1012 MLX4_PROT_ETH, 1013 mclist->reg_id); 1014 if (err) 1015 en_err(priv, "Fail to detach multicast address\n"); 1016 1017 if (mclist->tunnel_reg_id) { 1018 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); 1019 if (err) 1020 en_err(priv, "Failed to detach multicast address\n"); 1021 } 1022 1023 /* remove from list */ 1024 list_del(&mclist->list); 1025 kfree(mclist); 1026 } else if (mclist->action == MCLIST_ADD) { 1027 /* attach the address */ 1028 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1029 /* needed for B0 steering support */ 1030 mc_list[5] = priv->port; 1031 err = mlx4_multicast_attach(mdev->dev, 1032 &priv->rss_map.indir_qp, 1033 mc_list, 1034 priv->port, 0, 1035 MLX4_PROT_ETH, 1036 &mclist->reg_id); 1037 if (err) 1038 en_err(priv, "Fail to attach multicast address\n"); 1039 1040 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 1041 &mclist->tunnel_reg_id); 1042 if (err) 1043 en_err(priv, "Failed to attach multicast address\n"); 1044 } 1045 } 1046 } 1047 } 1048 1049 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1050 struct net_device *dev, 1051 struct mlx4_en_dev *mdev) 1052 { 1053 struct netdev_hw_addr *ha; 1054 struct mlx4_mac_entry *entry; 1055 struct hlist_node *tmp; 1056 bool found; 1057 u64 mac; 1058 int err = 0; 1059 struct hlist_head *bucket; 1060 unsigned int i; 1061 int removed = 0; 1062 u32 prev_flags; 1063 1064 /* Note that we do not need to protect our mac_hash traversal with rcu, 1065 * since all modification code is protected by mdev->state_lock 1066 */ 1067 1068 /* find what to remove */ 1069 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1070 bucket = &priv->mac_hash[i]; 1071 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1072 found = false; 1073 netdev_for_each_uc_addr(ha, dev) { 1074 if (ether_addr_equal_64bits(entry->mac, 1075 ha->addr)) { 1076 found = true; 1077 break; 1078 } 1079 } 1080 1081 /* MAC address of the port is not in uc list */ 1082 if (ether_addr_equal_64bits(entry->mac, 1083 priv->current_mac)) 1084 found = true; 1085 1086 if (!found) { 1087 mac = mlx4_mac_to_u64(entry->mac); 1088 mlx4_en_uc_steer_release(priv, entry->mac, 1089 priv->base_qpn, 1090 entry->reg_id); 1091 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1092 1093 hlist_del_rcu(&entry->hlist); 1094 kfree_rcu(entry, rcu); 1095 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1096 entry->mac, priv->port); 1097 ++removed; 1098 } 1099 } 1100 } 1101 1102 /* if we didn't remove anything, there is no use in trying to add 1103 * again once we are in a forced promisc mode state 1104 */ 1105 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1106 return; 1107 1108 prev_flags = priv->flags; 1109 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1110 1111 /* find what to add */ 1112 netdev_for_each_uc_addr(ha, dev) { 1113 found = false; 1114 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1115 hlist_for_each_entry(entry, bucket, hlist) { 1116 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1117 found = true; 1118 break; 1119 } 1120 } 1121 1122 if (!found) { 1123 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1124 if (!entry) { 1125 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1126 ha->addr, priv->port); 1127 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1128 break; 1129 } 1130 mac = mlx4_mac_to_u64(ha->addr); 1131 memcpy(entry->mac, ha->addr, ETH_ALEN); 1132 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1133 if (err < 0) { 1134 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1135 ha->addr, priv->port, err); 1136 kfree(entry); 1137 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1138 break; 1139 } 1140 err = mlx4_en_uc_steer_add(priv, ha->addr, 1141 &priv->base_qpn, 1142 &entry->reg_id); 1143 if (err) { 1144 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1145 ha->addr, priv->port, err); 1146 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1147 kfree(entry); 1148 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1149 break; 1150 } else { 1151 unsigned int mac_hash; 1152 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1153 ha->addr, priv->port); 1154 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1155 bucket = &priv->mac_hash[mac_hash]; 1156 hlist_add_head_rcu(&entry->hlist, bucket); 1157 } 1158 } 1159 } 1160 1161 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1162 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1163 priv->port); 1164 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1165 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1166 priv->port); 1167 } 1168 } 1169 1170 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1171 { 1172 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1173 rx_mode_task); 1174 struct mlx4_en_dev *mdev = priv->mdev; 1175 struct net_device *dev = priv->dev; 1176 1177 mutex_lock(&mdev->state_lock); 1178 if (!mdev->device_up) { 1179 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1180 goto out; 1181 } 1182 if (!priv->port_up) { 1183 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1184 goto out; 1185 } 1186 1187 if (!netif_carrier_ok(dev)) { 1188 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1189 if (priv->port_state.link_state) { 1190 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1191 netif_carrier_on(dev); 1192 en_dbg(LINK, priv, "Link Up\n"); 1193 } 1194 } 1195 } 1196 1197 if (dev->priv_flags & IFF_UNICAST_FLT) 1198 mlx4_en_do_uc_filter(priv, dev, mdev); 1199 1200 /* Promsicuous mode: disable all filters */ 1201 if ((dev->flags & IFF_PROMISC) || 1202 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1203 mlx4_en_set_promisc_mode(priv, mdev); 1204 goto out; 1205 } 1206 1207 /* Not in promiscuous mode */ 1208 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1209 mlx4_en_clear_promisc_mode(priv, mdev); 1210 1211 mlx4_en_do_multicast(priv, dev, mdev); 1212 out: 1213 mutex_unlock(&mdev->state_lock); 1214 } 1215 1216 #ifdef CONFIG_NET_POLL_CONTROLLER 1217 static void mlx4_en_netpoll(struct net_device *dev) 1218 { 1219 struct mlx4_en_priv *priv = netdev_priv(dev); 1220 struct mlx4_en_cq *cq; 1221 int i; 1222 1223 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1224 cq = priv->tx_cq[TX][i]; 1225 napi_schedule(&cq->napi); 1226 } 1227 } 1228 #endif 1229 1230 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1231 { 1232 u64 reg_id; 1233 int err = 0; 1234 int *qpn = &priv->base_qpn; 1235 struct mlx4_mac_entry *entry; 1236 1237 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 1238 if (err) 1239 return err; 1240 1241 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, 1242 &priv->tunnel_reg_id); 1243 if (err) 1244 goto tunnel_err; 1245 1246 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1247 if (!entry) { 1248 err = -ENOMEM; 1249 goto alloc_err; 1250 } 1251 1252 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 1253 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); 1254 entry->reg_id = reg_id; 1255 hlist_add_head_rcu(&entry->hlist, 1256 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 1257 1258 return 0; 1259 1260 alloc_err: 1261 if (priv->tunnel_reg_id) 1262 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1263 1264 tunnel_err: 1265 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 1266 return err; 1267 } 1268 1269 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) 1270 { 1271 u64 mac; 1272 unsigned int i; 1273 int qpn = priv->base_qpn; 1274 struct hlist_head *bucket; 1275 struct hlist_node *tmp; 1276 struct mlx4_mac_entry *entry; 1277 1278 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1279 bucket = &priv->mac_hash[i]; 1280 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1281 mac = mlx4_mac_to_u64(entry->mac); 1282 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", 1283 entry->mac); 1284 mlx4_en_uc_steer_release(priv, entry->mac, 1285 qpn, entry->reg_id); 1286 1287 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); 1288 hlist_del_rcu(&entry->hlist); 1289 kfree_rcu(entry, rcu); 1290 } 1291 } 1292 1293 if (priv->tunnel_reg_id) { 1294 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1295 priv->tunnel_reg_id = 0; 1296 } 1297 } 1298 1299 static void mlx4_en_tx_timeout(struct net_device *dev) 1300 { 1301 struct mlx4_en_priv *priv = netdev_priv(dev); 1302 struct mlx4_en_dev *mdev = priv->mdev; 1303 int i; 1304 1305 if (netif_msg_timer(priv)) 1306 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1307 1308 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1309 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i]; 1310 1311 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 1312 continue; 1313 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1314 i, tx_ring->qpn, tx_ring->sp_cqn, 1315 tx_ring->cons, tx_ring->prod); 1316 } 1317 1318 priv->port_stats.tx_timeout++; 1319 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1320 queue_work(mdev->workqueue, &priv->watchdog_task); 1321 } 1322 1323 1324 static struct rtnl_link_stats64 * 1325 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1326 { 1327 struct mlx4_en_priv *priv = netdev_priv(dev); 1328 1329 spin_lock_bh(&priv->stats_lock); 1330 mlx4_en_fold_software_stats(dev); 1331 netdev_stats_to_stats64(stats, &dev->stats); 1332 spin_unlock_bh(&priv->stats_lock); 1333 1334 return stats; 1335 } 1336 1337 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1338 { 1339 struct mlx4_en_cq *cq; 1340 int i, t; 1341 1342 /* If we haven't received a specific coalescing setting 1343 * (module param), we set the moderation parameters as follows: 1344 * - moder_cnt is set to the number of mtu sized packets to 1345 * satisfy our coalescing target. 1346 * - moder_time is set to a fixed value. 1347 */ 1348 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1349 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1350 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1351 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1352 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1353 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1354 1355 /* Setup cq moderation params */ 1356 for (i = 0; i < priv->rx_ring_num; i++) { 1357 cq = priv->rx_cq[i]; 1358 cq->moder_cnt = priv->rx_frames; 1359 cq->moder_time = priv->rx_usecs; 1360 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1361 priv->last_moder_packets[i] = 0; 1362 priv->last_moder_bytes[i] = 0; 1363 } 1364 1365 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1366 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1367 cq = priv->tx_cq[t][i]; 1368 cq->moder_cnt = priv->tx_frames; 1369 cq->moder_time = priv->tx_usecs; 1370 } 1371 } 1372 1373 /* Reset auto-moderation params */ 1374 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1375 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1376 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1377 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1378 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1379 priv->adaptive_rx_coal = 1; 1380 priv->last_moder_jiffies = 0; 1381 priv->last_moder_tx_packets = 0; 1382 } 1383 1384 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1385 { 1386 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1387 struct mlx4_en_cq *cq; 1388 unsigned long packets; 1389 unsigned long rate; 1390 unsigned long avg_pkt_size; 1391 unsigned long rx_packets; 1392 unsigned long rx_bytes; 1393 unsigned long rx_pkt_diff; 1394 int moder_time; 1395 int ring, err; 1396 1397 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1398 return; 1399 1400 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1401 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); 1402 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); 1403 1404 rx_pkt_diff = ((unsigned long) (rx_packets - 1405 priv->last_moder_packets[ring])); 1406 packets = rx_pkt_diff; 1407 rate = packets * HZ / period; 1408 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 1409 priv->last_moder_bytes[ring])) / packets : 0; 1410 1411 /* Apply auto-moderation only when packet rate 1412 * exceeds a rate that it matters */ 1413 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1414 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1415 if (rate < priv->pkt_rate_low) 1416 moder_time = priv->rx_usecs_low; 1417 else if (rate > priv->pkt_rate_high) 1418 moder_time = priv->rx_usecs_high; 1419 else 1420 moder_time = (rate - priv->pkt_rate_low) * 1421 (priv->rx_usecs_high - priv->rx_usecs_low) / 1422 (priv->pkt_rate_high - priv->pkt_rate_low) + 1423 priv->rx_usecs_low; 1424 } else { 1425 moder_time = priv->rx_usecs_low; 1426 } 1427 1428 if (moder_time != priv->last_moder_time[ring]) { 1429 priv->last_moder_time[ring] = moder_time; 1430 cq = priv->rx_cq[ring]; 1431 cq->moder_time = moder_time; 1432 cq->moder_cnt = priv->rx_frames; 1433 err = mlx4_en_set_cq_moder(priv, cq); 1434 if (err) 1435 en_err(priv, "Failed modifying moderation for cq:%d\n", 1436 ring); 1437 } 1438 priv->last_moder_packets[ring] = rx_packets; 1439 priv->last_moder_bytes[ring] = rx_bytes; 1440 } 1441 1442 priv->last_moder_jiffies = jiffies; 1443 } 1444 1445 static void mlx4_en_do_get_stats(struct work_struct *work) 1446 { 1447 struct delayed_work *delay = to_delayed_work(work); 1448 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1449 stats_task); 1450 struct mlx4_en_dev *mdev = priv->mdev; 1451 int err; 1452 1453 mutex_lock(&mdev->state_lock); 1454 if (mdev->device_up) { 1455 if (priv->port_up) { 1456 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1457 if (err) 1458 en_dbg(HW, priv, "Could not update stats\n"); 1459 1460 mlx4_en_auto_moderation(priv); 1461 } 1462 1463 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1464 } 1465 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1466 mlx4_en_do_set_mac(priv, priv->current_mac); 1467 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1468 } 1469 mutex_unlock(&mdev->state_lock); 1470 } 1471 1472 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1473 * periodically 1474 */ 1475 static void mlx4_en_service_task(struct work_struct *work) 1476 { 1477 struct delayed_work *delay = to_delayed_work(work); 1478 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1479 service_task); 1480 struct mlx4_en_dev *mdev = priv->mdev; 1481 1482 mutex_lock(&mdev->state_lock); 1483 if (mdev->device_up) { 1484 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1485 mlx4_en_ptp_overflow_check(mdev); 1486 1487 mlx4_en_recover_from_oom(priv); 1488 queue_delayed_work(mdev->workqueue, &priv->service_task, 1489 SERVICE_TASK_DELAY); 1490 } 1491 mutex_unlock(&mdev->state_lock); 1492 } 1493 1494 static void mlx4_en_linkstate(struct work_struct *work) 1495 { 1496 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1497 linkstate_task); 1498 struct mlx4_en_dev *mdev = priv->mdev; 1499 int linkstate = priv->link_state; 1500 1501 mutex_lock(&mdev->state_lock); 1502 /* If observable port state changed set carrier state and 1503 * report to system log */ 1504 if (priv->last_link_state != linkstate) { 1505 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1506 en_info(priv, "Link Down\n"); 1507 netif_carrier_off(priv->dev); 1508 } else { 1509 en_info(priv, "Link Up\n"); 1510 netif_carrier_on(priv->dev); 1511 } 1512 } 1513 priv->last_link_state = linkstate; 1514 mutex_unlock(&mdev->state_lock); 1515 } 1516 1517 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1518 { 1519 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1520 int numa_node = priv->mdev->dev->numa_node; 1521 1522 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1523 return -ENOMEM; 1524 1525 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1526 ring->affinity_mask); 1527 return 0; 1528 } 1529 1530 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1531 { 1532 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); 1533 } 1534 1535 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, 1536 int tx_ring_idx) 1537 { 1538 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx]; 1539 int rr_index = tx_ring_idx; 1540 1541 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; 1542 tx_ring->recycle_ring = priv->rx_ring[rr_index]; 1543 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n", 1544 TX_XDP, tx_ring_idx, rr_index); 1545 } 1546 1547 int mlx4_en_start_port(struct net_device *dev) 1548 { 1549 struct mlx4_en_priv *priv = netdev_priv(dev); 1550 struct mlx4_en_dev *mdev = priv->mdev; 1551 struct mlx4_en_cq *cq; 1552 struct mlx4_en_tx_ring *tx_ring; 1553 int rx_index = 0; 1554 int err = 0; 1555 int i, t; 1556 int j; 1557 u8 mc_list[16] = {0}; 1558 1559 if (priv->port_up) { 1560 en_dbg(DRV, priv, "start port called while port already up\n"); 1561 return 0; 1562 } 1563 1564 INIT_LIST_HEAD(&priv->mc_list); 1565 INIT_LIST_HEAD(&priv->curr_list); 1566 INIT_LIST_HEAD(&priv->ethtool_list); 1567 memset(&priv->ethtool_rules[0], 0, 1568 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1569 1570 /* Calculate Rx buf size */ 1571 dev->mtu = min(dev->mtu, priv->max_mtu); 1572 mlx4_en_calc_rx_buf(dev); 1573 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1574 1575 /* Configure rx cq's and rings */ 1576 err = mlx4_en_activate_rx_rings(priv); 1577 if (err) { 1578 en_err(priv, "Failed to activate RX rings\n"); 1579 return err; 1580 } 1581 for (i = 0; i < priv->rx_ring_num; i++) { 1582 cq = priv->rx_cq[i]; 1583 1584 err = mlx4_en_init_affinity_hint(priv, i); 1585 if (err) { 1586 en_err(priv, "Failed preparing IRQ affinity hint\n"); 1587 goto cq_err; 1588 } 1589 1590 err = mlx4_en_activate_cq(priv, cq, i); 1591 if (err) { 1592 en_err(priv, "Failed activating Rx CQ\n"); 1593 mlx4_en_free_affinity_hint(priv, i); 1594 goto cq_err; 1595 } 1596 1597 for (j = 0; j < cq->size; j++) { 1598 struct mlx4_cqe *cqe = NULL; 1599 1600 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + 1601 priv->cqe_factor; 1602 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1603 } 1604 1605 err = mlx4_en_set_cq_moder(priv, cq); 1606 if (err) { 1607 en_err(priv, "Failed setting cq moderation parameters\n"); 1608 mlx4_en_deactivate_cq(priv, cq); 1609 mlx4_en_free_affinity_hint(priv, i); 1610 goto cq_err; 1611 } 1612 mlx4_en_arm_cq(priv, cq); 1613 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1614 ++rx_index; 1615 } 1616 1617 /* Set qp number */ 1618 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1619 err = mlx4_en_get_qp(priv); 1620 if (err) { 1621 en_err(priv, "Failed getting eth qp\n"); 1622 goto cq_err; 1623 } 1624 mdev->mac_removed[priv->port] = 0; 1625 1626 priv->counter_index = 1627 mlx4_get_default_counter_index(mdev->dev, priv->port); 1628 1629 err = mlx4_en_config_rss_steer(priv); 1630 if (err) { 1631 en_err(priv, "Failed configuring rss steering\n"); 1632 goto mac_err; 1633 } 1634 1635 err = mlx4_en_create_drop_qp(priv); 1636 if (err) 1637 goto rss_err; 1638 1639 /* Configure tx cq's and rings */ 1640 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1641 u8 num_tx_rings_p_up = t == TX ? 1642 priv->num_tx_rings_p_up : priv->tx_ring_num[t]; 1643 1644 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1645 /* Configure cq */ 1646 cq = priv->tx_cq[t][i]; 1647 err = mlx4_en_activate_cq(priv, cq, i); 1648 if (err) { 1649 en_err(priv, "Failed allocating Tx CQ\n"); 1650 goto tx_err; 1651 } 1652 err = mlx4_en_set_cq_moder(priv, cq); 1653 if (err) { 1654 en_err(priv, "Failed setting cq moderation parameters\n"); 1655 mlx4_en_deactivate_cq(priv, cq); 1656 goto tx_err; 1657 } 1658 en_dbg(DRV, priv, 1659 "Resetting index of collapsed CQ:%d to -1\n", i); 1660 cq->buf->wqe_index = cpu_to_be16(0xffff); 1661 1662 /* Configure ring */ 1663 tx_ring = priv->tx_ring[t][i]; 1664 err = mlx4_en_activate_tx_ring(priv, tx_ring, 1665 cq->mcq.cqn, 1666 i / num_tx_rings_p_up); 1667 if (err) { 1668 en_err(priv, "Failed allocating Tx ring\n"); 1669 mlx4_en_deactivate_cq(priv, cq); 1670 goto tx_err; 1671 } 1672 if (t != TX_XDP) { 1673 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1674 tx_ring->recycle_ring = NULL; 1675 } else { 1676 mlx4_en_init_recycle_ring(priv, i); 1677 } 1678 1679 /* Arm CQ for TX completions */ 1680 mlx4_en_arm_cq(priv, cq); 1681 1682 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1683 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1684 *((u32 *)(tx_ring->buf + j)) = 0xffffffff; 1685 } 1686 } 1687 1688 /* Configure port */ 1689 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1690 priv->rx_skb_size + ETH_FCS_LEN, 1691 priv->prof->tx_pause, 1692 priv->prof->tx_ppp, 1693 priv->prof->rx_pause, 1694 priv->prof->rx_ppp); 1695 if (err) { 1696 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1697 priv->port, err); 1698 goto tx_err; 1699 } 1700 /* Set default qp number */ 1701 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1702 if (err) { 1703 en_err(priv, "Failed setting default qp numbers\n"); 1704 goto tx_err; 1705 } 1706 1707 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1708 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 1709 if (err) { 1710 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1711 err); 1712 goto tx_err; 1713 } 1714 } 1715 1716 /* Init port */ 1717 en_dbg(HW, priv, "Initializing port\n"); 1718 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1719 if (err) { 1720 en_err(priv, "Failed Initializing port\n"); 1721 goto tx_err; 1722 } 1723 1724 /* Set Unicast and VXLAN steering rules */ 1725 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && 1726 mlx4_en_set_rss_steer_rules(priv)) 1727 mlx4_warn(mdev, "Failed setting steering rules\n"); 1728 1729 /* Attach rx QP to bradcast address */ 1730 eth_broadcast_addr(&mc_list[10]); 1731 mc_list[5] = priv->port; /* needed for B0 steering support */ 1732 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1733 priv->port, 0, MLX4_PROT_ETH, 1734 &priv->broadcast_id)) 1735 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1736 1737 /* Must redo promiscuous mode setup. */ 1738 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1739 1740 /* Schedule multicast task to populate multicast list */ 1741 queue_work(mdev->workqueue, &priv->rx_mode_task); 1742 1743 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1744 udp_tunnel_get_rx_info(dev); 1745 1746 priv->port_up = true; 1747 1748 /* Process all completions if exist to prevent 1749 * the queues freezing if they are full 1750 */ 1751 for (i = 0; i < priv->rx_ring_num; i++) 1752 napi_schedule(&priv->rx_cq[i]->napi); 1753 1754 netif_tx_start_all_queues(dev); 1755 netif_device_attach(dev); 1756 1757 return 0; 1758 1759 tx_err: 1760 if (t == MLX4_EN_NUM_TX_TYPES) { 1761 t--; 1762 i = priv->tx_ring_num[t]; 1763 } 1764 while (t >= 0) { 1765 while (i--) { 1766 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1767 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1768 } 1769 if (!t--) 1770 break; 1771 i = priv->tx_ring_num[t]; 1772 } 1773 mlx4_en_destroy_drop_qp(priv); 1774 rss_err: 1775 mlx4_en_release_rss_steer(priv); 1776 mac_err: 1777 mlx4_en_put_qp(priv); 1778 cq_err: 1779 while (rx_index--) { 1780 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1781 mlx4_en_free_affinity_hint(priv, rx_index); 1782 } 1783 for (i = 0; i < priv->rx_ring_num; i++) 1784 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1785 1786 return err; /* need to close devices */ 1787 } 1788 1789 1790 void mlx4_en_stop_port(struct net_device *dev, int detach) 1791 { 1792 struct mlx4_en_priv *priv = netdev_priv(dev); 1793 struct mlx4_en_dev *mdev = priv->mdev; 1794 struct mlx4_en_mc_list *mclist, *tmp; 1795 struct ethtool_flow_id *flow, *tmp_flow; 1796 int i, t; 1797 u8 mc_list[16] = {0}; 1798 1799 if (!priv->port_up) { 1800 en_dbg(DRV, priv, "stop port called while port already down\n"); 1801 return; 1802 } 1803 1804 /* close port*/ 1805 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1806 1807 /* Synchronize with tx routine */ 1808 netif_tx_lock_bh(dev); 1809 if (detach) 1810 netif_device_detach(dev); 1811 netif_tx_stop_all_queues(dev); 1812 netif_tx_unlock_bh(dev); 1813 1814 netif_tx_disable(dev); 1815 1816 spin_lock_bh(&priv->stats_lock); 1817 mlx4_en_fold_software_stats(dev); 1818 /* Set port as not active */ 1819 priv->port_up = false; 1820 spin_unlock_bh(&priv->stats_lock); 1821 1822 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1823 1824 /* Promsicuous mode */ 1825 if (mdev->dev->caps.steering_mode == 1826 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1827 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1828 MLX4_EN_FLAG_MC_PROMISC); 1829 mlx4_flow_steer_promisc_remove(mdev->dev, 1830 priv->port, 1831 MLX4_FS_ALL_DEFAULT); 1832 mlx4_flow_steer_promisc_remove(mdev->dev, 1833 priv->port, 1834 MLX4_FS_MC_DEFAULT); 1835 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1836 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1837 1838 /* Disable promiscouos mode */ 1839 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1840 priv->port); 1841 1842 /* Disable Multicast promisc */ 1843 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1844 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1845 priv->port); 1846 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1847 } 1848 } 1849 1850 /* Detach All multicasts */ 1851 eth_broadcast_addr(&mc_list[10]); 1852 mc_list[5] = priv->port; /* needed for B0 steering support */ 1853 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1854 MLX4_PROT_ETH, priv->broadcast_id); 1855 list_for_each_entry(mclist, &priv->curr_list, list) { 1856 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1857 mc_list[5] = priv->port; 1858 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1859 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1860 if (mclist->tunnel_reg_id) 1861 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); 1862 } 1863 mlx4_en_clear_list(dev); 1864 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1865 list_del(&mclist->list); 1866 kfree(mclist); 1867 } 1868 1869 /* Flush multicast filter */ 1870 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1871 1872 /* Remove flow steering rules for the port*/ 1873 if (mdev->dev->caps.steering_mode == 1874 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1875 ASSERT_RTNL(); 1876 list_for_each_entry_safe(flow, tmp_flow, 1877 &priv->ethtool_list, list) { 1878 mlx4_flow_detach(mdev->dev, flow->id); 1879 list_del(&flow->list); 1880 } 1881 } 1882 1883 mlx4_en_destroy_drop_qp(priv); 1884 1885 /* Free TX Rings */ 1886 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 1887 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1888 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1889 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1890 } 1891 } 1892 msleep(10); 1893 1894 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) 1895 for (i = 0; i < priv->tx_ring_num[t]; i++) 1896 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]); 1897 1898 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1899 mlx4_en_delete_rss_steer_rules(priv); 1900 1901 /* Free RSS qps */ 1902 mlx4_en_release_rss_steer(priv); 1903 1904 /* Unregister Mac address for the port */ 1905 mlx4_en_put_qp(priv); 1906 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1907 mdev->mac_removed[priv->port] = 1; 1908 1909 /* Free RX Rings */ 1910 for (i = 0; i < priv->rx_ring_num; i++) { 1911 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1912 1913 napi_synchronize(&cq->napi); 1914 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1915 mlx4_en_deactivate_cq(priv, cq); 1916 1917 mlx4_en_free_affinity_hint(priv, i); 1918 } 1919 } 1920 1921 static void mlx4_en_restart(struct work_struct *work) 1922 { 1923 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1924 watchdog_task); 1925 struct mlx4_en_dev *mdev = priv->mdev; 1926 struct net_device *dev = priv->dev; 1927 1928 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1929 1930 rtnl_lock(); 1931 mutex_lock(&mdev->state_lock); 1932 if (priv->port_up) { 1933 mlx4_en_stop_port(dev, 1); 1934 if (mlx4_en_start_port(dev)) 1935 en_err(priv, "Failed restarting port %d\n", priv->port); 1936 } 1937 mutex_unlock(&mdev->state_lock); 1938 rtnl_unlock(); 1939 } 1940 1941 static void mlx4_en_clear_stats(struct net_device *dev) 1942 { 1943 struct mlx4_en_priv *priv = netdev_priv(dev); 1944 struct mlx4_en_dev *mdev = priv->mdev; 1945 struct mlx4_en_tx_ring **tx_ring; 1946 int i; 1947 1948 if (!mlx4_is_slave(mdev->dev)) 1949 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1950 en_dbg(HW, priv, "Failed dumping statistics\n"); 1951 1952 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1953 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1954 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1955 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); 1956 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); 1957 memset(&priv->rx_priority_flowstats, 0, 1958 sizeof(priv->rx_priority_flowstats)); 1959 memset(&priv->tx_priority_flowstats, 0, 1960 sizeof(priv->tx_priority_flowstats)); 1961 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); 1962 1963 tx_ring = priv->tx_ring[TX]; 1964 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1965 tx_ring[i]->bytes = 0; 1966 tx_ring[i]->packets = 0; 1967 tx_ring[i]->tx_csum = 0; 1968 tx_ring[i]->tx_dropped = 0; 1969 tx_ring[i]->queue_stopped = 0; 1970 tx_ring[i]->wake_queue = 0; 1971 tx_ring[i]->tso_packets = 0; 1972 tx_ring[i]->xmit_more = 0; 1973 } 1974 for (i = 0; i < priv->rx_ring_num; i++) { 1975 priv->rx_ring[i]->bytes = 0; 1976 priv->rx_ring[i]->packets = 0; 1977 priv->rx_ring[i]->csum_ok = 0; 1978 priv->rx_ring[i]->csum_none = 0; 1979 priv->rx_ring[i]->csum_complete = 0; 1980 } 1981 } 1982 1983 static int mlx4_en_open(struct net_device *dev) 1984 { 1985 struct mlx4_en_priv *priv = netdev_priv(dev); 1986 struct mlx4_en_dev *mdev = priv->mdev; 1987 int err = 0; 1988 1989 mutex_lock(&mdev->state_lock); 1990 1991 if (!mdev->device_up) { 1992 en_err(priv, "Cannot open - device down/disabled\n"); 1993 err = -EBUSY; 1994 goto out; 1995 } 1996 1997 /* Reset HW statistics and SW counters */ 1998 mlx4_en_clear_stats(dev); 1999 2000 err = mlx4_en_start_port(dev); 2001 if (err) 2002 en_err(priv, "Failed starting port:%d\n", priv->port); 2003 2004 out: 2005 mutex_unlock(&mdev->state_lock); 2006 return err; 2007 } 2008 2009 2010 static int mlx4_en_close(struct net_device *dev) 2011 { 2012 struct mlx4_en_priv *priv = netdev_priv(dev); 2013 struct mlx4_en_dev *mdev = priv->mdev; 2014 2015 en_dbg(IFDOWN, priv, "Close port called\n"); 2016 2017 mutex_lock(&mdev->state_lock); 2018 2019 mlx4_en_stop_port(dev, 0); 2020 netif_carrier_off(dev); 2021 2022 mutex_unlock(&mdev->state_lock); 2023 return 0; 2024 } 2025 2026 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 2027 { 2028 int i, t; 2029 2030 #ifdef CONFIG_RFS_ACCEL 2031 priv->dev->rx_cpu_rmap = NULL; 2032 #endif 2033 2034 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2035 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2036 if (priv->tx_ring[t] && priv->tx_ring[t][i]) 2037 mlx4_en_destroy_tx_ring(priv, 2038 &priv->tx_ring[t][i]); 2039 if (priv->tx_cq[t] && priv->tx_cq[t][i]) 2040 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2041 } 2042 } 2043 2044 for (i = 0; i < priv->rx_ring_num; i++) { 2045 if (priv->rx_ring[i]) 2046 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2047 priv->prof->rx_ring_size, priv->stride); 2048 if (priv->rx_cq[i]) 2049 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2050 } 2051 2052 } 2053 2054 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 2055 { 2056 struct mlx4_en_port_profile *prof = priv->prof; 2057 int i, t; 2058 int node; 2059 2060 /* Create tx Rings */ 2061 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2062 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2063 node = cpu_to_node(i % num_online_cpus()); 2064 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i], 2065 prof->tx_ring_size, i, t, node)) 2066 goto err; 2067 2068 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i], 2069 prof->tx_ring_size, 2070 TXBB_SIZE, node, i)) 2071 goto err; 2072 } 2073 } 2074 2075 /* Create rx Rings */ 2076 for (i = 0; i < priv->rx_ring_num; i++) { 2077 node = cpu_to_node(i % num_online_cpus()); 2078 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2079 prof->rx_ring_size, i, RX, node)) 2080 goto err; 2081 2082 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2083 prof->rx_ring_size, priv->stride, 2084 node)) 2085 goto err; 2086 } 2087 2088 #ifdef CONFIG_RFS_ACCEL 2089 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2090 #endif 2091 2092 return 0; 2093 2094 err: 2095 en_err(priv, "Failed to allocate NIC resources\n"); 2096 for (i = 0; i < priv->rx_ring_num; i++) { 2097 if (priv->rx_ring[i]) 2098 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2099 prof->rx_ring_size, 2100 priv->stride); 2101 if (priv->rx_cq[i]) 2102 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2103 } 2104 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2105 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2106 if (priv->tx_ring[t][i]) 2107 mlx4_en_destroy_tx_ring(priv, 2108 &priv->tx_ring[t][i]); 2109 if (priv->tx_cq[t][i]) 2110 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2111 } 2112 } 2113 return -ENOMEM; 2114 } 2115 2116 2117 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2118 struct mlx4_en_priv *src, 2119 struct mlx4_en_port_profile *prof) 2120 { 2121 int t; 2122 2123 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2124 sizeof(dst->hwtstamp_config)); 2125 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; 2126 dst->rx_ring_num = prof->rx_ring_num; 2127 dst->flags = prof->flags; 2128 dst->mdev = src->mdev; 2129 dst->port = src->port; 2130 dst->dev = src->dev; 2131 dst->prof = prof; 2132 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2133 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2134 2135 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2136 dst->tx_ring_num[t] = prof->tx_ring_num[t]; 2137 if (!dst->tx_ring_num[t]) 2138 continue; 2139 2140 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * 2141 MAX_TX_RINGS, GFP_KERNEL); 2142 if (!dst->tx_ring[t]) 2143 goto err_free_tx; 2144 2145 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * 2146 MAX_TX_RINGS, GFP_KERNEL); 2147 if (!dst->tx_cq[t]) { 2148 kfree(dst->tx_ring[t]); 2149 goto err_free_tx; 2150 } 2151 } 2152 2153 return 0; 2154 2155 err_free_tx: 2156 while (t--) { 2157 kfree(dst->tx_ring[t]); 2158 kfree(dst->tx_cq[t]); 2159 } 2160 return -ENOMEM; 2161 } 2162 2163 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2164 struct mlx4_en_priv *src) 2165 { 2166 int t; 2167 memcpy(dst->rx_ring, src->rx_ring, 2168 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2169 memcpy(dst->rx_cq, src->rx_cq, 2170 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2171 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2172 sizeof(dst->hwtstamp_config)); 2173 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2174 dst->tx_ring_num[t] = src->tx_ring_num[t]; 2175 dst->tx_ring[t] = src->tx_ring[t]; 2176 dst->tx_cq[t] = src->tx_cq[t]; 2177 } 2178 dst->rx_ring_num = src->rx_ring_num; 2179 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2180 } 2181 2182 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2183 struct mlx4_en_priv *tmp, 2184 struct mlx4_en_port_profile *prof) 2185 { 2186 int t; 2187 2188 mlx4_en_copy_priv(tmp, priv, prof); 2189 2190 if (mlx4_en_alloc_resources(tmp)) { 2191 en_warn(priv, 2192 "%s: Resource allocation failed, using previous configuration\n", 2193 __func__); 2194 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2195 kfree(tmp->tx_ring[t]); 2196 kfree(tmp->tx_cq[t]); 2197 } 2198 return -ENOMEM; 2199 } 2200 return 0; 2201 } 2202 2203 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2204 struct mlx4_en_priv *tmp) 2205 { 2206 mlx4_en_free_resources(priv); 2207 mlx4_en_update_priv(priv, tmp); 2208 } 2209 2210 void mlx4_en_destroy_netdev(struct net_device *dev) 2211 { 2212 struct mlx4_en_priv *priv = netdev_priv(dev); 2213 struct mlx4_en_dev *mdev = priv->mdev; 2214 int t; 2215 2216 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2217 2218 /* Unregister device - this will close the port if it was up */ 2219 if (priv->registered) { 2220 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2221 priv->port)); 2222 unregister_netdev(dev); 2223 } 2224 2225 if (priv->allocated) 2226 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2227 2228 cancel_delayed_work(&priv->stats_task); 2229 cancel_delayed_work(&priv->service_task); 2230 /* flush any pending task for this netdev */ 2231 flush_workqueue(mdev->workqueue); 2232 2233 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2234 mlx4_en_remove_timestamp(mdev); 2235 2236 /* Detach the netdev so tasks would not attempt to access it */ 2237 mutex_lock(&mdev->state_lock); 2238 mdev->pndev[priv->port] = NULL; 2239 mdev->upper[priv->port] = NULL; 2240 2241 #ifdef CONFIG_RFS_ACCEL 2242 mlx4_en_cleanup_filters(priv); 2243 #endif 2244 2245 mlx4_en_free_resources(priv); 2246 mutex_unlock(&mdev->state_lock); 2247 2248 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2249 kfree(priv->tx_ring[t]); 2250 kfree(priv->tx_cq[t]); 2251 } 2252 2253 free_netdev(dev); 2254 } 2255 2256 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu) 2257 { 2258 struct mlx4_en_priv *priv = netdev_priv(dev); 2259 2260 if (mtu > MLX4_EN_MAX_XDP_MTU) { 2261 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n", 2262 mtu, MLX4_EN_MAX_XDP_MTU); 2263 return false; 2264 } 2265 2266 return true; 2267 } 2268 2269 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2270 { 2271 struct mlx4_en_priv *priv = netdev_priv(dev); 2272 struct mlx4_en_dev *mdev = priv->mdev; 2273 int err = 0; 2274 2275 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2276 dev->mtu, new_mtu); 2277 2278 if (priv->tx_ring_num[TX_XDP] && 2279 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2280 return -EOPNOTSUPP; 2281 2282 dev->mtu = new_mtu; 2283 2284 if (netif_running(dev)) { 2285 mutex_lock(&mdev->state_lock); 2286 if (!mdev->device_up) { 2287 /* NIC is probably restarting - let watchdog task reset 2288 * the port */ 2289 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2290 } else { 2291 mlx4_en_stop_port(dev, 1); 2292 err = mlx4_en_start_port(dev); 2293 if (err) { 2294 en_err(priv, "Failed restarting port:%d\n", 2295 priv->port); 2296 queue_work(mdev->workqueue, &priv->watchdog_task); 2297 } 2298 } 2299 mutex_unlock(&mdev->state_lock); 2300 } 2301 return 0; 2302 } 2303 2304 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2305 { 2306 struct mlx4_en_priv *priv = netdev_priv(dev); 2307 struct mlx4_en_dev *mdev = priv->mdev; 2308 struct hwtstamp_config config; 2309 2310 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2311 return -EFAULT; 2312 2313 /* reserved for future extensions */ 2314 if (config.flags) 2315 return -EINVAL; 2316 2317 /* device doesn't support time stamping */ 2318 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2319 return -EINVAL; 2320 2321 /* TX HW timestamp */ 2322 switch (config.tx_type) { 2323 case HWTSTAMP_TX_OFF: 2324 case HWTSTAMP_TX_ON: 2325 break; 2326 default: 2327 return -ERANGE; 2328 } 2329 2330 /* RX HW timestamp */ 2331 switch (config.rx_filter) { 2332 case HWTSTAMP_FILTER_NONE: 2333 break; 2334 case HWTSTAMP_FILTER_ALL: 2335 case HWTSTAMP_FILTER_SOME: 2336 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2337 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2338 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2339 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2340 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2341 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2342 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2343 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2344 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2345 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2346 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2347 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2348 config.rx_filter = HWTSTAMP_FILTER_ALL; 2349 break; 2350 default: 2351 return -ERANGE; 2352 } 2353 2354 if (mlx4_en_reset_config(dev, config, dev->features)) { 2355 config.tx_type = HWTSTAMP_TX_OFF; 2356 config.rx_filter = HWTSTAMP_FILTER_NONE; 2357 } 2358 2359 return copy_to_user(ifr->ifr_data, &config, 2360 sizeof(config)) ? -EFAULT : 0; 2361 } 2362 2363 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2364 { 2365 struct mlx4_en_priv *priv = netdev_priv(dev); 2366 2367 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2368 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; 2369 } 2370 2371 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2372 { 2373 switch (cmd) { 2374 case SIOCSHWTSTAMP: 2375 return mlx4_en_hwtstamp_set(dev, ifr); 2376 case SIOCGHWTSTAMP: 2377 return mlx4_en_hwtstamp_get(dev, ifr); 2378 default: 2379 return -EOPNOTSUPP; 2380 } 2381 } 2382 2383 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2384 netdev_features_t features) 2385 { 2386 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2387 struct mlx4_en_dev *mdev = en_priv->mdev; 2388 2389 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2390 * enable/disable make sure S-TAG flag is always in same state as 2391 * C-TAG. 2392 */ 2393 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2394 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2395 features |= NETIF_F_HW_VLAN_STAG_RX; 2396 else 2397 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2398 2399 return features; 2400 } 2401 2402 static int mlx4_en_set_features(struct net_device *netdev, 2403 netdev_features_t features) 2404 { 2405 struct mlx4_en_priv *priv = netdev_priv(netdev); 2406 bool reset = false; 2407 int ret = 0; 2408 2409 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2410 en_info(priv, "Turn %s RX-FCS\n", 2411 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2412 reset = true; 2413 } 2414 2415 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2416 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2417 2418 en_info(priv, "Turn %s RX-ALL\n", 2419 ignore_fcs_value ? "ON" : "OFF"); 2420 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2421 priv->port, ignore_fcs_value); 2422 if (ret) 2423 return ret; 2424 } 2425 2426 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2427 en_info(priv, "Turn %s RX vlan strip offload\n", 2428 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2429 reset = true; 2430 } 2431 2432 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2433 en_info(priv, "Turn %s TX vlan strip offload\n", 2434 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2435 2436 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2437 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2438 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2439 2440 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2441 en_info(priv, "Turn %s loopback\n", 2442 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); 2443 mlx4_en_update_loopback_state(netdev, features); 2444 } 2445 2446 if (reset) { 2447 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2448 features); 2449 if (ret) 2450 return ret; 2451 } 2452 2453 return 0; 2454 } 2455 2456 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2457 { 2458 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2459 struct mlx4_en_dev *mdev = en_priv->mdev; 2460 u64 mac_u64 = mlx4_mac_to_u64(mac); 2461 2462 if (is_multicast_ether_addr(mac)) 2463 return -EINVAL; 2464 2465 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2466 } 2467 2468 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 2469 __be16 vlan_proto) 2470 { 2471 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2472 struct mlx4_en_dev *mdev = en_priv->mdev; 2473 2474 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos, 2475 vlan_proto); 2476 } 2477 2478 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2479 int max_tx_rate) 2480 { 2481 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2482 struct mlx4_en_dev *mdev = en_priv->mdev; 2483 2484 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2485 max_tx_rate); 2486 } 2487 2488 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2489 { 2490 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2491 struct mlx4_en_dev *mdev = en_priv->mdev; 2492 2493 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2494 } 2495 2496 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2497 { 2498 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2499 struct mlx4_en_dev *mdev = en_priv->mdev; 2500 2501 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2502 } 2503 2504 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2505 { 2506 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2507 struct mlx4_en_dev *mdev = en_priv->mdev; 2508 2509 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2510 } 2511 2512 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2513 struct ifla_vf_stats *vf_stats) 2514 { 2515 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2516 struct mlx4_en_dev *mdev = en_priv->mdev; 2517 2518 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2519 } 2520 2521 #define PORT_ID_BYTE_LEN 8 2522 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2523 struct netdev_phys_item_id *ppid) 2524 { 2525 struct mlx4_en_priv *priv = netdev_priv(dev); 2526 struct mlx4_dev *mdev = priv->mdev->dev; 2527 int i; 2528 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2529 2530 if (!phys_port_id) 2531 return -EOPNOTSUPP; 2532 2533 ppid->id_len = sizeof(phys_port_id); 2534 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2535 ppid->id[i] = phys_port_id & 0xff; 2536 phys_port_id >>= 8; 2537 } 2538 return 0; 2539 } 2540 2541 static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2542 { 2543 int ret; 2544 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2545 vxlan_add_task); 2546 2547 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2548 if (ret) 2549 goto out; 2550 2551 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2552 VXLAN_STEER_BY_OUTER_MAC, 1); 2553 out: 2554 if (ret) { 2555 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2556 return; 2557 } 2558 2559 /* set offloads */ 2560 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2561 NETIF_F_RXCSUM | 2562 NETIF_F_TSO | NETIF_F_TSO6 | 2563 NETIF_F_GSO_UDP_TUNNEL | 2564 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2565 NETIF_F_GSO_PARTIAL; 2566 } 2567 2568 static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2569 { 2570 int ret; 2571 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2572 vxlan_del_task); 2573 /* unset offloads */ 2574 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2575 NETIF_F_RXCSUM | 2576 NETIF_F_TSO | NETIF_F_TSO6 | 2577 NETIF_F_GSO_UDP_TUNNEL | 2578 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2579 NETIF_F_GSO_PARTIAL); 2580 2581 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2582 VXLAN_STEER_BY_OUTER_MAC, 0); 2583 if (ret) 2584 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2585 2586 priv->vxlan_port = 0; 2587 } 2588 2589 static void mlx4_en_add_vxlan_port(struct net_device *dev, 2590 struct udp_tunnel_info *ti) 2591 { 2592 struct mlx4_en_priv *priv = netdev_priv(dev); 2593 __be16 port = ti->port; 2594 __be16 current_port; 2595 2596 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2597 return; 2598 2599 if (ti->sa_family != AF_INET) 2600 return; 2601 2602 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2603 return; 2604 2605 current_port = priv->vxlan_port; 2606 if (current_port && current_port != port) { 2607 en_warn(priv, "vxlan port %d configured, can't add port %d\n", 2608 ntohs(current_port), ntohs(port)); 2609 return; 2610 } 2611 2612 priv->vxlan_port = port; 2613 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); 2614 } 2615 2616 static void mlx4_en_del_vxlan_port(struct net_device *dev, 2617 struct udp_tunnel_info *ti) 2618 { 2619 struct mlx4_en_priv *priv = netdev_priv(dev); 2620 __be16 port = ti->port; 2621 __be16 current_port; 2622 2623 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2624 return; 2625 2626 if (ti->sa_family != AF_INET) 2627 return; 2628 2629 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2630 return; 2631 2632 current_port = priv->vxlan_port; 2633 if (current_port != port) { 2634 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); 2635 return; 2636 } 2637 2638 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2639 } 2640 2641 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2642 struct net_device *dev, 2643 netdev_features_t features) 2644 { 2645 features = vlan_features_check(skb, features); 2646 features = vxlan_features_check(skb, features); 2647 2648 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2649 * support inner IPv6 checksums and segmentation so we need to 2650 * strip that feature if this is an IPv6 encapsulated frame. 2651 */ 2652 if (skb->encapsulation && 2653 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2654 struct mlx4_en_priv *priv = netdev_priv(dev); 2655 2656 if (!priv->vxlan_port || 2657 (ip_hdr(skb)->version != 4) || 2658 (udp_hdr(skb)->dest != priv->vxlan_port)) 2659 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2660 } 2661 2662 return features; 2663 } 2664 2665 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2666 { 2667 struct mlx4_en_priv *priv = netdev_priv(dev); 2668 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index]; 2669 struct mlx4_update_qp_params params; 2670 int err; 2671 2672 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2673 return -EOPNOTSUPP; 2674 2675 /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ 2676 if (maxrate >> 12) { 2677 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2678 params.rate_val = maxrate / 1000; 2679 } else if (maxrate) { 2680 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2681 params.rate_val = maxrate; 2682 } else { /* zero serves to revoke the QP rate-limitation */ 2683 params.rate_unit = 0; 2684 params.rate_val = 0; 2685 } 2686 2687 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2688 ¶ms); 2689 return err; 2690 } 2691 2692 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) 2693 { 2694 struct mlx4_en_priv *priv = netdev_priv(dev); 2695 struct mlx4_en_dev *mdev = priv->mdev; 2696 struct mlx4_en_port_profile new_prof; 2697 struct bpf_prog *old_prog; 2698 struct mlx4_en_priv *tmp; 2699 int tx_changed = 0; 2700 int xdp_ring_num; 2701 int port_up = 0; 2702 int err; 2703 int i; 2704 2705 xdp_ring_num = prog ? priv->rx_ring_num : 0; 2706 2707 /* No need to reconfigure buffers when simply swapping the 2708 * program for a new one. 2709 */ 2710 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { 2711 if (prog) { 2712 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2713 if (IS_ERR(prog)) 2714 return PTR_ERR(prog); 2715 } 2716 mutex_lock(&mdev->state_lock); 2717 for (i = 0; i < priv->rx_ring_num; i++) { 2718 old_prog = rcu_dereference_protected( 2719 priv->rx_ring[i]->xdp_prog, 2720 lockdep_is_held(&mdev->state_lock)); 2721 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2722 if (old_prog) 2723 bpf_prog_put(old_prog); 2724 } 2725 mutex_unlock(&mdev->state_lock); 2726 return 0; 2727 } 2728 2729 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu)) 2730 return -EOPNOTSUPP; 2731 2732 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2733 if (!tmp) 2734 return -ENOMEM; 2735 2736 if (prog) { 2737 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2738 if (IS_ERR(prog)) { 2739 err = PTR_ERR(prog); 2740 goto out; 2741 } 2742 } 2743 2744 mutex_lock(&mdev->state_lock); 2745 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 2746 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num; 2747 2748 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) { 2749 tx_changed = 1; 2750 new_prof.tx_ring_num[TX] = 2751 MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP); 2752 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); 2753 } 2754 2755 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 2756 if (err) { 2757 if (prog) 2758 bpf_prog_sub(prog, priv->rx_ring_num - 1); 2759 goto unlock_out; 2760 } 2761 2762 if (priv->port_up) { 2763 port_up = 1; 2764 mlx4_en_stop_port(dev, 1); 2765 } 2766 2767 mlx4_en_safe_replace_resources(priv, tmp); 2768 if (tx_changed) 2769 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 2770 2771 for (i = 0; i < priv->rx_ring_num; i++) { 2772 old_prog = rcu_dereference_protected( 2773 priv->rx_ring[i]->xdp_prog, 2774 lockdep_is_held(&mdev->state_lock)); 2775 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2776 if (old_prog) 2777 bpf_prog_put(old_prog); 2778 } 2779 2780 if (port_up) { 2781 err = mlx4_en_start_port(dev); 2782 if (err) { 2783 en_err(priv, "Failed starting port %d for XDP change\n", 2784 priv->port); 2785 queue_work(mdev->workqueue, &priv->watchdog_task); 2786 } 2787 } 2788 2789 unlock_out: 2790 mutex_unlock(&mdev->state_lock); 2791 out: 2792 kfree(tmp); 2793 return err; 2794 } 2795 2796 static bool mlx4_xdp_attached(struct net_device *dev) 2797 { 2798 struct mlx4_en_priv *priv = netdev_priv(dev); 2799 2800 return !!priv->tx_ring_num[TX_XDP]; 2801 } 2802 2803 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) 2804 { 2805 switch (xdp->command) { 2806 case XDP_SETUP_PROG: 2807 return mlx4_xdp_set(dev, xdp->prog); 2808 case XDP_QUERY_PROG: 2809 xdp->prog_attached = mlx4_xdp_attached(dev); 2810 return 0; 2811 default: 2812 return -EINVAL; 2813 } 2814 } 2815 2816 static const struct net_device_ops mlx4_netdev_ops = { 2817 .ndo_open = mlx4_en_open, 2818 .ndo_stop = mlx4_en_close, 2819 .ndo_start_xmit = mlx4_en_xmit, 2820 .ndo_select_queue = mlx4_en_select_queue, 2821 .ndo_get_stats64 = mlx4_en_get_stats64, 2822 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2823 .ndo_set_mac_address = mlx4_en_set_mac, 2824 .ndo_validate_addr = eth_validate_addr, 2825 .ndo_change_mtu = mlx4_en_change_mtu, 2826 .ndo_do_ioctl = mlx4_en_ioctl, 2827 .ndo_tx_timeout = mlx4_en_tx_timeout, 2828 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2829 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2830 #ifdef CONFIG_NET_POLL_CONTROLLER 2831 .ndo_poll_controller = mlx4_en_netpoll, 2832 #endif 2833 .ndo_set_features = mlx4_en_set_features, 2834 .ndo_fix_features = mlx4_en_fix_features, 2835 .ndo_setup_tc = __mlx4_en_setup_tc, 2836 #ifdef CONFIG_RFS_ACCEL 2837 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2838 #endif 2839 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2840 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2841 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2842 .ndo_features_check = mlx4_en_features_check, 2843 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2844 .ndo_xdp = mlx4_xdp, 2845 }; 2846 2847 static const struct net_device_ops mlx4_netdev_ops_master = { 2848 .ndo_open = mlx4_en_open, 2849 .ndo_stop = mlx4_en_close, 2850 .ndo_start_xmit = mlx4_en_xmit, 2851 .ndo_select_queue = mlx4_en_select_queue, 2852 .ndo_get_stats64 = mlx4_en_get_stats64, 2853 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2854 .ndo_set_mac_address = mlx4_en_set_mac, 2855 .ndo_validate_addr = eth_validate_addr, 2856 .ndo_change_mtu = mlx4_en_change_mtu, 2857 .ndo_tx_timeout = mlx4_en_tx_timeout, 2858 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2859 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2860 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2861 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2862 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2863 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2864 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2865 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2866 .ndo_get_vf_config = mlx4_en_get_vf_config, 2867 #ifdef CONFIG_NET_POLL_CONTROLLER 2868 .ndo_poll_controller = mlx4_en_netpoll, 2869 #endif 2870 .ndo_set_features = mlx4_en_set_features, 2871 .ndo_fix_features = mlx4_en_fix_features, 2872 .ndo_setup_tc = __mlx4_en_setup_tc, 2873 #ifdef CONFIG_RFS_ACCEL 2874 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2875 #endif 2876 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2877 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2878 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2879 .ndo_features_check = mlx4_en_features_check, 2880 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2881 .ndo_xdp = mlx4_xdp, 2882 }; 2883 2884 struct mlx4_en_bond { 2885 struct work_struct work; 2886 struct mlx4_en_priv *priv; 2887 int is_bonded; 2888 struct mlx4_port_map port_map; 2889 }; 2890 2891 static void mlx4_en_bond_work(struct work_struct *work) 2892 { 2893 struct mlx4_en_bond *bond = container_of(work, 2894 struct mlx4_en_bond, 2895 work); 2896 int err = 0; 2897 struct mlx4_dev *dev = bond->priv->mdev->dev; 2898 2899 if (bond->is_bonded) { 2900 if (!mlx4_is_bonded(dev)) { 2901 err = mlx4_bond(dev); 2902 if (err) 2903 en_err(bond->priv, "Fail to bond device\n"); 2904 } 2905 if (!err) { 2906 err = mlx4_port_map_set(dev, &bond->port_map); 2907 if (err) 2908 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", 2909 bond->port_map.port1, 2910 bond->port_map.port2, 2911 err); 2912 } 2913 } else if (mlx4_is_bonded(dev)) { 2914 err = mlx4_unbond(dev); 2915 if (err) 2916 en_err(bond->priv, "Fail to unbond device\n"); 2917 } 2918 dev_put(bond->priv->dev); 2919 kfree(bond); 2920 } 2921 2922 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 2923 u8 v2p_p1, u8 v2p_p2) 2924 { 2925 struct mlx4_en_bond *bond = NULL; 2926 2927 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 2928 if (!bond) 2929 return -ENOMEM; 2930 2931 INIT_WORK(&bond->work, mlx4_en_bond_work); 2932 bond->priv = priv; 2933 bond->is_bonded = is_bonded; 2934 bond->port_map.port1 = v2p_p1; 2935 bond->port_map.port2 = v2p_p2; 2936 dev_hold(priv->dev); 2937 queue_work(priv->mdev->workqueue, &bond->work); 2938 return 0; 2939 } 2940 2941 int mlx4_en_netdev_event(struct notifier_block *this, 2942 unsigned long event, void *ptr) 2943 { 2944 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2945 u8 port = 0; 2946 struct mlx4_en_dev *mdev; 2947 struct mlx4_dev *dev; 2948 int i, num_eth_ports = 0; 2949 bool do_bond = true; 2950 struct mlx4_en_priv *priv; 2951 u8 v2p_port1 = 0; 2952 u8 v2p_port2 = 0; 2953 2954 if (!net_eq(dev_net(ndev), &init_net)) 2955 return NOTIFY_DONE; 2956 2957 mdev = container_of(this, struct mlx4_en_dev, nb); 2958 dev = mdev->dev; 2959 2960 /* Go into this mode only when two network devices set on two ports 2961 * of the same mlx4 device are slaves of the same bonding master 2962 */ 2963 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 2964 ++num_eth_ports; 2965 if (!port && (mdev->pndev[i] == ndev)) 2966 port = i; 2967 mdev->upper[i] = mdev->pndev[i] ? 2968 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 2969 /* condition not met: network device is a slave */ 2970 if (!mdev->upper[i]) 2971 do_bond = false; 2972 if (num_eth_ports < 2) 2973 continue; 2974 /* condition not met: same master */ 2975 if (mdev->upper[i] != mdev->upper[i-1]) 2976 do_bond = false; 2977 } 2978 /* condition not met: 2 salves */ 2979 do_bond = (num_eth_ports == 2) ? do_bond : false; 2980 2981 /* handle only events that come with enough info */ 2982 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 2983 return NOTIFY_DONE; 2984 2985 priv = netdev_priv(ndev); 2986 if (do_bond) { 2987 struct netdev_notifier_bonding_info *notifier_info = ptr; 2988 struct netdev_bonding_info *bonding_info = 2989 ¬ifier_info->bonding_info; 2990 2991 /* required mode 1, 2 or 4 */ 2992 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 2993 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 2994 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 2995 do_bond = false; 2996 2997 /* require exactly 2 slaves */ 2998 if (bonding_info->master.num_slaves != 2) 2999 do_bond = false; 3000 3001 /* calc v2p */ 3002 if (do_bond) { 3003 if (bonding_info->master.bond_mode == 3004 BOND_MODE_ACTIVEBACKUP) { 3005 /* in active-backup mode virtual ports are 3006 * mapped to the physical port of the active 3007 * slave */ 3008 if (bonding_info->slave.state == 3009 BOND_STATE_BACKUP) { 3010 if (port == 1) { 3011 v2p_port1 = 2; 3012 v2p_port2 = 2; 3013 } else { 3014 v2p_port1 = 1; 3015 v2p_port2 = 1; 3016 } 3017 } else { /* BOND_STATE_ACTIVE */ 3018 if (port == 1) { 3019 v2p_port1 = 1; 3020 v2p_port2 = 1; 3021 } else { 3022 v2p_port1 = 2; 3023 v2p_port2 = 2; 3024 } 3025 } 3026 } else { /* Active-Active */ 3027 /* in active-active mode a virtual port is 3028 * mapped to the native physical port if and only 3029 * if the physical port is up */ 3030 __s8 link = bonding_info->slave.link; 3031 3032 if (port == 1) 3033 v2p_port2 = 2; 3034 else 3035 v2p_port1 = 1; 3036 if ((link == BOND_LINK_UP) || 3037 (link == BOND_LINK_FAIL)) { 3038 if (port == 1) 3039 v2p_port1 = 1; 3040 else 3041 v2p_port2 = 2; 3042 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 3043 if (port == 1) 3044 v2p_port1 = 2; 3045 else 3046 v2p_port2 = 1; 3047 } 3048 } 3049 } 3050 } 3051 3052 mlx4_en_queue_bond_work(priv, do_bond, 3053 v2p_port1, v2p_port2); 3054 3055 return NOTIFY_DONE; 3056 } 3057 3058 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 3059 struct mlx4_en_stats_bitmap *stats_bitmap, 3060 u8 rx_ppp, u8 rx_pause, 3061 u8 tx_ppp, u8 tx_pause) 3062 { 3063 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 3064 3065 if (!mlx4_is_slave(dev) && 3066 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 3067 mutex_lock(&stats_bitmap->mutex); 3068 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 3069 3070 if (rx_ppp) 3071 bitmap_set(stats_bitmap->bitmap, last_i, 3072 NUM_FLOW_PRIORITY_STATS_RX); 3073 last_i += NUM_FLOW_PRIORITY_STATS_RX; 3074 3075 if (rx_pause && !(rx_ppp)) 3076 bitmap_set(stats_bitmap->bitmap, last_i, 3077 NUM_FLOW_STATS_RX); 3078 last_i += NUM_FLOW_STATS_RX; 3079 3080 if (tx_ppp) 3081 bitmap_set(stats_bitmap->bitmap, last_i, 3082 NUM_FLOW_PRIORITY_STATS_TX); 3083 last_i += NUM_FLOW_PRIORITY_STATS_TX; 3084 3085 if (tx_pause && !(tx_ppp)) 3086 bitmap_set(stats_bitmap->bitmap, last_i, 3087 NUM_FLOW_STATS_TX); 3088 last_i += NUM_FLOW_STATS_TX; 3089 3090 mutex_unlock(&stats_bitmap->mutex); 3091 } 3092 } 3093 3094 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 3095 struct mlx4_en_stats_bitmap *stats_bitmap, 3096 u8 rx_ppp, u8 rx_pause, 3097 u8 tx_ppp, u8 tx_pause) 3098 { 3099 int last_i = 0; 3100 3101 mutex_init(&stats_bitmap->mutex); 3102 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 3103 3104 if (mlx4_is_slave(dev)) { 3105 bitmap_set(stats_bitmap->bitmap, last_i + 3106 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 3107 bitmap_set(stats_bitmap->bitmap, last_i + 3108 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 3109 bitmap_set(stats_bitmap->bitmap, last_i + 3110 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 3111 bitmap_set(stats_bitmap->bitmap, last_i + 3112 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 3113 bitmap_set(stats_bitmap->bitmap, last_i + 3114 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 3115 bitmap_set(stats_bitmap->bitmap, last_i + 3116 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 3117 } else { 3118 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 3119 } 3120 last_i += NUM_MAIN_STATS; 3121 3122 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 3123 last_i += NUM_PORT_STATS; 3124 3125 if (mlx4_is_master(dev)) 3126 bitmap_set(stats_bitmap->bitmap, last_i, 3127 NUM_PF_STATS); 3128 last_i += NUM_PF_STATS; 3129 3130 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 3131 rx_ppp, rx_pause, 3132 tx_ppp, tx_pause); 3133 last_i += NUM_FLOW_STATS; 3134 3135 if (!mlx4_is_slave(dev)) 3136 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 3137 last_i += NUM_PKT_STATS; 3138 3139 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); 3140 last_i += NUM_XDP_STATS; 3141 } 3142 3143 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 3144 struct mlx4_en_port_profile *prof) 3145 { 3146 struct net_device *dev; 3147 struct mlx4_en_priv *priv; 3148 int i, t; 3149 int err; 3150 3151 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3152 MAX_TX_RINGS, MAX_RX_RINGS); 3153 if (dev == NULL) 3154 return -ENOMEM; 3155 3156 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]); 3157 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 3158 3159 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 3160 dev->dev_port = port - 1; 3161 3162 /* 3163 * Initialize driver private data 3164 */ 3165 3166 priv = netdev_priv(dev); 3167 memset(priv, 0, sizeof(struct mlx4_en_priv)); 3168 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3169 spin_lock_init(&priv->stats_lock); 3170 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3171 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3172 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3173 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3174 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3175 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 3176 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 3177 #ifdef CONFIG_RFS_ACCEL 3178 INIT_LIST_HEAD(&priv->filters); 3179 spin_lock_init(&priv->filters_lock); 3180 #endif 3181 3182 priv->dev = dev; 3183 priv->mdev = mdev; 3184 priv->ddev = &mdev->pdev->dev; 3185 priv->prof = prof; 3186 priv->port = port; 3187 priv->port_up = false; 3188 priv->flags = prof->flags; 3189 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 3190 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 3191 MLX4_WQE_CTRL_SOLICITED); 3192 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 3193 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 3194 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 3195 3196 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 3197 priv->tx_ring_num[t] = prof->tx_ring_num[t]; 3198 if (!priv->tx_ring_num[t]) 3199 continue; 3200 3201 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * 3202 MAX_TX_RINGS, GFP_KERNEL); 3203 if (!priv->tx_ring[t]) { 3204 err = -ENOMEM; 3205 goto err_free_tx; 3206 } 3207 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * 3208 MAX_TX_RINGS, GFP_KERNEL); 3209 if (!priv->tx_cq[t]) { 3210 kfree(priv->tx_ring[t]); 3211 err = -ENOMEM; 3212 goto out; 3213 } 3214 } 3215 priv->rx_ring_num = prof->rx_ring_num; 3216 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 3217 priv->cqe_size = mdev->dev->caps.cqe_size; 3218 priv->mac_index = -1; 3219 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3220 #ifdef CONFIG_MLX4_EN_DCB 3221 if (!mlx4_is_slave(priv->mdev->dev)) { 3222 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | 3223 DCB_CAP_DCBX_VER_IEEE; 3224 priv->flags |= MLX4_EN_DCB_ENABLED; 3225 priv->cee_config.pfc_state = false; 3226 3227 for (i = 0; i < MLX4_EN_NUM_UP; i++) 3228 priv->cee_config.dcb_pfc[i] = pfc_disabled; 3229 3230 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3231 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3232 } else { 3233 en_info(priv, "enabling only PFC DCB ops\n"); 3234 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 3235 } 3236 } 3237 #endif 3238 3239 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 3240 INIT_HLIST_HEAD(&priv->mac_hash[i]); 3241 3242 /* Query for default mac and max mtu */ 3243 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 3244 3245 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 3246 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 3247 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3248 3249 /* Set default MAC */ 3250 dev->addr_len = ETH_ALEN; 3251 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3252 if (!is_valid_ether_addr(dev->dev_addr)) { 3253 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 3254 priv->port, dev->dev_addr); 3255 err = -EINVAL; 3256 goto out; 3257 } else if (mlx4_is_slave(priv->mdev->dev) && 3258 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3259 /* Random MAC was assigned in mlx4_slave_cap 3260 * in mlx4_core module 3261 */ 3262 dev->addr_assign_type |= NET_ADDR_RANDOM; 3263 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3264 } 3265 3266 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3267 3268 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3269 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3270 err = mlx4_en_alloc_resources(priv); 3271 if (err) 3272 goto out; 3273 3274 /* Initialize time stamping config */ 3275 priv->hwtstamp_config.flags = 0; 3276 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3277 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3278 3279 /* Allocate page for receive rings */ 3280 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3281 MLX4_EN_PAGE_SIZE); 3282 if (err) { 3283 en_err(priv, "Failed to allocate page for rx qps\n"); 3284 goto out; 3285 } 3286 priv->allocated = 1; 3287 3288 /* 3289 * Initialize netdev entry points 3290 */ 3291 if (mlx4_is_master(priv->mdev->dev)) 3292 dev->netdev_ops = &mlx4_netdev_ops_master; 3293 else 3294 dev->netdev_ops = &mlx4_netdev_ops; 3295 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3296 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 3297 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3298 3299 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3300 3301 /* 3302 * Set driver features 3303 */ 3304 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3305 if (mdev->LSO_support) 3306 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3307 3308 dev->vlan_features = dev->hw_features; 3309 3310 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3311 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3312 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3313 NETIF_F_HW_VLAN_CTAG_FILTER; 3314 dev->hw_features |= NETIF_F_LOOPBACK | 3315 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3316 3317 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3318 dev->features |= NETIF_F_HW_VLAN_STAG_RX | 3319 NETIF_F_HW_VLAN_STAG_FILTER; 3320 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3321 } 3322 3323 if (mlx4_is_slave(mdev->dev)) { 3324 bool vlan_offload_disabled; 3325 int phv; 3326 3327 err = get_phv_bit(mdev->dev, port, &phv); 3328 if (!err && phv) { 3329 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3330 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3331 } 3332 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port, 3333 &vlan_offload_disabled); 3334 if (!err && vlan_offload_disabled) { 3335 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3336 NETIF_F_HW_VLAN_CTAG_RX | 3337 NETIF_F_HW_VLAN_STAG_TX | 3338 NETIF_F_HW_VLAN_STAG_RX); 3339 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3340 NETIF_F_HW_VLAN_CTAG_RX | 3341 NETIF_F_HW_VLAN_STAG_TX | 3342 NETIF_F_HW_VLAN_STAG_RX); 3343 } 3344 } else { 3345 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3346 !(mdev->dev->caps.flags2 & 3347 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3348 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3349 } 3350 3351 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3352 dev->hw_features |= NETIF_F_RXFCS; 3353 3354 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3355 dev->hw_features |= NETIF_F_RXALL; 3356 3357 if (mdev->dev->caps.steering_mode == 3358 MLX4_STEERING_MODE_DEVICE_MANAGED && 3359 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3360 dev->hw_features |= NETIF_F_NTUPLE; 3361 3362 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3363 dev->priv_flags |= IFF_UNICAST_FLT; 3364 3365 /* Setting a default hash function value */ 3366 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3367 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3368 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3369 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3370 } else { 3371 en_warn(priv, 3372 "No RSS hash capabilities exposed, using Toeplitz\n"); 3373 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3374 } 3375 3376 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3377 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3378 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3379 NETIF_F_GSO_PARTIAL; 3380 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3381 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3382 NETIF_F_GSO_PARTIAL; 3383 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3384 } 3385 3386 /* MTU range: 46 - hw-specific max */ 3387 dev->min_mtu = MLX4_EN_MIN_MTU; 3388 dev->max_mtu = priv->max_mtu; 3389 3390 mdev->pndev[port] = dev; 3391 mdev->upper[port] = NULL; 3392 3393 netif_carrier_off(dev); 3394 mlx4_en_set_default_moderation(priv); 3395 3396 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]); 3397 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3398 3399 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3400 3401 /* Configure port */ 3402 mlx4_en_calc_rx_buf(dev); 3403 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3404 priv->rx_skb_size + ETH_FCS_LEN, 3405 prof->tx_pause, prof->tx_ppp, 3406 prof->rx_pause, prof->rx_ppp); 3407 if (err) { 3408 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3409 priv->port, err); 3410 goto out; 3411 } 3412 3413 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3414 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3415 if (err) { 3416 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3417 err); 3418 goto out; 3419 } 3420 } 3421 3422 /* Init port */ 3423 en_warn(priv, "Initializing port\n"); 3424 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3425 if (err) { 3426 en_err(priv, "Failed Initializing port\n"); 3427 goto out; 3428 } 3429 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3430 3431 /* Initialize time stamp mechanism */ 3432 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3433 mlx4_en_init_timestamp(mdev); 3434 3435 queue_delayed_work(mdev->workqueue, &priv->service_task, 3436 SERVICE_TASK_DELAY); 3437 3438 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3439 mdev->profile.prof[priv->port].rx_ppp, 3440 mdev->profile.prof[priv->port].rx_pause, 3441 mdev->profile.prof[priv->port].tx_ppp, 3442 mdev->profile.prof[priv->port].tx_pause); 3443 3444 err = register_netdev(dev); 3445 if (err) { 3446 en_err(priv, "Netdev registration failed for port %d\n", port); 3447 goto out; 3448 } 3449 3450 priv->registered = 1; 3451 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3452 dev); 3453 3454 return 0; 3455 3456 err_free_tx: 3457 while (t--) { 3458 kfree(priv->tx_ring[t]); 3459 kfree(priv->tx_cq[t]); 3460 } 3461 out: 3462 mlx4_en_destroy_netdev(dev); 3463 return err; 3464 } 3465 3466 int mlx4_en_reset_config(struct net_device *dev, 3467 struct hwtstamp_config ts_config, 3468 netdev_features_t features) 3469 { 3470 struct mlx4_en_priv *priv = netdev_priv(dev); 3471 struct mlx4_en_dev *mdev = priv->mdev; 3472 struct mlx4_en_port_profile new_prof; 3473 struct mlx4_en_priv *tmp; 3474 int port_up = 0; 3475 int err = 0; 3476 3477 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3478 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3479 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3480 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3481 return 0; /* Nothing to change */ 3482 3483 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3484 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3485 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3486 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3487 return -EINVAL; 3488 } 3489 3490 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3491 if (!tmp) 3492 return -ENOMEM; 3493 3494 mutex_lock(&mdev->state_lock); 3495 3496 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3497 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3498 3499 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 3500 if (err) 3501 goto out; 3502 3503 if (priv->port_up) { 3504 port_up = 1; 3505 mlx4_en_stop_port(dev, 1); 3506 } 3507 3508 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", 3509 ts_config.rx_filter, 3510 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3511 3512 mlx4_en_safe_replace_resources(priv, tmp); 3513 3514 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3515 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3516 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3517 else 3518 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3519 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3520 /* RX time-stamping is OFF, update the RX vlan offload 3521 * to the latest wanted state 3522 */ 3523 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3524 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3525 else 3526 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3527 } 3528 3529 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3530 if (features & NETIF_F_RXFCS) 3531 dev->features |= NETIF_F_RXFCS; 3532 else 3533 dev->features &= ~NETIF_F_RXFCS; 3534 } 3535 3536 /* RX vlan offload and RX time-stamping can't co-exist ! 3537 * Regardless of the caller's choice, 3538 * Turn Off RX vlan offload in case of time-stamping is ON 3539 */ 3540 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3541 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3542 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3543 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3544 } 3545 3546 if (port_up) { 3547 err = mlx4_en_start_port(dev); 3548 if (err) 3549 en_err(priv, "Failed starting port\n"); 3550 } 3551 3552 out: 3553 mutex_unlock(&mdev->state_lock); 3554 kfree(tmp); 3555 if (!err) 3556 netdev_features_change(dev); 3557 return err; 3558 } 3559