1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/bpf.h> 35 #include <linux/etherdevice.h> 36 #include <linux/tcp.h> 37 #include <linux/if_vlan.h> 38 #include <linux/delay.h> 39 #include <linux/slab.h> 40 #include <linux/hash.h> 41 #include <net/ip.h> 42 #include <net/vxlan.h> 43 #include <net/devlink.h> 44 45 #include <linux/mlx4/driver.h> 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/cmd.h> 48 #include <linux/mlx4/cq.h> 49 50 #include "mlx4_en.h" 51 #include "en_port.h" 52 53 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \ 54 XDP_PACKET_HEADROOM - \ 55 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))) 56 57 int mlx4_en_setup_tc(struct net_device *dev, u8 up) 58 { 59 struct mlx4_en_priv *priv = netdev_priv(dev); 60 int i; 61 unsigned int offset = 0; 62 63 if (up && up != MLX4_EN_NUM_UP_HIGH) 64 return -EINVAL; 65 66 netdev_set_num_tc(dev, up); 67 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 68 /* Partition Tx queues evenly amongst UP's */ 69 for (i = 0; i < up; i++) { 70 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); 71 offset += priv->num_tx_rings_p_up; 72 } 73 74 #ifdef CONFIG_MLX4_EN_DCB 75 if (!mlx4_is_slave(priv->mdev->dev)) { 76 if (up) { 77 if (priv->dcbx_cap) 78 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 79 } else { 80 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 81 priv->cee_config.pfc_state = false; 82 } 83 } 84 #endif /* CONFIG_MLX4_EN_DCB */ 85 86 return 0; 87 } 88 89 int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) 90 { 91 struct mlx4_en_priv *priv = netdev_priv(dev); 92 struct mlx4_en_dev *mdev = priv->mdev; 93 struct mlx4_en_port_profile new_prof; 94 struct mlx4_en_priv *tmp; 95 int total_count; 96 int port_up = 0; 97 int err = 0; 98 99 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 100 if (!tmp) 101 return -ENOMEM; 102 103 mutex_lock(&mdev->state_lock); 104 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 105 new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW : 106 MLX4_EN_NUM_UP_HIGH; 107 new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up * 108 new_prof.num_up; 109 total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP]; 110 if (total_count > MAX_TX_RINGS) { 111 err = -EINVAL; 112 en_err(priv, 113 "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", 114 total_count, MAX_TX_RINGS); 115 goto out; 116 } 117 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); 118 if (err) 119 goto out; 120 121 if (priv->port_up) { 122 port_up = 1; 123 mlx4_en_stop_port(dev, 1); 124 } 125 126 mlx4_en_safe_replace_resources(priv, tmp); 127 if (port_up) { 128 err = mlx4_en_start_port(dev); 129 if (err) { 130 en_err(priv, "Failed starting port for setup TC\n"); 131 goto out; 132 } 133 } 134 135 err = mlx4_en_setup_tc(dev, tc); 136 out: 137 mutex_unlock(&mdev->state_lock); 138 kfree(tmp); 139 return err; 140 } 141 142 static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, 143 void *type_data) 144 { 145 struct tc_mqprio_qopt *mqprio = type_data; 146 147 if (type != TC_SETUP_QDISC_MQPRIO) 148 return -EOPNOTSUPP; 149 150 if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) 151 return -EINVAL; 152 153 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 154 155 return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc); 156 } 157 158 #ifdef CONFIG_RFS_ACCEL 159 160 struct mlx4_en_filter { 161 struct list_head next; 162 struct work_struct work; 163 164 u8 ip_proto; 165 __be32 src_ip; 166 __be32 dst_ip; 167 __be16 src_port; 168 __be16 dst_port; 169 170 int rxq_index; 171 struct mlx4_en_priv *priv; 172 u32 flow_id; /* RFS infrastructure id */ 173 int id; /* mlx4_en driver id */ 174 u64 reg_id; /* Flow steering API id */ 175 u8 activated; /* Used to prevent expiry before filter 176 * is attached 177 */ 178 struct hlist_node filter_chain; 179 }; 180 181 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 182 183 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 184 { 185 switch (ip_proto) { 186 case IPPROTO_UDP: 187 return MLX4_NET_TRANS_RULE_ID_UDP; 188 case IPPROTO_TCP: 189 return MLX4_NET_TRANS_RULE_ID_TCP; 190 default: 191 return MLX4_NET_TRANS_RULE_NUM; 192 } 193 }; 194 195 /* Must not acquire state_lock, as its corresponding work_sync 196 * is done under it. 197 */ 198 static void mlx4_en_filter_work(struct work_struct *work) 199 { 200 struct mlx4_en_filter *filter = container_of(work, 201 struct mlx4_en_filter, 202 work); 203 struct mlx4_en_priv *priv = filter->priv; 204 struct mlx4_spec_list spec_tcp_udp = { 205 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 206 { 207 .tcp_udp = { 208 .dst_port = filter->dst_port, 209 .dst_port_msk = (__force __be16)-1, 210 .src_port = filter->src_port, 211 .src_port_msk = (__force __be16)-1, 212 }, 213 }, 214 }; 215 struct mlx4_spec_list spec_ip = { 216 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 217 { 218 .ipv4 = { 219 .dst_ip = filter->dst_ip, 220 .dst_ip_msk = (__force __be32)-1, 221 .src_ip = filter->src_ip, 222 .src_ip_msk = (__force __be32)-1, 223 }, 224 }, 225 }; 226 struct mlx4_spec_list spec_eth = { 227 .id = MLX4_NET_TRANS_RULE_ID_ETH, 228 }; 229 struct mlx4_net_trans_rule rule = { 230 .list = LIST_HEAD_INIT(rule.list), 231 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 232 .exclusive = 1, 233 .allow_loopback = 1, 234 .promisc_mode = MLX4_FS_REGULAR, 235 .port = priv->port, 236 .priority = MLX4_DOMAIN_RFS, 237 }; 238 int rc; 239 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 240 241 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 242 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 243 filter->ip_proto); 244 goto ignore; 245 } 246 list_add_tail(&spec_eth.list, &rule.list); 247 list_add_tail(&spec_ip.list, &rule.list); 248 list_add_tail(&spec_tcp_udp.list, &rule.list); 249 250 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 251 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 252 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 253 254 filter->activated = 0; 255 256 if (filter->reg_id) { 257 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 258 if (rc && rc != -ENOENT) 259 en_err(priv, "Error detaching flow. rc = %d\n", rc); 260 } 261 262 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 263 if (rc) 264 en_err(priv, "Error attaching flow. err = %d\n", rc); 265 266 ignore: 267 mlx4_en_filter_rfs_expire(priv); 268 269 filter->activated = 1; 270 } 271 272 static inline struct hlist_head * 273 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 274 __be16 src_port, __be16 dst_port) 275 { 276 unsigned long l; 277 int bucket_idx; 278 279 l = (__force unsigned long)src_port | 280 ((__force unsigned long)dst_port << 2); 281 l ^= (__force unsigned long)(src_ip ^ dst_ip); 282 283 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 284 285 return &priv->filter_hash[bucket_idx]; 286 } 287 288 static struct mlx4_en_filter * 289 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 290 __be32 dst_ip, u8 ip_proto, __be16 src_port, 291 __be16 dst_port, u32 flow_id) 292 { 293 struct mlx4_en_filter *filter = NULL; 294 295 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 296 if (!filter) 297 return NULL; 298 299 filter->priv = priv; 300 filter->rxq_index = rxq_index; 301 INIT_WORK(&filter->work, mlx4_en_filter_work); 302 303 filter->src_ip = src_ip; 304 filter->dst_ip = dst_ip; 305 filter->ip_proto = ip_proto; 306 filter->src_port = src_port; 307 filter->dst_port = dst_port; 308 309 filter->flow_id = flow_id; 310 311 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 312 313 list_add_tail(&filter->next, &priv->filters); 314 hlist_add_head(&filter->filter_chain, 315 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 316 dst_port)); 317 318 return filter; 319 } 320 321 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 322 { 323 struct mlx4_en_priv *priv = filter->priv; 324 int rc; 325 326 list_del(&filter->next); 327 328 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 329 if (rc && rc != -ENOENT) 330 en_err(priv, "Error detaching flow. rc = %d\n", rc); 331 332 kfree(filter); 333 } 334 335 static inline struct mlx4_en_filter * 336 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 337 u8 ip_proto, __be16 src_port, __be16 dst_port) 338 { 339 struct mlx4_en_filter *filter; 340 struct mlx4_en_filter *ret = NULL; 341 342 hlist_for_each_entry(filter, 343 filter_hash_bucket(priv, src_ip, dst_ip, 344 src_port, dst_port), 345 filter_chain) { 346 if (filter->src_ip == src_ip && 347 filter->dst_ip == dst_ip && 348 filter->ip_proto == ip_proto && 349 filter->src_port == src_port && 350 filter->dst_port == dst_port) { 351 ret = filter; 352 break; 353 } 354 } 355 356 return ret; 357 } 358 359 static int 360 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 361 u16 rxq_index, u32 flow_id) 362 { 363 struct mlx4_en_priv *priv = netdev_priv(net_dev); 364 struct mlx4_en_filter *filter; 365 const struct iphdr *ip; 366 const __be16 *ports; 367 u8 ip_proto; 368 __be32 src_ip; 369 __be32 dst_ip; 370 __be16 src_port; 371 __be16 dst_port; 372 int nhoff = skb_network_offset(skb); 373 int ret = 0; 374 375 if (skb->protocol != htons(ETH_P_IP)) 376 return -EPROTONOSUPPORT; 377 378 ip = (const struct iphdr *)(skb->data + nhoff); 379 if (ip_is_fragment(ip)) 380 return -EPROTONOSUPPORT; 381 382 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 383 return -EPROTONOSUPPORT; 384 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 385 386 ip_proto = ip->protocol; 387 src_ip = ip->saddr; 388 dst_ip = ip->daddr; 389 src_port = ports[0]; 390 dst_port = ports[1]; 391 392 spin_lock_bh(&priv->filters_lock); 393 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 394 src_port, dst_port); 395 if (filter) { 396 if (filter->rxq_index == rxq_index) 397 goto out; 398 399 filter->rxq_index = rxq_index; 400 } else { 401 filter = mlx4_en_filter_alloc(priv, rxq_index, 402 src_ip, dst_ip, ip_proto, 403 src_port, dst_port, flow_id); 404 if (!filter) { 405 ret = -ENOMEM; 406 goto err; 407 } 408 } 409 410 queue_work(priv->mdev->workqueue, &filter->work); 411 412 out: 413 ret = filter->id; 414 err: 415 spin_unlock_bh(&priv->filters_lock); 416 417 return ret; 418 } 419 420 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 421 { 422 struct mlx4_en_filter *filter, *tmp; 423 LIST_HEAD(del_list); 424 425 spin_lock_bh(&priv->filters_lock); 426 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 427 list_move(&filter->next, &del_list); 428 hlist_del(&filter->filter_chain); 429 } 430 spin_unlock_bh(&priv->filters_lock); 431 432 list_for_each_entry_safe(filter, tmp, &del_list, next) { 433 cancel_work_sync(&filter->work); 434 mlx4_en_filter_free(filter); 435 } 436 } 437 438 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 439 { 440 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 441 LIST_HEAD(del_list); 442 int i = 0; 443 444 spin_lock_bh(&priv->filters_lock); 445 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 446 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 447 break; 448 449 if (filter->activated && 450 !work_pending(&filter->work) && 451 rps_may_expire_flow(priv->dev, 452 filter->rxq_index, filter->flow_id, 453 filter->id)) { 454 list_move(&filter->next, &del_list); 455 hlist_del(&filter->filter_chain); 456 } else 457 last_filter = filter; 458 459 i++; 460 } 461 462 if (last_filter && (&last_filter->next != priv->filters.next)) 463 list_move(&priv->filters, &last_filter->next); 464 465 spin_unlock_bh(&priv->filters_lock); 466 467 list_for_each_entry_safe(filter, tmp, &del_list, next) 468 mlx4_en_filter_free(filter); 469 } 470 #endif 471 472 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 473 __be16 proto, u16 vid) 474 { 475 struct mlx4_en_priv *priv = netdev_priv(dev); 476 struct mlx4_en_dev *mdev = priv->mdev; 477 int err; 478 int idx; 479 480 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 481 482 set_bit(vid, priv->active_vlans); 483 484 /* Add VID to port VLAN filter */ 485 mutex_lock(&mdev->state_lock); 486 if (mdev->device_up && priv->port_up) { 487 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 488 if (err) { 489 en_err(priv, "Failed configuring VLAN filter\n"); 490 goto out; 491 } 492 } 493 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); 494 if (err) 495 en_dbg(HW, priv, "Failed adding vlan %d\n", vid); 496 497 out: 498 mutex_unlock(&mdev->state_lock); 499 return err; 500 } 501 502 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 503 __be16 proto, u16 vid) 504 { 505 struct mlx4_en_priv *priv = netdev_priv(dev); 506 struct mlx4_en_dev *mdev = priv->mdev; 507 int err = 0; 508 509 en_dbg(HW, priv, "Killing VID:%d\n", vid); 510 511 clear_bit(vid, priv->active_vlans); 512 513 /* Remove VID from port VLAN filter */ 514 mutex_lock(&mdev->state_lock); 515 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 516 517 if (mdev->device_up && priv->port_up) { 518 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 519 if (err) 520 en_err(priv, "Failed configuring VLAN filter\n"); 521 } 522 mutex_unlock(&mdev->state_lock); 523 524 return err; 525 } 526 527 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 528 { 529 int i; 530 for (i = ETH_ALEN - 1; i >= 0; --i) { 531 dst_mac[i] = src_mac & 0xff; 532 src_mac >>= 8; 533 } 534 memset(&dst_mac[ETH_ALEN], 0, 2); 535 } 536 537 538 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 539 int qpn, u64 *reg_id) 540 { 541 int err; 542 543 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 544 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 545 return 0; /* do nothing */ 546 547 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 548 MLX4_DOMAIN_NIC, reg_id); 549 if (err) { 550 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 551 return err; 552 } 553 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); 554 return 0; 555 } 556 557 558 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 559 unsigned char *mac, int *qpn, u64 *reg_id) 560 { 561 struct mlx4_en_dev *mdev = priv->mdev; 562 struct mlx4_dev *dev = mdev->dev; 563 int err; 564 565 switch (dev->caps.steering_mode) { 566 case MLX4_STEERING_MODE_B0: { 567 struct mlx4_qp qp; 568 u8 gid[16] = {0}; 569 570 qp.qpn = *qpn; 571 memcpy(&gid[10], mac, ETH_ALEN); 572 gid[5] = priv->port; 573 574 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 575 break; 576 } 577 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 578 struct mlx4_spec_list spec_eth = { {NULL} }; 579 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 580 581 struct mlx4_net_trans_rule rule = { 582 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 583 .exclusive = 0, 584 .allow_loopback = 1, 585 .promisc_mode = MLX4_FS_REGULAR, 586 .priority = MLX4_DOMAIN_NIC, 587 }; 588 589 rule.port = priv->port; 590 rule.qpn = *qpn; 591 INIT_LIST_HEAD(&rule.list); 592 593 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 594 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 595 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 596 list_add_tail(&spec_eth.list, &rule.list); 597 598 err = mlx4_flow_attach(dev, &rule, reg_id); 599 break; 600 } 601 default: 602 return -EINVAL; 603 } 604 if (err) 605 en_warn(priv, "Failed Attaching Unicast\n"); 606 607 return err; 608 } 609 610 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 611 unsigned char *mac, int qpn, u64 reg_id) 612 { 613 struct mlx4_en_dev *mdev = priv->mdev; 614 struct mlx4_dev *dev = mdev->dev; 615 616 switch (dev->caps.steering_mode) { 617 case MLX4_STEERING_MODE_B0: { 618 struct mlx4_qp qp; 619 u8 gid[16] = {0}; 620 621 qp.qpn = qpn; 622 memcpy(&gid[10], mac, ETH_ALEN); 623 gid[5] = priv->port; 624 625 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 626 break; 627 } 628 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 629 mlx4_flow_detach(dev, reg_id); 630 break; 631 } 632 default: 633 en_err(priv, "Invalid steering mode.\n"); 634 } 635 } 636 637 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 638 { 639 struct mlx4_en_dev *mdev = priv->mdev; 640 struct mlx4_dev *dev = mdev->dev; 641 int index = 0; 642 int err = 0; 643 int *qpn = &priv->base_qpn; 644 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 645 646 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 647 priv->dev->dev_addr); 648 index = mlx4_register_mac(dev, priv->port, mac); 649 if (index < 0) { 650 err = index; 651 en_err(priv, "Failed adding MAC: %pM\n", 652 priv->dev->dev_addr); 653 return err; 654 } 655 656 en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode); 657 658 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 659 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 660 *qpn = base_qpn + index; 661 return 0; 662 } 663 664 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP, 665 MLX4_RES_USAGE_DRIVER); 666 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 667 if (err) { 668 en_err(priv, "Failed to reserve qp for mac registration\n"); 669 mlx4_unregister_mac(dev, priv->port, mac); 670 return err; 671 } 672 673 return 0; 674 } 675 676 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 677 { 678 struct mlx4_en_dev *mdev = priv->mdev; 679 struct mlx4_dev *dev = mdev->dev; 680 int qpn = priv->base_qpn; 681 682 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 683 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 684 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 685 priv->dev->dev_addr); 686 mlx4_unregister_mac(dev, priv->port, mac); 687 } else { 688 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 689 priv->port, qpn); 690 mlx4_qp_release_range(dev, qpn, 1); 691 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 692 } 693 } 694 695 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 696 unsigned char *new_mac, unsigned char *prev_mac) 697 { 698 struct mlx4_en_dev *mdev = priv->mdev; 699 struct mlx4_dev *dev = mdev->dev; 700 int err = 0; 701 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); 702 703 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 704 struct hlist_head *bucket; 705 unsigned int mac_hash; 706 struct mlx4_mac_entry *entry; 707 struct hlist_node *tmp; 708 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); 709 710 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 711 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 712 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 713 mlx4_en_uc_steer_release(priv, entry->mac, 714 qpn, entry->reg_id); 715 mlx4_unregister_mac(dev, priv->port, 716 prev_mac_u64); 717 hlist_del_rcu(&entry->hlist); 718 synchronize_rcu(); 719 memcpy(entry->mac, new_mac, ETH_ALEN); 720 entry->reg_id = 0; 721 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 722 hlist_add_head_rcu(&entry->hlist, 723 &priv->mac_hash[mac_hash]); 724 mlx4_register_mac(dev, priv->port, new_mac_u64); 725 err = mlx4_en_uc_steer_add(priv, new_mac, 726 &qpn, 727 &entry->reg_id); 728 if (err) 729 return err; 730 if (priv->tunnel_reg_id) { 731 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 732 priv->tunnel_reg_id = 0; 733 } 734 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, 735 &priv->tunnel_reg_id); 736 return err; 737 } 738 } 739 return -EINVAL; 740 } 741 742 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 743 } 744 745 static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv, 746 unsigned char new_mac[ETH_ALEN + 2]) 747 { 748 struct mlx4_en_dev *mdev = priv->mdev; 749 int err; 750 751 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN)) 752 return; 753 754 err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac); 755 if (err) 756 en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n", 757 new_mac, priv->port, err); 758 } 759 760 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, 761 unsigned char new_mac[ETH_ALEN + 2]) 762 { 763 int err = 0; 764 765 if (priv->port_up) { 766 /* Remove old MAC and insert the new one */ 767 err = mlx4_en_replace_mac(priv, priv->base_qpn, 768 new_mac, priv->current_mac); 769 if (err) 770 en_err(priv, "Failed changing HW MAC address\n"); 771 } else 772 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 773 774 if (!err) 775 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); 776 777 return err; 778 } 779 780 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 781 { 782 struct mlx4_en_priv *priv = netdev_priv(dev); 783 struct mlx4_en_dev *mdev = priv->mdev; 784 struct sockaddr *saddr = addr; 785 unsigned char new_mac[ETH_ALEN + 2]; 786 int err; 787 788 if (!is_valid_ether_addr(saddr->sa_data)) 789 return -EADDRNOTAVAIL; 790 791 mutex_lock(&mdev->state_lock); 792 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 793 err = mlx4_en_do_set_mac(priv, new_mac); 794 if (err) 795 goto out; 796 797 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 798 mlx4_en_update_user_mac(priv, new_mac); 799 out: 800 mutex_unlock(&mdev->state_lock); 801 802 return err; 803 } 804 805 static void mlx4_en_clear_list(struct net_device *dev) 806 { 807 struct mlx4_en_priv *priv = netdev_priv(dev); 808 struct mlx4_en_mc_list *tmp, *mc_to_del; 809 810 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 811 list_del(&mc_to_del->list); 812 kfree(mc_to_del); 813 } 814 } 815 816 static void mlx4_en_cache_mclist(struct net_device *dev) 817 { 818 struct mlx4_en_priv *priv = netdev_priv(dev); 819 struct netdev_hw_addr *ha; 820 struct mlx4_en_mc_list *tmp; 821 822 mlx4_en_clear_list(dev); 823 netdev_for_each_mc_addr(ha, dev) { 824 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 825 if (!tmp) { 826 mlx4_en_clear_list(dev); 827 return; 828 } 829 memcpy(tmp->addr, ha->addr, ETH_ALEN); 830 list_add_tail(&tmp->list, &priv->mc_list); 831 } 832 } 833 834 static void update_mclist_flags(struct mlx4_en_priv *priv, 835 struct list_head *dst, 836 struct list_head *src) 837 { 838 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 839 bool found; 840 841 /* Find all the entries that should be removed from dst, 842 * These are the entries that are not found in src 843 */ 844 list_for_each_entry(dst_tmp, dst, list) { 845 found = false; 846 list_for_each_entry(src_tmp, src, list) { 847 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 848 found = true; 849 break; 850 } 851 } 852 if (!found) 853 dst_tmp->action = MCLIST_REM; 854 } 855 856 /* Add entries that exist in src but not in dst 857 * mark them as need to add 858 */ 859 list_for_each_entry(src_tmp, src, list) { 860 found = false; 861 list_for_each_entry(dst_tmp, dst, list) { 862 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 863 dst_tmp->action = MCLIST_NONE; 864 found = true; 865 break; 866 } 867 } 868 if (!found) { 869 new_mc = kmemdup(src_tmp, 870 sizeof(struct mlx4_en_mc_list), 871 GFP_KERNEL); 872 if (!new_mc) 873 return; 874 875 new_mc->action = MCLIST_ADD; 876 list_add_tail(&new_mc->list, dst); 877 } 878 } 879 } 880 881 static void mlx4_en_set_rx_mode(struct net_device *dev) 882 { 883 struct mlx4_en_priv *priv = netdev_priv(dev); 884 885 if (!priv->port_up) 886 return; 887 888 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 889 } 890 891 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 892 struct mlx4_en_dev *mdev) 893 { 894 int err = 0; 895 896 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 897 if (netif_msg_rx_status(priv)) 898 en_warn(priv, "Entering promiscuous mode\n"); 899 priv->flags |= MLX4_EN_FLAG_PROMISC; 900 901 /* Enable promiscouos mode */ 902 switch (mdev->dev->caps.steering_mode) { 903 case MLX4_STEERING_MODE_DEVICE_MANAGED: 904 err = mlx4_flow_steer_promisc_add(mdev->dev, 905 priv->port, 906 priv->base_qpn, 907 MLX4_FS_ALL_DEFAULT); 908 if (err) 909 en_err(priv, "Failed enabling promiscuous mode\n"); 910 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 911 break; 912 913 case MLX4_STEERING_MODE_B0: 914 err = mlx4_unicast_promisc_add(mdev->dev, 915 priv->base_qpn, 916 priv->port); 917 if (err) 918 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 919 920 /* Add the default qp number as multicast 921 * promisc 922 */ 923 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 924 err = mlx4_multicast_promisc_add(mdev->dev, 925 priv->base_qpn, 926 priv->port); 927 if (err) 928 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 929 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 930 } 931 break; 932 933 case MLX4_STEERING_MODE_A0: 934 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 935 priv->port, 936 priv->base_qpn, 937 1); 938 if (err) 939 en_err(priv, "Failed enabling promiscuous mode\n"); 940 break; 941 } 942 943 /* Disable port multicast filter (unconditionally) */ 944 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 945 0, MLX4_MCAST_DISABLE); 946 if (err) 947 en_err(priv, "Failed disabling multicast filter\n"); 948 } 949 } 950 951 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 952 struct mlx4_en_dev *mdev) 953 { 954 int err = 0; 955 956 if (netif_msg_rx_status(priv)) 957 en_warn(priv, "Leaving promiscuous mode\n"); 958 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 959 960 /* Disable promiscouos mode */ 961 switch (mdev->dev->caps.steering_mode) { 962 case MLX4_STEERING_MODE_DEVICE_MANAGED: 963 err = mlx4_flow_steer_promisc_remove(mdev->dev, 964 priv->port, 965 MLX4_FS_ALL_DEFAULT); 966 if (err) 967 en_err(priv, "Failed disabling promiscuous mode\n"); 968 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 969 break; 970 971 case MLX4_STEERING_MODE_B0: 972 err = mlx4_unicast_promisc_remove(mdev->dev, 973 priv->base_qpn, 974 priv->port); 975 if (err) 976 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 977 /* Disable Multicast promisc */ 978 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 979 err = mlx4_multicast_promisc_remove(mdev->dev, 980 priv->base_qpn, 981 priv->port); 982 if (err) 983 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 984 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 985 } 986 break; 987 988 case MLX4_STEERING_MODE_A0: 989 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 990 priv->port, 991 priv->base_qpn, 0); 992 if (err) 993 en_err(priv, "Failed disabling promiscuous mode\n"); 994 break; 995 } 996 } 997 998 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 999 struct net_device *dev, 1000 struct mlx4_en_dev *mdev) 1001 { 1002 struct mlx4_en_mc_list *mclist, *tmp; 1003 u64 mcast_addr = 0; 1004 u8 mc_list[16] = {0}; 1005 int err = 0; 1006 1007 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 1008 if (dev->flags & IFF_ALLMULTI) { 1009 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1010 0, MLX4_MCAST_DISABLE); 1011 if (err) 1012 en_err(priv, "Failed disabling multicast filter\n"); 1013 1014 /* Add the default qp number as multicast promisc */ 1015 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 1016 switch (mdev->dev->caps.steering_mode) { 1017 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1018 err = mlx4_flow_steer_promisc_add(mdev->dev, 1019 priv->port, 1020 priv->base_qpn, 1021 MLX4_FS_MC_DEFAULT); 1022 break; 1023 1024 case MLX4_STEERING_MODE_B0: 1025 err = mlx4_multicast_promisc_add(mdev->dev, 1026 priv->base_qpn, 1027 priv->port); 1028 break; 1029 1030 case MLX4_STEERING_MODE_A0: 1031 break; 1032 } 1033 if (err) 1034 en_err(priv, "Failed entering multicast promisc mode\n"); 1035 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 1036 } 1037 } else { 1038 /* Disable Multicast promisc */ 1039 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1040 switch (mdev->dev->caps.steering_mode) { 1041 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1042 err = mlx4_flow_steer_promisc_remove(mdev->dev, 1043 priv->port, 1044 MLX4_FS_MC_DEFAULT); 1045 break; 1046 1047 case MLX4_STEERING_MODE_B0: 1048 err = mlx4_multicast_promisc_remove(mdev->dev, 1049 priv->base_qpn, 1050 priv->port); 1051 break; 1052 1053 case MLX4_STEERING_MODE_A0: 1054 break; 1055 } 1056 if (err) 1057 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 1058 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1059 } 1060 1061 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1062 0, MLX4_MCAST_DISABLE); 1063 if (err) 1064 en_err(priv, "Failed disabling multicast filter\n"); 1065 1066 /* Flush mcast filter and init it with broadcast address */ 1067 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 1068 1, MLX4_MCAST_CONFIG); 1069 1070 /* Update multicast list - we cache all addresses so they won't 1071 * change while HW is updated holding the command semaphor */ 1072 netif_addr_lock_bh(dev); 1073 mlx4_en_cache_mclist(dev); 1074 netif_addr_unlock_bh(dev); 1075 list_for_each_entry(mclist, &priv->mc_list, list) { 1076 mcast_addr = mlx4_mac_to_u64(mclist->addr); 1077 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 1078 mcast_addr, 0, MLX4_MCAST_CONFIG); 1079 } 1080 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1081 0, MLX4_MCAST_ENABLE); 1082 if (err) 1083 en_err(priv, "Failed enabling multicast filter\n"); 1084 1085 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 1086 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1087 if (mclist->action == MCLIST_REM) { 1088 /* detach this address and delete from list */ 1089 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1090 mc_list[5] = priv->port; 1091 err = mlx4_multicast_detach(mdev->dev, 1092 priv->rss_map.indir_qp, 1093 mc_list, 1094 MLX4_PROT_ETH, 1095 mclist->reg_id); 1096 if (err) 1097 en_err(priv, "Fail to detach multicast address\n"); 1098 1099 if (mclist->tunnel_reg_id) { 1100 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); 1101 if (err) 1102 en_err(priv, "Failed to detach multicast address\n"); 1103 } 1104 1105 /* remove from list */ 1106 list_del(&mclist->list); 1107 kfree(mclist); 1108 } else if (mclist->action == MCLIST_ADD) { 1109 /* attach the address */ 1110 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1111 /* needed for B0 steering support */ 1112 mc_list[5] = priv->port; 1113 err = mlx4_multicast_attach(mdev->dev, 1114 priv->rss_map.indir_qp, 1115 mc_list, 1116 priv->port, 0, 1117 MLX4_PROT_ETH, 1118 &mclist->reg_id); 1119 if (err) 1120 en_err(priv, "Fail to attach multicast address\n"); 1121 1122 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 1123 &mclist->tunnel_reg_id); 1124 if (err) 1125 en_err(priv, "Failed to attach multicast address\n"); 1126 } 1127 } 1128 } 1129 } 1130 1131 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1132 struct net_device *dev, 1133 struct mlx4_en_dev *mdev) 1134 { 1135 struct netdev_hw_addr *ha; 1136 struct mlx4_mac_entry *entry; 1137 struct hlist_node *tmp; 1138 bool found; 1139 u64 mac; 1140 int err = 0; 1141 struct hlist_head *bucket; 1142 unsigned int i; 1143 int removed = 0; 1144 u32 prev_flags; 1145 1146 /* Note that we do not need to protect our mac_hash traversal with rcu, 1147 * since all modification code is protected by mdev->state_lock 1148 */ 1149 1150 /* find what to remove */ 1151 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1152 bucket = &priv->mac_hash[i]; 1153 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1154 found = false; 1155 netdev_for_each_uc_addr(ha, dev) { 1156 if (ether_addr_equal_64bits(entry->mac, 1157 ha->addr)) { 1158 found = true; 1159 break; 1160 } 1161 } 1162 1163 /* MAC address of the port is not in uc list */ 1164 if (ether_addr_equal_64bits(entry->mac, 1165 priv->current_mac)) 1166 found = true; 1167 1168 if (!found) { 1169 mac = mlx4_mac_to_u64(entry->mac); 1170 mlx4_en_uc_steer_release(priv, entry->mac, 1171 priv->base_qpn, 1172 entry->reg_id); 1173 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1174 1175 hlist_del_rcu(&entry->hlist); 1176 kfree_rcu(entry, rcu); 1177 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1178 entry->mac, priv->port); 1179 ++removed; 1180 } 1181 } 1182 } 1183 1184 /* if we didn't remove anything, there is no use in trying to add 1185 * again once we are in a forced promisc mode state 1186 */ 1187 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1188 return; 1189 1190 prev_flags = priv->flags; 1191 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1192 1193 /* find what to add */ 1194 netdev_for_each_uc_addr(ha, dev) { 1195 found = false; 1196 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1197 hlist_for_each_entry(entry, bucket, hlist) { 1198 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1199 found = true; 1200 break; 1201 } 1202 } 1203 1204 if (!found) { 1205 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1206 if (!entry) { 1207 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1208 ha->addr, priv->port); 1209 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1210 break; 1211 } 1212 mac = mlx4_mac_to_u64(ha->addr); 1213 memcpy(entry->mac, ha->addr, ETH_ALEN); 1214 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1215 if (err < 0) { 1216 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1217 ha->addr, priv->port, err); 1218 kfree(entry); 1219 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1220 break; 1221 } 1222 err = mlx4_en_uc_steer_add(priv, ha->addr, 1223 &priv->base_qpn, 1224 &entry->reg_id); 1225 if (err) { 1226 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1227 ha->addr, priv->port, err); 1228 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1229 kfree(entry); 1230 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1231 break; 1232 } else { 1233 unsigned int mac_hash; 1234 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1235 ha->addr, priv->port); 1236 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1237 bucket = &priv->mac_hash[mac_hash]; 1238 hlist_add_head_rcu(&entry->hlist, bucket); 1239 } 1240 } 1241 } 1242 1243 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1244 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1245 priv->port); 1246 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1247 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1248 priv->port); 1249 } 1250 } 1251 1252 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1253 { 1254 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1255 rx_mode_task); 1256 struct mlx4_en_dev *mdev = priv->mdev; 1257 struct net_device *dev = priv->dev; 1258 1259 mutex_lock(&mdev->state_lock); 1260 if (!mdev->device_up) { 1261 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1262 goto out; 1263 } 1264 if (!priv->port_up) { 1265 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1266 goto out; 1267 } 1268 1269 if (!netif_carrier_ok(dev)) { 1270 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1271 if (priv->port_state.link_state) { 1272 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1273 netif_carrier_on(dev); 1274 en_dbg(LINK, priv, "Link Up\n"); 1275 } 1276 } 1277 } 1278 1279 if (dev->priv_flags & IFF_UNICAST_FLT) 1280 mlx4_en_do_uc_filter(priv, dev, mdev); 1281 1282 /* Promsicuous mode: disable all filters */ 1283 if ((dev->flags & IFF_PROMISC) || 1284 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1285 mlx4_en_set_promisc_mode(priv, mdev); 1286 goto out; 1287 } 1288 1289 /* Not in promiscuous mode */ 1290 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1291 mlx4_en_clear_promisc_mode(priv, mdev); 1292 1293 mlx4_en_do_multicast(priv, dev, mdev); 1294 out: 1295 mutex_unlock(&mdev->state_lock); 1296 } 1297 1298 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1299 { 1300 u64 reg_id; 1301 int err = 0; 1302 int *qpn = &priv->base_qpn; 1303 struct mlx4_mac_entry *entry; 1304 1305 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 1306 if (err) 1307 return err; 1308 1309 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, 1310 &priv->tunnel_reg_id); 1311 if (err) 1312 goto tunnel_err; 1313 1314 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1315 if (!entry) { 1316 err = -ENOMEM; 1317 goto alloc_err; 1318 } 1319 1320 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 1321 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); 1322 entry->reg_id = reg_id; 1323 hlist_add_head_rcu(&entry->hlist, 1324 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 1325 1326 return 0; 1327 1328 alloc_err: 1329 if (priv->tunnel_reg_id) 1330 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1331 1332 tunnel_err: 1333 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 1334 return err; 1335 } 1336 1337 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) 1338 { 1339 u64 mac; 1340 unsigned int i; 1341 int qpn = priv->base_qpn; 1342 struct hlist_head *bucket; 1343 struct hlist_node *tmp; 1344 struct mlx4_mac_entry *entry; 1345 1346 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1347 bucket = &priv->mac_hash[i]; 1348 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1349 mac = mlx4_mac_to_u64(entry->mac); 1350 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", 1351 entry->mac); 1352 mlx4_en_uc_steer_release(priv, entry->mac, 1353 qpn, entry->reg_id); 1354 1355 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); 1356 hlist_del_rcu(&entry->hlist); 1357 kfree_rcu(entry, rcu); 1358 } 1359 } 1360 1361 if (priv->tunnel_reg_id) { 1362 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1363 priv->tunnel_reg_id = 0; 1364 } 1365 } 1366 1367 static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue) 1368 { 1369 struct mlx4_en_priv *priv = netdev_priv(dev); 1370 struct mlx4_en_dev *mdev = priv->mdev; 1371 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue]; 1372 1373 if (netif_msg_timer(priv)) 1374 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1375 1376 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1377 txqueue, tx_ring->qpn, tx_ring->sp_cqn, 1378 tx_ring->cons, tx_ring->prod); 1379 1380 priv->port_stats.tx_timeout++; 1381 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1382 queue_work(mdev->workqueue, &priv->watchdog_task); 1383 } 1384 1385 1386 static void 1387 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1388 { 1389 struct mlx4_en_priv *priv = netdev_priv(dev); 1390 1391 spin_lock_bh(&priv->stats_lock); 1392 mlx4_en_fold_software_stats(dev); 1393 netdev_stats_to_stats64(stats, &dev->stats); 1394 spin_unlock_bh(&priv->stats_lock); 1395 } 1396 1397 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1398 { 1399 struct mlx4_en_cq *cq; 1400 int i, t; 1401 1402 /* If we haven't received a specific coalescing setting 1403 * (module param), we set the moderation parameters as follows: 1404 * - moder_cnt is set to the number of mtu sized packets to 1405 * satisfy our coalescing target. 1406 * - moder_time is set to a fixed value. 1407 */ 1408 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1409 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1410 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1411 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1412 en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1413 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1414 1415 /* Setup cq moderation params */ 1416 for (i = 0; i < priv->rx_ring_num; i++) { 1417 cq = priv->rx_cq[i]; 1418 cq->moder_cnt = priv->rx_frames; 1419 cq->moder_time = priv->rx_usecs; 1420 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1421 priv->last_moder_packets[i] = 0; 1422 priv->last_moder_bytes[i] = 0; 1423 } 1424 1425 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1426 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1427 cq = priv->tx_cq[t][i]; 1428 cq->moder_cnt = priv->tx_frames; 1429 cq->moder_time = priv->tx_usecs; 1430 } 1431 } 1432 1433 /* Reset auto-moderation params */ 1434 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1435 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1436 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1437 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1438 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1439 priv->adaptive_rx_coal = 1; 1440 priv->last_moder_jiffies = 0; 1441 priv->last_moder_tx_packets = 0; 1442 } 1443 1444 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1445 { 1446 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1447 u32 pkt_rate_high, pkt_rate_low; 1448 struct mlx4_en_cq *cq; 1449 unsigned long packets; 1450 unsigned long rate; 1451 unsigned long avg_pkt_size; 1452 unsigned long rx_packets; 1453 unsigned long rx_bytes; 1454 unsigned long rx_pkt_diff; 1455 int moder_time; 1456 int ring, err; 1457 1458 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1459 return; 1460 1461 pkt_rate_low = READ_ONCE(priv->pkt_rate_low); 1462 pkt_rate_high = READ_ONCE(priv->pkt_rate_high); 1463 1464 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1465 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); 1466 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); 1467 1468 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring]; 1469 packets = rx_pkt_diff; 1470 rate = packets * HZ / period; 1471 avg_pkt_size = packets ? (rx_bytes - 1472 priv->last_moder_bytes[ring]) / packets : 0; 1473 1474 /* Apply auto-moderation only when packet rate 1475 * exceeds a rate that it matters */ 1476 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1477 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1478 if (rate <= pkt_rate_low) 1479 moder_time = priv->rx_usecs_low; 1480 else if (rate >= pkt_rate_high) 1481 moder_time = priv->rx_usecs_high; 1482 else 1483 moder_time = (rate - pkt_rate_low) * 1484 (priv->rx_usecs_high - priv->rx_usecs_low) / 1485 (pkt_rate_high - pkt_rate_low) + 1486 priv->rx_usecs_low; 1487 } else { 1488 moder_time = priv->rx_usecs_low; 1489 } 1490 1491 cq = priv->rx_cq[ring]; 1492 if (moder_time != priv->last_moder_time[ring] || 1493 cq->moder_cnt != priv->rx_frames) { 1494 priv->last_moder_time[ring] = moder_time; 1495 cq->moder_time = moder_time; 1496 cq->moder_cnt = priv->rx_frames; 1497 err = mlx4_en_set_cq_moder(priv, cq); 1498 if (err) 1499 en_err(priv, "Failed modifying moderation for cq:%d\n", 1500 ring); 1501 } 1502 priv->last_moder_packets[ring] = rx_packets; 1503 priv->last_moder_bytes[ring] = rx_bytes; 1504 } 1505 1506 priv->last_moder_jiffies = jiffies; 1507 } 1508 1509 static void mlx4_en_do_get_stats(struct work_struct *work) 1510 { 1511 struct delayed_work *delay = to_delayed_work(work); 1512 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1513 stats_task); 1514 struct mlx4_en_dev *mdev = priv->mdev; 1515 int err; 1516 1517 mutex_lock(&mdev->state_lock); 1518 if (mdev->device_up) { 1519 if (priv->port_up) { 1520 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1521 if (err) 1522 en_dbg(HW, priv, "Could not update stats\n"); 1523 1524 mlx4_en_auto_moderation(priv); 1525 } 1526 1527 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1528 } 1529 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1530 mlx4_en_do_set_mac(priv, priv->current_mac); 1531 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1532 } 1533 mutex_unlock(&mdev->state_lock); 1534 } 1535 1536 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1537 * periodically 1538 */ 1539 static void mlx4_en_service_task(struct work_struct *work) 1540 { 1541 struct delayed_work *delay = to_delayed_work(work); 1542 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1543 service_task); 1544 struct mlx4_en_dev *mdev = priv->mdev; 1545 1546 mutex_lock(&mdev->state_lock); 1547 if (mdev->device_up) { 1548 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1549 mlx4_en_ptp_overflow_check(mdev); 1550 1551 mlx4_en_recover_from_oom(priv); 1552 queue_delayed_work(mdev->workqueue, &priv->service_task, 1553 SERVICE_TASK_DELAY); 1554 } 1555 mutex_unlock(&mdev->state_lock); 1556 } 1557 1558 static void mlx4_en_linkstate(struct work_struct *work) 1559 { 1560 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1561 linkstate_task); 1562 struct mlx4_en_dev *mdev = priv->mdev; 1563 int linkstate = priv->link_state; 1564 1565 mutex_lock(&mdev->state_lock); 1566 /* If observable port state changed set carrier state and 1567 * report to system log */ 1568 if (priv->last_link_state != linkstate) { 1569 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1570 en_info(priv, "Link Down\n"); 1571 netif_carrier_off(priv->dev); 1572 } else { 1573 en_info(priv, "Link Up\n"); 1574 netif_carrier_on(priv->dev); 1575 } 1576 } 1577 priv->last_link_state = linkstate; 1578 mutex_unlock(&mdev->state_lock); 1579 } 1580 1581 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1582 { 1583 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1584 int numa_node = priv->mdev->dev->numa_node; 1585 1586 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1587 return -ENOMEM; 1588 1589 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1590 ring->affinity_mask); 1591 return 0; 1592 } 1593 1594 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1595 { 1596 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); 1597 } 1598 1599 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, 1600 int tx_ring_idx) 1601 { 1602 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx]; 1603 int rr_index = tx_ring_idx; 1604 1605 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; 1606 tx_ring->recycle_ring = priv->rx_ring[rr_index]; 1607 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n", 1608 TX_XDP, tx_ring_idx, rr_index); 1609 } 1610 1611 int mlx4_en_start_port(struct net_device *dev) 1612 { 1613 struct mlx4_en_priv *priv = netdev_priv(dev); 1614 struct mlx4_en_dev *mdev = priv->mdev; 1615 struct mlx4_en_cq *cq; 1616 struct mlx4_en_tx_ring *tx_ring; 1617 int rx_index = 0; 1618 int err = 0; 1619 int i, t; 1620 int j; 1621 u8 mc_list[16] = {0}; 1622 1623 if (priv->port_up) { 1624 en_dbg(DRV, priv, "start port called while port already up\n"); 1625 return 0; 1626 } 1627 1628 INIT_LIST_HEAD(&priv->mc_list); 1629 INIT_LIST_HEAD(&priv->curr_list); 1630 INIT_LIST_HEAD(&priv->ethtool_list); 1631 memset(&priv->ethtool_rules[0], 0, 1632 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1633 1634 /* Calculate Rx buf size */ 1635 dev->mtu = min(dev->mtu, priv->max_mtu); 1636 mlx4_en_calc_rx_buf(dev); 1637 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1638 1639 /* Configure rx cq's and rings */ 1640 err = mlx4_en_activate_rx_rings(priv); 1641 if (err) { 1642 en_err(priv, "Failed to activate RX rings\n"); 1643 return err; 1644 } 1645 for (i = 0; i < priv->rx_ring_num; i++) { 1646 cq = priv->rx_cq[i]; 1647 1648 err = mlx4_en_init_affinity_hint(priv, i); 1649 if (err) { 1650 en_err(priv, "Failed preparing IRQ affinity hint\n"); 1651 goto cq_err; 1652 } 1653 1654 err = mlx4_en_activate_cq(priv, cq, i); 1655 if (err) { 1656 en_err(priv, "Failed activating Rx CQ\n"); 1657 mlx4_en_free_affinity_hint(priv, i); 1658 goto cq_err; 1659 } 1660 1661 for (j = 0; j < cq->size; j++) { 1662 struct mlx4_cqe *cqe = NULL; 1663 1664 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + 1665 priv->cqe_factor; 1666 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1667 } 1668 1669 err = mlx4_en_set_cq_moder(priv, cq); 1670 if (err) { 1671 en_err(priv, "Failed setting cq moderation parameters\n"); 1672 mlx4_en_deactivate_cq(priv, cq); 1673 mlx4_en_free_affinity_hint(priv, i); 1674 goto cq_err; 1675 } 1676 mlx4_en_arm_cq(priv, cq); 1677 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1678 ++rx_index; 1679 } 1680 1681 /* Set qp number */ 1682 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1683 err = mlx4_en_get_qp(priv); 1684 if (err) { 1685 en_err(priv, "Failed getting eth qp\n"); 1686 goto cq_err; 1687 } 1688 mdev->mac_removed[priv->port] = 0; 1689 1690 priv->counter_index = 1691 mlx4_get_default_counter_index(mdev->dev, priv->port); 1692 1693 err = mlx4_en_config_rss_steer(priv); 1694 if (err) { 1695 en_err(priv, "Failed configuring rss steering\n"); 1696 goto mac_err; 1697 } 1698 1699 err = mlx4_en_create_drop_qp(priv); 1700 if (err) 1701 goto rss_err; 1702 1703 /* Configure tx cq's and rings */ 1704 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1705 u8 num_tx_rings_p_up = t == TX ? 1706 priv->num_tx_rings_p_up : priv->tx_ring_num[t]; 1707 1708 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1709 /* Configure cq */ 1710 cq = priv->tx_cq[t][i]; 1711 err = mlx4_en_activate_cq(priv, cq, i); 1712 if (err) { 1713 en_err(priv, "Failed allocating Tx CQ\n"); 1714 goto tx_err; 1715 } 1716 err = mlx4_en_set_cq_moder(priv, cq); 1717 if (err) { 1718 en_err(priv, "Failed setting cq moderation parameters\n"); 1719 mlx4_en_deactivate_cq(priv, cq); 1720 goto tx_err; 1721 } 1722 en_dbg(DRV, priv, 1723 "Resetting index of collapsed CQ:%d to -1\n", i); 1724 cq->buf->wqe_index = cpu_to_be16(0xffff); 1725 1726 /* Configure ring */ 1727 tx_ring = priv->tx_ring[t][i]; 1728 err = mlx4_en_activate_tx_ring(priv, tx_ring, 1729 cq->mcq.cqn, 1730 i / num_tx_rings_p_up); 1731 if (err) { 1732 en_err(priv, "Failed allocating Tx ring\n"); 1733 mlx4_en_deactivate_cq(priv, cq); 1734 goto tx_err; 1735 } 1736 if (t != TX_XDP) { 1737 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1738 tx_ring->recycle_ring = NULL; 1739 1740 /* Arm CQ for TX completions */ 1741 mlx4_en_arm_cq(priv, cq); 1742 1743 } else { 1744 mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring); 1745 mlx4_en_init_recycle_ring(priv, i); 1746 /* XDP TX CQ should never be armed */ 1747 } 1748 1749 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1750 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1751 *((u32 *)(tx_ring->buf + j)) = 0xffffffff; 1752 } 1753 } 1754 1755 /* Configure port */ 1756 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1757 priv->rx_skb_size + ETH_FCS_LEN, 1758 priv->prof->tx_pause, 1759 priv->prof->tx_ppp, 1760 priv->prof->rx_pause, 1761 priv->prof->rx_ppp); 1762 if (err) { 1763 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1764 priv->port, err); 1765 goto tx_err; 1766 } 1767 1768 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); 1769 if (err) { 1770 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", 1771 dev->mtu, priv->port, err); 1772 goto tx_err; 1773 } 1774 1775 /* Set default qp number */ 1776 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1777 if (err) { 1778 en_err(priv, "Failed setting default qp numbers\n"); 1779 goto tx_err; 1780 } 1781 1782 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1783 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 1784 if (err) { 1785 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1786 err); 1787 goto tx_err; 1788 } 1789 } 1790 1791 /* Init port */ 1792 en_dbg(HW, priv, "Initializing port\n"); 1793 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1794 if (err) { 1795 en_err(priv, "Failed Initializing port\n"); 1796 goto tx_err; 1797 } 1798 1799 /* Set Unicast and VXLAN steering rules */ 1800 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && 1801 mlx4_en_set_rss_steer_rules(priv)) 1802 mlx4_warn(mdev, "Failed setting steering rules\n"); 1803 1804 /* Attach rx QP to bradcast address */ 1805 eth_broadcast_addr(&mc_list[10]); 1806 mc_list[5] = priv->port; /* needed for B0 steering support */ 1807 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list, 1808 priv->port, 0, MLX4_PROT_ETH, 1809 &priv->broadcast_id)) 1810 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1811 1812 /* Must redo promiscuous mode setup. */ 1813 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1814 1815 /* Schedule multicast task to populate multicast list */ 1816 queue_work(mdev->workqueue, &priv->rx_mode_task); 1817 1818 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1819 udp_tunnel_nic_reset_ntf(dev); 1820 1821 priv->port_up = true; 1822 1823 /* Process all completions if exist to prevent 1824 * the queues freezing if they are full 1825 */ 1826 for (i = 0; i < priv->rx_ring_num; i++) { 1827 local_bh_disable(); 1828 napi_schedule(&priv->rx_cq[i]->napi); 1829 local_bh_enable(); 1830 } 1831 1832 netif_tx_start_all_queues(dev); 1833 netif_device_attach(dev); 1834 1835 return 0; 1836 1837 tx_err: 1838 if (t == MLX4_EN_NUM_TX_TYPES) { 1839 t--; 1840 i = priv->tx_ring_num[t]; 1841 } 1842 while (t >= 0) { 1843 while (i--) { 1844 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1845 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1846 } 1847 if (!t--) 1848 break; 1849 i = priv->tx_ring_num[t]; 1850 } 1851 mlx4_en_destroy_drop_qp(priv); 1852 rss_err: 1853 mlx4_en_release_rss_steer(priv); 1854 mac_err: 1855 mlx4_en_put_qp(priv); 1856 cq_err: 1857 while (rx_index--) { 1858 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1859 mlx4_en_free_affinity_hint(priv, rx_index); 1860 } 1861 for (i = 0; i < priv->rx_ring_num; i++) 1862 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1863 1864 return err; /* need to close devices */ 1865 } 1866 1867 1868 void mlx4_en_stop_port(struct net_device *dev, int detach) 1869 { 1870 struct mlx4_en_priv *priv = netdev_priv(dev); 1871 struct mlx4_en_dev *mdev = priv->mdev; 1872 struct mlx4_en_mc_list *mclist, *tmp; 1873 struct ethtool_flow_id *flow, *tmp_flow; 1874 int i, t; 1875 u8 mc_list[16] = {0}; 1876 1877 if (!priv->port_up) { 1878 en_dbg(DRV, priv, "stop port called while port already down\n"); 1879 return; 1880 } 1881 1882 /* close port*/ 1883 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1884 1885 /* Synchronize with tx routine */ 1886 netif_tx_lock_bh(dev); 1887 if (detach) 1888 netif_device_detach(dev); 1889 netif_tx_stop_all_queues(dev); 1890 netif_tx_unlock_bh(dev); 1891 1892 netif_tx_disable(dev); 1893 1894 spin_lock_bh(&priv->stats_lock); 1895 mlx4_en_fold_software_stats(dev); 1896 /* Set port as not active */ 1897 priv->port_up = false; 1898 spin_unlock_bh(&priv->stats_lock); 1899 1900 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1901 1902 /* Promsicuous mode */ 1903 if (mdev->dev->caps.steering_mode == 1904 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1905 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1906 MLX4_EN_FLAG_MC_PROMISC); 1907 mlx4_flow_steer_promisc_remove(mdev->dev, 1908 priv->port, 1909 MLX4_FS_ALL_DEFAULT); 1910 mlx4_flow_steer_promisc_remove(mdev->dev, 1911 priv->port, 1912 MLX4_FS_MC_DEFAULT); 1913 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1914 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1915 1916 /* Disable promiscouos mode */ 1917 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1918 priv->port); 1919 1920 /* Disable Multicast promisc */ 1921 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1922 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1923 priv->port); 1924 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1925 } 1926 } 1927 1928 /* Detach All multicasts */ 1929 eth_broadcast_addr(&mc_list[10]); 1930 mc_list[5] = priv->port; /* needed for B0 steering support */ 1931 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list, 1932 MLX4_PROT_ETH, priv->broadcast_id); 1933 list_for_each_entry(mclist, &priv->curr_list, list) { 1934 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1935 mc_list[5] = priv->port; 1936 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, 1937 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1938 if (mclist->tunnel_reg_id) 1939 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); 1940 } 1941 mlx4_en_clear_list(dev); 1942 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1943 list_del(&mclist->list); 1944 kfree(mclist); 1945 } 1946 1947 /* Flush multicast filter */ 1948 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1949 1950 /* Remove flow steering rules for the port*/ 1951 if (mdev->dev->caps.steering_mode == 1952 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1953 ASSERT_RTNL(); 1954 list_for_each_entry_safe(flow, tmp_flow, 1955 &priv->ethtool_list, list) { 1956 mlx4_flow_detach(mdev->dev, flow->id); 1957 list_del(&flow->list); 1958 } 1959 } 1960 1961 mlx4_en_destroy_drop_qp(priv); 1962 1963 /* Free TX Rings */ 1964 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 1965 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1966 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1967 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1968 } 1969 } 1970 msleep(10); 1971 1972 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) 1973 for (i = 0; i < priv->tx_ring_num[t]; i++) 1974 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]); 1975 1976 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1977 mlx4_en_delete_rss_steer_rules(priv); 1978 1979 /* Free RSS qps */ 1980 mlx4_en_release_rss_steer(priv); 1981 1982 /* Unregister Mac address for the port */ 1983 mlx4_en_put_qp(priv); 1984 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1985 mdev->mac_removed[priv->port] = 1; 1986 1987 /* Free RX Rings */ 1988 for (i = 0; i < priv->rx_ring_num; i++) { 1989 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1990 1991 napi_synchronize(&cq->napi); 1992 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1993 mlx4_en_deactivate_cq(priv, cq); 1994 1995 mlx4_en_free_affinity_hint(priv, i); 1996 } 1997 } 1998 1999 static void mlx4_en_restart(struct work_struct *work) 2000 { 2001 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2002 watchdog_task); 2003 struct mlx4_en_dev *mdev = priv->mdev; 2004 struct net_device *dev = priv->dev; 2005 2006 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 2007 2008 rtnl_lock(); 2009 mutex_lock(&mdev->state_lock); 2010 if (priv->port_up) { 2011 mlx4_en_stop_port(dev, 1); 2012 if (mlx4_en_start_port(dev)) 2013 en_err(priv, "Failed restarting port %d\n", priv->port); 2014 } 2015 mutex_unlock(&mdev->state_lock); 2016 rtnl_unlock(); 2017 } 2018 2019 static void mlx4_en_clear_stats(struct net_device *dev) 2020 { 2021 struct mlx4_en_priv *priv = netdev_priv(dev); 2022 struct mlx4_en_dev *mdev = priv->mdev; 2023 struct mlx4_en_tx_ring **tx_ring; 2024 int i; 2025 2026 if (!mlx4_is_slave(mdev->dev)) 2027 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 2028 en_dbg(HW, priv, "Failed dumping statistics\n"); 2029 2030 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 2031 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 2032 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); 2033 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); 2034 memset(&priv->rx_priority_flowstats, 0, 2035 sizeof(priv->rx_priority_flowstats)); 2036 memset(&priv->tx_priority_flowstats, 0, 2037 sizeof(priv->tx_priority_flowstats)); 2038 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); 2039 2040 tx_ring = priv->tx_ring[TX]; 2041 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 2042 tx_ring[i]->bytes = 0; 2043 tx_ring[i]->packets = 0; 2044 tx_ring[i]->tx_csum = 0; 2045 tx_ring[i]->tx_dropped = 0; 2046 tx_ring[i]->queue_stopped = 0; 2047 tx_ring[i]->wake_queue = 0; 2048 tx_ring[i]->tso_packets = 0; 2049 tx_ring[i]->xmit_more = 0; 2050 } 2051 for (i = 0; i < priv->rx_ring_num; i++) { 2052 priv->rx_ring[i]->bytes = 0; 2053 priv->rx_ring[i]->packets = 0; 2054 priv->rx_ring[i]->csum_ok = 0; 2055 priv->rx_ring[i]->csum_none = 0; 2056 priv->rx_ring[i]->csum_complete = 0; 2057 } 2058 } 2059 2060 static int mlx4_en_open(struct net_device *dev) 2061 { 2062 struct mlx4_en_priv *priv = netdev_priv(dev); 2063 struct mlx4_en_dev *mdev = priv->mdev; 2064 int err = 0; 2065 2066 mutex_lock(&mdev->state_lock); 2067 2068 if (!mdev->device_up) { 2069 en_err(priv, "Cannot open - device down/disabled\n"); 2070 err = -EBUSY; 2071 goto out; 2072 } 2073 2074 /* Reset HW statistics and SW counters */ 2075 mlx4_en_clear_stats(dev); 2076 2077 err = mlx4_en_start_port(dev); 2078 if (err) 2079 en_err(priv, "Failed starting port:%d\n", priv->port); 2080 2081 out: 2082 mutex_unlock(&mdev->state_lock); 2083 return err; 2084 } 2085 2086 2087 static int mlx4_en_close(struct net_device *dev) 2088 { 2089 struct mlx4_en_priv *priv = netdev_priv(dev); 2090 struct mlx4_en_dev *mdev = priv->mdev; 2091 2092 en_dbg(IFDOWN, priv, "Close port called\n"); 2093 2094 mutex_lock(&mdev->state_lock); 2095 2096 mlx4_en_stop_port(dev, 0); 2097 netif_carrier_off(dev); 2098 2099 mutex_unlock(&mdev->state_lock); 2100 return 0; 2101 } 2102 2103 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 2104 { 2105 int i, t; 2106 2107 #ifdef CONFIG_RFS_ACCEL 2108 priv->dev->rx_cpu_rmap = NULL; 2109 #endif 2110 2111 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2112 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2113 if (priv->tx_ring[t] && priv->tx_ring[t][i]) 2114 mlx4_en_destroy_tx_ring(priv, 2115 &priv->tx_ring[t][i]); 2116 if (priv->tx_cq[t] && priv->tx_cq[t][i]) 2117 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2118 } 2119 kfree(priv->tx_ring[t]); 2120 kfree(priv->tx_cq[t]); 2121 } 2122 2123 for (i = 0; i < priv->rx_ring_num; i++) { 2124 if (priv->rx_ring[i]) 2125 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2126 priv->prof->rx_ring_size, priv->stride); 2127 if (priv->rx_cq[i]) 2128 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2129 } 2130 2131 } 2132 2133 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 2134 { 2135 struct mlx4_en_port_profile *prof = priv->prof; 2136 int i, t; 2137 int node; 2138 2139 /* Create tx Rings */ 2140 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2141 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2142 node = cpu_to_node(i % num_online_cpus()); 2143 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i], 2144 prof->tx_ring_size, i, t, node)) 2145 goto err; 2146 2147 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i], 2148 prof->tx_ring_size, 2149 TXBB_SIZE, node, i)) 2150 goto err; 2151 } 2152 } 2153 2154 /* Create rx Rings */ 2155 for (i = 0; i < priv->rx_ring_num; i++) { 2156 node = cpu_to_node(i % num_online_cpus()); 2157 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2158 prof->rx_ring_size, i, RX, node)) 2159 goto err; 2160 2161 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2162 prof->rx_ring_size, priv->stride, 2163 node, i)) 2164 goto err; 2165 2166 } 2167 2168 #ifdef CONFIG_RFS_ACCEL 2169 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2170 #endif 2171 2172 return 0; 2173 2174 err: 2175 en_err(priv, "Failed to allocate NIC resources\n"); 2176 for (i = 0; i < priv->rx_ring_num; i++) { 2177 if (priv->rx_ring[i]) 2178 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2179 prof->rx_ring_size, 2180 priv->stride); 2181 if (priv->rx_cq[i]) 2182 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2183 } 2184 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2185 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2186 if (priv->tx_ring[t][i]) 2187 mlx4_en_destroy_tx_ring(priv, 2188 &priv->tx_ring[t][i]); 2189 if (priv->tx_cq[t][i]) 2190 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2191 } 2192 } 2193 return -ENOMEM; 2194 } 2195 2196 2197 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2198 struct mlx4_en_priv *src, 2199 struct mlx4_en_port_profile *prof) 2200 { 2201 int t; 2202 2203 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2204 sizeof(dst->hwtstamp_config)); 2205 dst->num_tx_rings_p_up = prof->num_tx_rings_p_up; 2206 dst->rx_ring_num = prof->rx_ring_num; 2207 dst->flags = prof->flags; 2208 dst->mdev = src->mdev; 2209 dst->port = src->port; 2210 dst->dev = src->dev; 2211 dst->prof = prof; 2212 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2213 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2214 2215 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2216 dst->tx_ring_num[t] = prof->tx_ring_num[t]; 2217 if (!dst->tx_ring_num[t]) 2218 continue; 2219 2220 dst->tx_ring[t] = kcalloc(MAX_TX_RINGS, 2221 sizeof(struct mlx4_en_tx_ring *), 2222 GFP_KERNEL); 2223 if (!dst->tx_ring[t]) 2224 goto err_free_tx; 2225 2226 dst->tx_cq[t] = kcalloc(MAX_TX_RINGS, 2227 sizeof(struct mlx4_en_cq *), 2228 GFP_KERNEL); 2229 if (!dst->tx_cq[t]) { 2230 kfree(dst->tx_ring[t]); 2231 goto err_free_tx; 2232 } 2233 } 2234 2235 return 0; 2236 2237 err_free_tx: 2238 while (t--) { 2239 kfree(dst->tx_ring[t]); 2240 kfree(dst->tx_cq[t]); 2241 } 2242 return -ENOMEM; 2243 } 2244 2245 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2246 struct mlx4_en_priv *src) 2247 { 2248 int t; 2249 memcpy(dst->rx_ring, src->rx_ring, 2250 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2251 memcpy(dst->rx_cq, src->rx_cq, 2252 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2253 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2254 sizeof(dst->hwtstamp_config)); 2255 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2256 dst->tx_ring_num[t] = src->tx_ring_num[t]; 2257 dst->tx_ring[t] = src->tx_ring[t]; 2258 dst->tx_cq[t] = src->tx_cq[t]; 2259 } 2260 dst->num_tx_rings_p_up = src->num_tx_rings_p_up; 2261 dst->rx_ring_num = src->rx_ring_num; 2262 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2263 } 2264 2265 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2266 struct mlx4_en_priv *tmp, 2267 struct mlx4_en_port_profile *prof, 2268 bool carry_xdp_prog) 2269 { 2270 struct bpf_prog *xdp_prog; 2271 int i, t; 2272 2273 mlx4_en_copy_priv(tmp, priv, prof); 2274 2275 if (mlx4_en_alloc_resources(tmp)) { 2276 en_warn(priv, 2277 "%s: Resource allocation failed, using previous configuration\n", 2278 __func__); 2279 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2280 kfree(tmp->tx_ring[t]); 2281 kfree(tmp->tx_cq[t]); 2282 } 2283 return -ENOMEM; 2284 } 2285 2286 /* All rx_rings has the same xdp_prog. Pick the first one. */ 2287 xdp_prog = rcu_dereference_protected( 2288 priv->rx_ring[0]->xdp_prog, 2289 lockdep_is_held(&priv->mdev->state_lock)); 2290 2291 if (xdp_prog && carry_xdp_prog) { 2292 bpf_prog_add(xdp_prog, tmp->rx_ring_num); 2293 for (i = 0; i < tmp->rx_ring_num; i++) 2294 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, 2295 xdp_prog); 2296 } 2297 2298 return 0; 2299 } 2300 2301 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2302 struct mlx4_en_priv *tmp) 2303 { 2304 mlx4_en_free_resources(priv); 2305 mlx4_en_update_priv(priv, tmp); 2306 } 2307 2308 void mlx4_en_destroy_netdev(struct net_device *dev) 2309 { 2310 struct mlx4_en_priv *priv = netdev_priv(dev); 2311 struct mlx4_en_dev *mdev = priv->mdev; 2312 2313 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2314 2315 /* Unregister device - this will close the port if it was up */ 2316 if (priv->registered) { 2317 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2318 priv->port)); 2319 unregister_netdev(dev); 2320 } 2321 2322 if (priv->allocated) 2323 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2324 2325 cancel_delayed_work(&priv->stats_task); 2326 cancel_delayed_work(&priv->service_task); 2327 /* flush any pending task for this netdev */ 2328 flush_workqueue(mdev->workqueue); 2329 2330 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2331 mlx4_en_remove_timestamp(mdev); 2332 2333 /* Detach the netdev so tasks would not attempt to access it */ 2334 mutex_lock(&mdev->state_lock); 2335 mdev->pndev[priv->port] = NULL; 2336 mdev->upper[priv->port] = NULL; 2337 2338 #ifdef CONFIG_RFS_ACCEL 2339 mlx4_en_cleanup_filters(priv); 2340 #endif 2341 2342 mlx4_en_free_resources(priv); 2343 mutex_unlock(&mdev->state_lock); 2344 2345 free_netdev(dev); 2346 } 2347 2348 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu) 2349 { 2350 struct mlx4_en_priv *priv = netdev_priv(dev); 2351 2352 if (mtu > MLX4_EN_MAX_XDP_MTU) { 2353 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n", 2354 mtu, MLX4_EN_MAX_XDP_MTU); 2355 return false; 2356 } 2357 2358 return true; 2359 } 2360 2361 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2362 { 2363 struct mlx4_en_priv *priv = netdev_priv(dev); 2364 struct mlx4_en_dev *mdev = priv->mdev; 2365 int err = 0; 2366 2367 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2368 dev->mtu, new_mtu); 2369 2370 if (priv->tx_ring_num[TX_XDP] && 2371 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2372 return -EOPNOTSUPP; 2373 2374 dev->mtu = new_mtu; 2375 2376 if (netif_running(dev)) { 2377 mutex_lock(&mdev->state_lock); 2378 if (!mdev->device_up) { 2379 /* NIC is probably restarting - let watchdog task reset 2380 * the port */ 2381 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2382 } else { 2383 mlx4_en_stop_port(dev, 1); 2384 err = mlx4_en_start_port(dev); 2385 if (err) { 2386 en_err(priv, "Failed restarting port:%d\n", 2387 priv->port); 2388 queue_work(mdev->workqueue, &priv->watchdog_task); 2389 } 2390 } 2391 mutex_unlock(&mdev->state_lock); 2392 } 2393 return 0; 2394 } 2395 2396 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2397 { 2398 struct mlx4_en_priv *priv = netdev_priv(dev); 2399 struct mlx4_en_dev *mdev = priv->mdev; 2400 struct hwtstamp_config config; 2401 2402 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2403 return -EFAULT; 2404 2405 /* reserved for future extensions */ 2406 if (config.flags) 2407 return -EINVAL; 2408 2409 /* device doesn't support time stamping */ 2410 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2411 return -EINVAL; 2412 2413 /* TX HW timestamp */ 2414 switch (config.tx_type) { 2415 case HWTSTAMP_TX_OFF: 2416 case HWTSTAMP_TX_ON: 2417 break; 2418 default: 2419 return -ERANGE; 2420 } 2421 2422 /* RX HW timestamp */ 2423 switch (config.rx_filter) { 2424 case HWTSTAMP_FILTER_NONE: 2425 break; 2426 case HWTSTAMP_FILTER_ALL: 2427 case HWTSTAMP_FILTER_SOME: 2428 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2429 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2430 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2431 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2432 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2433 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2434 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2435 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2436 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2437 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2438 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2439 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2440 case HWTSTAMP_FILTER_NTP_ALL: 2441 config.rx_filter = HWTSTAMP_FILTER_ALL; 2442 break; 2443 default: 2444 return -ERANGE; 2445 } 2446 2447 if (mlx4_en_reset_config(dev, config, dev->features)) { 2448 config.tx_type = HWTSTAMP_TX_OFF; 2449 config.rx_filter = HWTSTAMP_FILTER_NONE; 2450 } 2451 2452 return copy_to_user(ifr->ifr_data, &config, 2453 sizeof(config)) ? -EFAULT : 0; 2454 } 2455 2456 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2457 { 2458 struct mlx4_en_priv *priv = netdev_priv(dev); 2459 2460 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2461 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; 2462 } 2463 2464 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2465 { 2466 switch (cmd) { 2467 case SIOCSHWTSTAMP: 2468 return mlx4_en_hwtstamp_set(dev, ifr); 2469 case SIOCGHWTSTAMP: 2470 return mlx4_en_hwtstamp_get(dev, ifr); 2471 default: 2472 return -EOPNOTSUPP; 2473 } 2474 } 2475 2476 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2477 netdev_features_t features) 2478 { 2479 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2480 struct mlx4_en_dev *mdev = en_priv->mdev; 2481 2482 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2483 * enable/disable make sure S-TAG flag is always in same state as 2484 * C-TAG. 2485 */ 2486 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2487 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2488 features |= NETIF_F_HW_VLAN_STAG_RX; 2489 else 2490 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2491 2492 return features; 2493 } 2494 2495 static int mlx4_en_set_features(struct net_device *netdev, 2496 netdev_features_t features) 2497 { 2498 struct mlx4_en_priv *priv = netdev_priv(netdev); 2499 bool reset = false; 2500 int ret = 0; 2501 2502 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2503 en_info(priv, "Turn %s RX-FCS\n", 2504 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2505 reset = true; 2506 } 2507 2508 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2509 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2510 2511 en_info(priv, "Turn %s RX-ALL\n", 2512 ignore_fcs_value ? "ON" : "OFF"); 2513 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2514 priv->port, ignore_fcs_value); 2515 if (ret) 2516 return ret; 2517 } 2518 2519 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2520 en_info(priv, "Turn %s RX vlan strip offload\n", 2521 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2522 reset = true; 2523 } 2524 2525 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2526 en_info(priv, "Turn %s TX vlan strip offload\n", 2527 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2528 2529 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2530 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2531 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2532 2533 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2534 en_info(priv, "Turn %s loopback\n", 2535 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); 2536 mlx4_en_update_loopback_state(netdev, features); 2537 } 2538 2539 if (reset) { 2540 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2541 features); 2542 if (ret) 2543 return ret; 2544 } 2545 2546 return 0; 2547 } 2548 2549 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2550 { 2551 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2552 struct mlx4_en_dev *mdev = en_priv->mdev; 2553 2554 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac); 2555 } 2556 2557 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 2558 __be16 vlan_proto) 2559 { 2560 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2561 struct mlx4_en_dev *mdev = en_priv->mdev; 2562 2563 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos, 2564 vlan_proto); 2565 } 2566 2567 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2568 int max_tx_rate) 2569 { 2570 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2571 struct mlx4_en_dev *mdev = en_priv->mdev; 2572 2573 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2574 max_tx_rate); 2575 } 2576 2577 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2578 { 2579 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2580 struct mlx4_en_dev *mdev = en_priv->mdev; 2581 2582 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2583 } 2584 2585 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2586 { 2587 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2588 struct mlx4_en_dev *mdev = en_priv->mdev; 2589 2590 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2591 } 2592 2593 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2594 { 2595 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2596 struct mlx4_en_dev *mdev = en_priv->mdev; 2597 2598 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2599 } 2600 2601 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2602 struct ifla_vf_stats *vf_stats) 2603 { 2604 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2605 struct mlx4_en_dev *mdev = en_priv->mdev; 2606 2607 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2608 } 2609 2610 #define PORT_ID_BYTE_LEN 8 2611 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2612 struct netdev_phys_item_id *ppid) 2613 { 2614 struct mlx4_en_priv *priv = netdev_priv(dev); 2615 struct mlx4_dev *mdev = priv->mdev->dev; 2616 int i; 2617 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2618 2619 if (!phys_port_id) 2620 return -EOPNOTSUPP; 2621 2622 ppid->id_len = sizeof(phys_port_id); 2623 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2624 ppid->id[i] = phys_port_id & 0xff; 2625 phys_port_id >>= 8; 2626 } 2627 return 0; 2628 } 2629 2630 static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table) 2631 { 2632 struct mlx4_en_priv *priv = netdev_priv(dev); 2633 struct udp_tunnel_info ti; 2634 int ret; 2635 2636 udp_tunnel_nic_get_port(dev, table, 0, &ti); 2637 priv->vxlan_port = ti.port; 2638 2639 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2640 if (ret) 2641 return ret; 2642 2643 return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2644 VXLAN_STEER_BY_OUTER_MAC, 2645 !!priv->vxlan_port); 2646 } 2647 2648 static const struct udp_tunnel_nic_info mlx4_udp_tunnels = { 2649 .sync_table = mlx4_udp_tunnel_sync, 2650 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 2651 UDP_TUNNEL_NIC_INFO_IPV4_ONLY, 2652 .tables = { 2653 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2654 }, 2655 }; 2656 2657 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2658 struct net_device *dev, 2659 netdev_features_t features) 2660 { 2661 features = vlan_features_check(skb, features); 2662 features = vxlan_features_check(skb, features); 2663 2664 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2665 * support inner IPv6 checksums and segmentation so we need to 2666 * strip that feature if this is an IPv6 encapsulated frame. 2667 */ 2668 if (skb->encapsulation && 2669 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2670 struct mlx4_en_priv *priv = netdev_priv(dev); 2671 2672 if (!priv->vxlan_port || 2673 (ip_hdr(skb)->version != 4) || 2674 (udp_hdr(skb)->dest != priv->vxlan_port)) 2675 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2676 } 2677 2678 return features; 2679 } 2680 2681 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2682 { 2683 struct mlx4_en_priv *priv = netdev_priv(dev); 2684 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index]; 2685 struct mlx4_update_qp_params params; 2686 int err; 2687 2688 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2689 return -EOPNOTSUPP; 2690 2691 /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ 2692 if (maxrate >> 12) { 2693 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2694 params.rate_val = maxrate / 1000; 2695 } else if (maxrate) { 2696 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2697 params.rate_val = maxrate; 2698 } else { /* zero serves to revoke the QP rate-limitation */ 2699 params.rate_unit = 0; 2700 params.rate_val = 0; 2701 } 2702 2703 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2704 ¶ms); 2705 return err; 2706 } 2707 2708 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) 2709 { 2710 struct mlx4_en_priv *priv = netdev_priv(dev); 2711 struct mlx4_en_dev *mdev = priv->mdev; 2712 struct mlx4_en_port_profile new_prof; 2713 struct bpf_prog *old_prog; 2714 struct mlx4_en_priv *tmp; 2715 int tx_changed = 0; 2716 int xdp_ring_num; 2717 int port_up = 0; 2718 int err; 2719 int i; 2720 2721 xdp_ring_num = prog ? priv->rx_ring_num : 0; 2722 2723 /* No need to reconfigure buffers when simply swapping the 2724 * program for a new one. 2725 */ 2726 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { 2727 if (prog) 2728 bpf_prog_add(prog, priv->rx_ring_num - 1); 2729 2730 mutex_lock(&mdev->state_lock); 2731 for (i = 0; i < priv->rx_ring_num; i++) { 2732 old_prog = rcu_dereference_protected( 2733 priv->rx_ring[i]->xdp_prog, 2734 lockdep_is_held(&mdev->state_lock)); 2735 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2736 if (old_prog) 2737 bpf_prog_put(old_prog); 2738 } 2739 mutex_unlock(&mdev->state_lock); 2740 return 0; 2741 } 2742 2743 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu)) 2744 return -EOPNOTSUPP; 2745 2746 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2747 if (!tmp) 2748 return -ENOMEM; 2749 2750 if (prog) 2751 bpf_prog_add(prog, priv->rx_ring_num - 1); 2752 2753 mutex_lock(&mdev->state_lock); 2754 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 2755 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num; 2756 2757 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) { 2758 tx_changed = 1; 2759 new_prof.tx_ring_num[TX] = 2760 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up); 2761 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); 2762 } 2763 2764 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false); 2765 if (err) { 2766 if (prog) 2767 bpf_prog_sub(prog, priv->rx_ring_num - 1); 2768 goto unlock_out; 2769 } 2770 2771 if (priv->port_up) { 2772 port_up = 1; 2773 mlx4_en_stop_port(dev, 1); 2774 } 2775 2776 mlx4_en_safe_replace_resources(priv, tmp); 2777 if (tx_changed) 2778 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 2779 2780 for (i = 0; i < priv->rx_ring_num; i++) { 2781 old_prog = rcu_dereference_protected( 2782 priv->rx_ring[i]->xdp_prog, 2783 lockdep_is_held(&mdev->state_lock)); 2784 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2785 if (old_prog) 2786 bpf_prog_put(old_prog); 2787 } 2788 2789 if (port_up) { 2790 err = mlx4_en_start_port(dev); 2791 if (err) { 2792 en_err(priv, "Failed starting port %d for XDP change\n", 2793 priv->port); 2794 queue_work(mdev->workqueue, &priv->watchdog_task); 2795 } 2796 } 2797 2798 unlock_out: 2799 mutex_unlock(&mdev->state_lock); 2800 kfree(tmp); 2801 return err; 2802 } 2803 2804 static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2805 { 2806 switch (xdp->command) { 2807 case XDP_SETUP_PROG: 2808 return mlx4_xdp_set(dev, xdp->prog); 2809 default: 2810 return -EINVAL; 2811 } 2812 } 2813 2814 static const struct net_device_ops mlx4_netdev_ops = { 2815 .ndo_open = mlx4_en_open, 2816 .ndo_stop = mlx4_en_close, 2817 .ndo_start_xmit = mlx4_en_xmit, 2818 .ndo_select_queue = mlx4_en_select_queue, 2819 .ndo_get_stats64 = mlx4_en_get_stats64, 2820 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2821 .ndo_set_mac_address = mlx4_en_set_mac, 2822 .ndo_validate_addr = eth_validate_addr, 2823 .ndo_change_mtu = mlx4_en_change_mtu, 2824 .ndo_do_ioctl = mlx4_en_ioctl, 2825 .ndo_tx_timeout = mlx4_en_tx_timeout, 2826 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2827 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2828 .ndo_set_features = mlx4_en_set_features, 2829 .ndo_fix_features = mlx4_en_fix_features, 2830 .ndo_setup_tc = __mlx4_en_setup_tc, 2831 #ifdef CONFIG_RFS_ACCEL 2832 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2833 #endif 2834 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2835 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 2836 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 2837 .ndo_features_check = mlx4_en_features_check, 2838 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2839 .ndo_bpf = mlx4_xdp, 2840 }; 2841 2842 static const struct net_device_ops mlx4_netdev_ops_master = { 2843 .ndo_open = mlx4_en_open, 2844 .ndo_stop = mlx4_en_close, 2845 .ndo_start_xmit = mlx4_en_xmit, 2846 .ndo_select_queue = mlx4_en_select_queue, 2847 .ndo_get_stats64 = mlx4_en_get_stats64, 2848 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2849 .ndo_set_mac_address = mlx4_en_set_mac, 2850 .ndo_validate_addr = eth_validate_addr, 2851 .ndo_change_mtu = mlx4_en_change_mtu, 2852 .ndo_tx_timeout = mlx4_en_tx_timeout, 2853 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2854 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2855 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2856 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2857 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2858 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2859 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2860 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2861 .ndo_get_vf_config = mlx4_en_get_vf_config, 2862 .ndo_set_features = mlx4_en_set_features, 2863 .ndo_fix_features = mlx4_en_fix_features, 2864 .ndo_setup_tc = __mlx4_en_setup_tc, 2865 #ifdef CONFIG_RFS_ACCEL 2866 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2867 #endif 2868 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2869 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 2870 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 2871 .ndo_features_check = mlx4_en_features_check, 2872 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2873 .ndo_bpf = mlx4_xdp, 2874 }; 2875 2876 struct mlx4_en_bond { 2877 struct work_struct work; 2878 struct mlx4_en_priv *priv; 2879 int is_bonded; 2880 struct mlx4_port_map port_map; 2881 }; 2882 2883 static void mlx4_en_bond_work(struct work_struct *work) 2884 { 2885 struct mlx4_en_bond *bond = container_of(work, 2886 struct mlx4_en_bond, 2887 work); 2888 int err = 0; 2889 struct mlx4_dev *dev = bond->priv->mdev->dev; 2890 2891 if (bond->is_bonded) { 2892 if (!mlx4_is_bonded(dev)) { 2893 err = mlx4_bond(dev); 2894 if (err) 2895 en_err(bond->priv, "Fail to bond device\n"); 2896 } 2897 if (!err) { 2898 err = mlx4_port_map_set(dev, &bond->port_map); 2899 if (err) 2900 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", 2901 bond->port_map.port1, 2902 bond->port_map.port2, 2903 err); 2904 } 2905 } else if (mlx4_is_bonded(dev)) { 2906 err = mlx4_unbond(dev); 2907 if (err) 2908 en_err(bond->priv, "Fail to unbond device\n"); 2909 } 2910 dev_put(bond->priv->dev); 2911 kfree(bond); 2912 } 2913 2914 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 2915 u8 v2p_p1, u8 v2p_p2) 2916 { 2917 struct mlx4_en_bond *bond = NULL; 2918 2919 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 2920 if (!bond) 2921 return -ENOMEM; 2922 2923 INIT_WORK(&bond->work, mlx4_en_bond_work); 2924 bond->priv = priv; 2925 bond->is_bonded = is_bonded; 2926 bond->port_map.port1 = v2p_p1; 2927 bond->port_map.port2 = v2p_p2; 2928 dev_hold(priv->dev); 2929 queue_work(priv->mdev->workqueue, &bond->work); 2930 return 0; 2931 } 2932 2933 int mlx4_en_netdev_event(struct notifier_block *this, 2934 unsigned long event, void *ptr) 2935 { 2936 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2937 u8 port = 0; 2938 struct mlx4_en_dev *mdev; 2939 struct mlx4_dev *dev; 2940 int i, num_eth_ports = 0; 2941 bool do_bond = true; 2942 struct mlx4_en_priv *priv; 2943 u8 v2p_port1 = 0; 2944 u8 v2p_port2 = 0; 2945 2946 if (!net_eq(dev_net(ndev), &init_net)) 2947 return NOTIFY_DONE; 2948 2949 mdev = container_of(this, struct mlx4_en_dev, nb); 2950 dev = mdev->dev; 2951 2952 /* Go into this mode only when two network devices set on two ports 2953 * of the same mlx4 device are slaves of the same bonding master 2954 */ 2955 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 2956 ++num_eth_ports; 2957 if (!port && (mdev->pndev[i] == ndev)) 2958 port = i; 2959 mdev->upper[i] = mdev->pndev[i] ? 2960 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 2961 /* condition not met: network device is a slave */ 2962 if (!mdev->upper[i]) 2963 do_bond = false; 2964 if (num_eth_ports < 2) 2965 continue; 2966 /* condition not met: same master */ 2967 if (mdev->upper[i] != mdev->upper[i-1]) 2968 do_bond = false; 2969 } 2970 /* condition not met: 2 salves */ 2971 do_bond = (num_eth_ports == 2) ? do_bond : false; 2972 2973 /* handle only events that come with enough info */ 2974 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 2975 return NOTIFY_DONE; 2976 2977 priv = netdev_priv(ndev); 2978 if (do_bond) { 2979 struct netdev_notifier_bonding_info *notifier_info = ptr; 2980 struct netdev_bonding_info *bonding_info = 2981 ¬ifier_info->bonding_info; 2982 2983 /* required mode 1, 2 or 4 */ 2984 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 2985 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 2986 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 2987 do_bond = false; 2988 2989 /* require exactly 2 slaves */ 2990 if (bonding_info->master.num_slaves != 2) 2991 do_bond = false; 2992 2993 /* calc v2p */ 2994 if (do_bond) { 2995 if (bonding_info->master.bond_mode == 2996 BOND_MODE_ACTIVEBACKUP) { 2997 /* in active-backup mode virtual ports are 2998 * mapped to the physical port of the active 2999 * slave */ 3000 if (bonding_info->slave.state == 3001 BOND_STATE_BACKUP) { 3002 if (port == 1) { 3003 v2p_port1 = 2; 3004 v2p_port2 = 2; 3005 } else { 3006 v2p_port1 = 1; 3007 v2p_port2 = 1; 3008 } 3009 } else { /* BOND_STATE_ACTIVE */ 3010 if (port == 1) { 3011 v2p_port1 = 1; 3012 v2p_port2 = 1; 3013 } else { 3014 v2p_port1 = 2; 3015 v2p_port2 = 2; 3016 } 3017 } 3018 } else { /* Active-Active */ 3019 /* in active-active mode a virtual port is 3020 * mapped to the native physical port if and only 3021 * if the physical port is up */ 3022 __s8 link = bonding_info->slave.link; 3023 3024 if (port == 1) 3025 v2p_port2 = 2; 3026 else 3027 v2p_port1 = 1; 3028 if ((link == BOND_LINK_UP) || 3029 (link == BOND_LINK_FAIL)) { 3030 if (port == 1) 3031 v2p_port1 = 1; 3032 else 3033 v2p_port2 = 2; 3034 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 3035 if (port == 1) 3036 v2p_port1 = 2; 3037 else 3038 v2p_port2 = 1; 3039 } 3040 } 3041 } 3042 } 3043 3044 mlx4_en_queue_bond_work(priv, do_bond, 3045 v2p_port1, v2p_port2); 3046 3047 return NOTIFY_DONE; 3048 } 3049 3050 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 3051 struct mlx4_en_stats_bitmap *stats_bitmap, 3052 u8 rx_ppp, u8 rx_pause, 3053 u8 tx_ppp, u8 tx_pause) 3054 { 3055 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 3056 3057 if (!mlx4_is_slave(dev) && 3058 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 3059 mutex_lock(&stats_bitmap->mutex); 3060 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 3061 3062 if (rx_ppp) 3063 bitmap_set(stats_bitmap->bitmap, last_i, 3064 NUM_FLOW_PRIORITY_STATS_RX); 3065 last_i += NUM_FLOW_PRIORITY_STATS_RX; 3066 3067 if (rx_pause && !(rx_ppp)) 3068 bitmap_set(stats_bitmap->bitmap, last_i, 3069 NUM_FLOW_STATS_RX); 3070 last_i += NUM_FLOW_STATS_RX; 3071 3072 if (tx_ppp) 3073 bitmap_set(stats_bitmap->bitmap, last_i, 3074 NUM_FLOW_PRIORITY_STATS_TX); 3075 last_i += NUM_FLOW_PRIORITY_STATS_TX; 3076 3077 if (tx_pause && !(tx_ppp)) 3078 bitmap_set(stats_bitmap->bitmap, last_i, 3079 NUM_FLOW_STATS_TX); 3080 last_i += NUM_FLOW_STATS_TX; 3081 3082 mutex_unlock(&stats_bitmap->mutex); 3083 } 3084 } 3085 3086 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 3087 struct mlx4_en_stats_bitmap *stats_bitmap, 3088 u8 rx_ppp, u8 rx_pause, 3089 u8 tx_ppp, u8 tx_pause) 3090 { 3091 int last_i = 0; 3092 3093 mutex_init(&stats_bitmap->mutex); 3094 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 3095 3096 if (mlx4_is_slave(dev)) { 3097 bitmap_set(stats_bitmap->bitmap, last_i + 3098 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 3099 bitmap_set(stats_bitmap->bitmap, last_i + 3100 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 3101 bitmap_set(stats_bitmap->bitmap, last_i + 3102 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 3103 bitmap_set(stats_bitmap->bitmap, last_i + 3104 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 3105 bitmap_set(stats_bitmap->bitmap, last_i + 3106 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 3107 bitmap_set(stats_bitmap->bitmap, last_i + 3108 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 3109 } else { 3110 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 3111 } 3112 last_i += NUM_MAIN_STATS; 3113 3114 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 3115 last_i += NUM_PORT_STATS; 3116 3117 if (mlx4_is_master(dev)) 3118 bitmap_set(stats_bitmap->bitmap, last_i, 3119 NUM_PF_STATS); 3120 last_i += NUM_PF_STATS; 3121 3122 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 3123 rx_ppp, rx_pause, 3124 tx_ppp, tx_pause); 3125 last_i += NUM_FLOW_STATS; 3126 3127 if (!mlx4_is_slave(dev)) 3128 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 3129 last_i += NUM_PKT_STATS; 3130 3131 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); 3132 last_i += NUM_XDP_STATS; 3133 3134 if (!mlx4_is_slave(dev)) 3135 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS); 3136 last_i += NUM_PHY_STATS; 3137 } 3138 3139 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 3140 struct mlx4_en_port_profile *prof) 3141 { 3142 struct net_device *dev; 3143 struct mlx4_en_priv *priv; 3144 int i, t; 3145 int err; 3146 3147 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3148 MAX_TX_RINGS, MAX_RX_RINGS); 3149 if (dev == NULL) 3150 return -ENOMEM; 3151 3152 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]); 3153 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 3154 3155 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 3156 dev->dev_port = port - 1; 3157 3158 /* 3159 * Initialize driver private data 3160 */ 3161 3162 priv = netdev_priv(dev); 3163 memset(priv, 0, sizeof(struct mlx4_en_priv)); 3164 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3165 spin_lock_init(&priv->stats_lock); 3166 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3167 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3168 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3169 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3170 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3171 #ifdef CONFIG_RFS_ACCEL 3172 INIT_LIST_HEAD(&priv->filters); 3173 spin_lock_init(&priv->filters_lock); 3174 #endif 3175 3176 priv->dev = dev; 3177 priv->mdev = mdev; 3178 priv->ddev = &mdev->pdev->dev; 3179 priv->prof = prof; 3180 priv->port = port; 3181 priv->port_up = false; 3182 priv->flags = prof->flags; 3183 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 3184 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 3185 MLX4_WQE_CTRL_SOLICITED); 3186 priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up; 3187 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 3188 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 3189 3190 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 3191 priv->tx_ring_num[t] = prof->tx_ring_num[t]; 3192 if (!priv->tx_ring_num[t]) 3193 continue; 3194 3195 priv->tx_ring[t] = kcalloc(MAX_TX_RINGS, 3196 sizeof(struct mlx4_en_tx_ring *), 3197 GFP_KERNEL); 3198 if (!priv->tx_ring[t]) { 3199 err = -ENOMEM; 3200 goto out; 3201 } 3202 priv->tx_cq[t] = kcalloc(MAX_TX_RINGS, 3203 sizeof(struct mlx4_en_cq *), 3204 GFP_KERNEL); 3205 if (!priv->tx_cq[t]) { 3206 err = -ENOMEM; 3207 goto out; 3208 } 3209 } 3210 priv->rx_ring_num = prof->rx_ring_num; 3211 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 3212 priv->cqe_size = mdev->dev->caps.cqe_size; 3213 priv->mac_index = -1; 3214 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3215 #ifdef CONFIG_MLX4_EN_DCB 3216 if (!mlx4_is_slave(priv->mdev->dev)) { 3217 u8 prio; 3218 3219 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { 3220 priv->ets.prio_tc[prio] = prio; 3221 priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; 3222 } 3223 3224 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | 3225 DCB_CAP_DCBX_VER_IEEE; 3226 priv->flags |= MLX4_EN_DCB_ENABLED; 3227 priv->cee_config.pfc_state = false; 3228 3229 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++) 3230 priv->cee_config.dcb_pfc[i] = pfc_disabled; 3231 3232 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3233 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3234 } else { 3235 en_info(priv, "enabling only PFC DCB ops\n"); 3236 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 3237 } 3238 } 3239 #endif 3240 3241 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 3242 INIT_HLIST_HEAD(&priv->mac_hash[i]); 3243 3244 /* Query for default mac and max mtu */ 3245 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 3246 3247 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 3248 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 3249 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3250 3251 /* Set default MAC */ 3252 dev->addr_len = ETH_ALEN; 3253 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3254 if (!is_valid_ether_addr(dev->dev_addr)) { 3255 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", 3256 priv->port, dev->dev_addr); 3257 err = -EINVAL; 3258 goto out; 3259 } else if (mlx4_is_slave(priv->mdev->dev) && 3260 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3261 /* Random MAC was assigned in mlx4_slave_cap 3262 * in mlx4_core module 3263 */ 3264 dev->addr_assign_type |= NET_ADDR_RANDOM; 3265 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3266 } 3267 3268 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3269 3270 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3271 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3272 err = mlx4_en_alloc_resources(priv); 3273 if (err) 3274 goto out; 3275 3276 /* Initialize time stamping config */ 3277 priv->hwtstamp_config.flags = 0; 3278 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3279 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3280 3281 /* Allocate page for receive rings */ 3282 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3283 MLX4_EN_PAGE_SIZE); 3284 if (err) { 3285 en_err(priv, "Failed to allocate page for rx qps\n"); 3286 goto out; 3287 } 3288 priv->allocated = 1; 3289 3290 /* 3291 * Initialize netdev entry points 3292 */ 3293 if (mlx4_is_master(priv->mdev->dev)) 3294 dev->netdev_ops = &mlx4_netdev_ops_master; 3295 else 3296 dev->netdev_ops = &mlx4_netdev_ops; 3297 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3298 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 3299 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3300 3301 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3302 3303 /* 3304 * Set driver features 3305 */ 3306 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3307 if (mdev->LSO_support) 3308 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3309 3310 if (mdev->dev->caps.tunnel_offload_mode == 3311 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3312 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3313 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3314 NETIF_F_GSO_PARTIAL; 3315 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3316 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3317 NETIF_F_GSO_PARTIAL; 3318 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3319 dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3320 NETIF_F_RXCSUM | 3321 NETIF_F_TSO | NETIF_F_TSO6 | 3322 NETIF_F_GSO_UDP_TUNNEL | 3323 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3324 NETIF_F_GSO_PARTIAL; 3325 3326 dev->udp_tunnel_nic_info = &mlx4_udp_tunnels; 3327 } 3328 3329 dev->vlan_features = dev->hw_features; 3330 3331 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3332 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3333 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3334 NETIF_F_HW_VLAN_CTAG_FILTER; 3335 dev->hw_features |= NETIF_F_LOOPBACK | 3336 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3337 3338 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3339 dev->features |= NETIF_F_HW_VLAN_STAG_RX | 3340 NETIF_F_HW_VLAN_STAG_FILTER; 3341 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3342 } 3343 3344 if (mlx4_is_slave(mdev->dev)) { 3345 bool vlan_offload_disabled; 3346 int phv; 3347 3348 err = get_phv_bit(mdev->dev, port, &phv); 3349 if (!err && phv) { 3350 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3351 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3352 } 3353 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port, 3354 &vlan_offload_disabled); 3355 if (!err && vlan_offload_disabled) { 3356 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3357 NETIF_F_HW_VLAN_CTAG_RX | 3358 NETIF_F_HW_VLAN_STAG_TX | 3359 NETIF_F_HW_VLAN_STAG_RX); 3360 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3361 NETIF_F_HW_VLAN_CTAG_RX | 3362 NETIF_F_HW_VLAN_STAG_TX | 3363 NETIF_F_HW_VLAN_STAG_RX); 3364 } 3365 } else { 3366 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3367 !(mdev->dev->caps.flags2 & 3368 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3369 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3370 } 3371 3372 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3373 dev->hw_features |= NETIF_F_RXFCS; 3374 3375 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3376 dev->hw_features |= NETIF_F_RXALL; 3377 3378 if (mdev->dev->caps.steering_mode == 3379 MLX4_STEERING_MODE_DEVICE_MANAGED && 3380 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3381 dev->hw_features |= NETIF_F_NTUPLE; 3382 3383 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3384 dev->priv_flags |= IFF_UNICAST_FLT; 3385 3386 /* Setting a default hash function value */ 3387 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3388 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3389 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3390 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3391 } else { 3392 en_warn(priv, 3393 "No RSS hash capabilities exposed, using Toeplitz\n"); 3394 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3395 } 3396 3397 /* MTU range: 68 - hw-specific max */ 3398 dev->min_mtu = ETH_MIN_MTU; 3399 dev->max_mtu = priv->max_mtu; 3400 3401 mdev->pndev[port] = dev; 3402 mdev->upper[port] = NULL; 3403 3404 netif_carrier_off(dev); 3405 mlx4_en_set_default_moderation(priv); 3406 3407 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]); 3408 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3409 3410 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3411 3412 /* Configure port */ 3413 mlx4_en_calc_rx_buf(dev); 3414 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3415 priv->rx_skb_size + ETH_FCS_LEN, 3416 prof->tx_pause, prof->tx_ppp, 3417 prof->rx_pause, prof->rx_ppp); 3418 if (err) { 3419 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3420 priv->port, err); 3421 goto out; 3422 } 3423 3424 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3425 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3426 if (err) { 3427 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3428 err); 3429 goto out; 3430 } 3431 } 3432 3433 /* Init port */ 3434 en_warn(priv, "Initializing port\n"); 3435 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3436 if (err) { 3437 en_err(priv, "Failed Initializing port\n"); 3438 goto out; 3439 } 3440 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3441 3442 /* Initialize time stamp mechanism */ 3443 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3444 mlx4_en_init_timestamp(mdev); 3445 3446 queue_delayed_work(mdev->workqueue, &priv->service_task, 3447 SERVICE_TASK_DELAY); 3448 3449 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3450 mdev->profile.prof[priv->port].rx_ppp, 3451 mdev->profile.prof[priv->port].rx_pause, 3452 mdev->profile.prof[priv->port].tx_ppp, 3453 mdev->profile.prof[priv->port].tx_pause); 3454 3455 err = register_netdev(dev); 3456 if (err) { 3457 en_err(priv, "Netdev registration failed for port %d\n", port); 3458 goto out; 3459 } 3460 3461 priv->registered = 1; 3462 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3463 dev); 3464 3465 return 0; 3466 3467 out: 3468 mlx4_en_destroy_netdev(dev); 3469 return err; 3470 } 3471 3472 int mlx4_en_reset_config(struct net_device *dev, 3473 struct hwtstamp_config ts_config, 3474 netdev_features_t features) 3475 { 3476 struct mlx4_en_priv *priv = netdev_priv(dev); 3477 struct mlx4_en_dev *mdev = priv->mdev; 3478 struct mlx4_en_port_profile new_prof; 3479 struct mlx4_en_priv *tmp; 3480 int port_up = 0; 3481 int err = 0; 3482 3483 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3484 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3485 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3486 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3487 return 0; /* Nothing to change */ 3488 3489 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3490 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3491 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3492 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3493 return -EINVAL; 3494 } 3495 3496 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3497 if (!tmp) 3498 return -ENOMEM; 3499 3500 mutex_lock(&mdev->state_lock); 3501 3502 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3503 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3504 3505 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); 3506 if (err) 3507 goto out; 3508 3509 if (priv->port_up) { 3510 port_up = 1; 3511 mlx4_en_stop_port(dev, 1); 3512 } 3513 3514 mlx4_en_safe_replace_resources(priv, tmp); 3515 3516 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3517 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3518 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3519 else 3520 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3521 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3522 /* RX time-stamping is OFF, update the RX vlan offload 3523 * to the latest wanted state 3524 */ 3525 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3526 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3527 else 3528 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3529 } 3530 3531 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3532 if (features & NETIF_F_RXFCS) 3533 dev->features |= NETIF_F_RXFCS; 3534 else 3535 dev->features &= ~NETIF_F_RXFCS; 3536 } 3537 3538 /* RX vlan offload and RX time-stamping can't co-exist ! 3539 * Regardless of the caller's choice, 3540 * Turn Off RX vlan offload in case of time-stamping is ON 3541 */ 3542 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3543 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3544 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3545 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3546 } 3547 3548 if (port_up) { 3549 err = mlx4_en_start_port(dev); 3550 if (err) 3551 en_err(priv, "Failed starting port\n"); 3552 } 3553 3554 out: 3555 mutex_unlock(&mdev->state_lock); 3556 kfree(tmp); 3557 if (!err) 3558 netdev_features_change(dev); 3559 return err; 3560 } 3561