1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/bpf.h> 35 #include <linux/etherdevice.h> 36 #include <linux/tcp.h> 37 #include <linux/if_vlan.h> 38 #include <linux/delay.h> 39 #include <linux/slab.h> 40 #include <linux/hash.h> 41 #include <net/ip.h> 42 #include <net/vxlan.h> 43 #include <net/devlink.h> 44 45 #include <linux/mlx4/driver.h> 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/cmd.h> 48 #include <linux/mlx4/cq.h> 49 50 #include "mlx4_en.h" 51 #include "en_port.h" 52 53 #define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \ 54 XDP_PACKET_HEADROOM)) 55 56 int mlx4_en_setup_tc(struct net_device *dev, u8 up) 57 { 58 struct mlx4_en_priv *priv = netdev_priv(dev); 59 int i; 60 unsigned int offset = 0; 61 62 if (up && up != MLX4_EN_NUM_UP_HIGH) 63 return -EINVAL; 64 65 netdev_set_num_tc(dev, up); 66 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 67 /* Partition Tx queues evenly amongst UP's */ 68 for (i = 0; i < up; i++) { 69 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); 70 offset += priv->num_tx_rings_p_up; 71 } 72 73 #ifdef CONFIG_MLX4_EN_DCB 74 if (!mlx4_is_slave(priv->mdev->dev)) { 75 if (up) { 76 if (priv->dcbx_cap) 77 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 78 } else { 79 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 80 priv->cee_config.pfc_state = false; 81 } 82 } 83 #endif /* CONFIG_MLX4_EN_DCB */ 84 85 return 0; 86 } 87 88 int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) 89 { 90 struct mlx4_en_priv *priv = netdev_priv(dev); 91 struct mlx4_en_dev *mdev = priv->mdev; 92 struct mlx4_en_port_profile new_prof; 93 struct mlx4_en_priv *tmp; 94 int total_count; 95 int port_up = 0; 96 int err = 0; 97 98 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 99 if (!tmp) 100 return -ENOMEM; 101 102 mutex_lock(&mdev->state_lock); 103 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 104 new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW : 105 MLX4_EN_NUM_UP_HIGH; 106 new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up * 107 new_prof.num_up; 108 total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP]; 109 if (total_count > MAX_TX_RINGS) { 110 err = -EINVAL; 111 en_err(priv, 112 "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", 113 total_count, MAX_TX_RINGS); 114 goto out; 115 } 116 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); 117 if (err) 118 goto out; 119 120 if (priv->port_up) { 121 port_up = 1; 122 mlx4_en_stop_port(dev, 1); 123 } 124 125 mlx4_en_safe_replace_resources(priv, tmp); 126 if (port_up) { 127 err = mlx4_en_start_port(dev); 128 if (err) { 129 en_err(priv, "Failed starting port for setup TC\n"); 130 goto out; 131 } 132 } 133 134 err = mlx4_en_setup_tc(dev, tc); 135 out: 136 mutex_unlock(&mdev->state_lock); 137 kfree(tmp); 138 return err; 139 } 140 141 static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, 142 void *type_data) 143 { 144 struct tc_mqprio_qopt *mqprio = type_data; 145 146 if (type != TC_SETUP_QDISC_MQPRIO) 147 return -EOPNOTSUPP; 148 149 if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) 150 return -EINVAL; 151 152 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 153 154 return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc); 155 } 156 157 #ifdef CONFIG_RFS_ACCEL 158 159 struct mlx4_en_filter { 160 struct list_head next; 161 struct work_struct work; 162 163 u8 ip_proto; 164 __be32 src_ip; 165 __be32 dst_ip; 166 __be16 src_port; 167 __be16 dst_port; 168 169 int rxq_index; 170 struct mlx4_en_priv *priv; 171 u32 flow_id; /* RFS infrastructure id */ 172 int id; /* mlx4_en driver id */ 173 u64 reg_id; /* Flow steering API id */ 174 u8 activated; /* Used to prevent expiry before filter 175 * is attached 176 */ 177 struct hlist_node filter_chain; 178 }; 179 180 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 181 182 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 183 { 184 switch (ip_proto) { 185 case IPPROTO_UDP: 186 return MLX4_NET_TRANS_RULE_ID_UDP; 187 case IPPROTO_TCP: 188 return MLX4_NET_TRANS_RULE_ID_TCP; 189 default: 190 return MLX4_NET_TRANS_RULE_NUM; 191 } 192 }; 193 194 /* Must not acquire state_lock, as its corresponding work_sync 195 * is done under it. 196 */ 197 static void mlx4_en_filter_work(struct work_struct *work) 198 { 199 struct mlx4_en_filter *filter = container_of(work, 200 struct mlx4_en_filter, 201 work); 202 struct mlx4_en_priv *priv = filter->priv; 203 struct mlx4_spec_list spec_tcp_udp = { 204 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 205 { 206 .tcp_udp = { 207 .dst_port = filter->dst_port, 208 .dst_port_msk = (__force __be16)-1, 209 .src_port = filter->src_port, 210 .src_port_msk = (__force __be16)-1, 211 }, 212 }, 213 }; 214 struct mlx4_spec_list spec_ip = { 215 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 216 { 217 .ipv4 = { 218 .dst_ip = filter->dst_ip, 219 .dst_ip_msk = (__force __be32)-1, 220 .src_ip = filter->src_ip, 221 .src_ip_msk = (__force __be32)-1, 222 }, 223 }, 224 }; 225 struct mlx4_spec_list spec_eth = { 226 .id = MLX4_NET_TRANS_RULE_ID_ETH, 227 }; 228 struct mlx4_net_trans_rule rule = { 229 .list = LIST_HEAD_INIT(rule.list), 230 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 231 .exclusive = 1, 232 .allow_loopback = 1, 233 .promisc_mode = MLX4_FS_REGULAR, 234 .port = priv->port, 235 .priority = MLX4_DOMAIN_RFS, 236 }; 237 int rc; 238 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 239 240 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 241 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 242 filter->ip_proto); 243 goto ignore; 244 } 245 list_add_tail(&spec_eth.list, &rule.list); 246 list_add_tail(&spec_ip.list, &rule.list); 247 list_add_tail(&spec_tcp_udp.list, &rule.list); 248 249 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 250 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 251 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 252 253 filter->activated = 0; 254 255 if (filter->reg_id) { 256 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 257 if (rc && rc != -ENOENT) 258 en_err(priv, "Error detaching flow. rc = %d\n", rc); 259 } 260 261 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 262 if (rc) 263 en_err(priv, "Error attaching flow. err = %d\n", rc); 264 265 ignore: 266 mlx4_en_filter_rfs_expire(priv); 267 268 filter->activated = 1; 269 } 270 271 static inline struct hlist_head * 272 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 273 __be16 src_port, __be16 dst_port) 274 { 275 unsigned long l; 276 int bucket_idx; 277 278 l = (__force unsigned long)src_port | 279 ((__force unsigned long)dst_port << 2); 280 l ^= (__force unsigned long)(src_ip ^ dst_ip); 281 282 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 283 284 return &priv->filter_hash[bucket_idx]; 285 } 286 287 static struct mlx4_en_filter * 288 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 289 __be32 dst_ip, u8 ip_proto, __be16 src_port, 290 __be16 dst_port, u32 flow_id) 291 { 292 struct mlx4_en_filter *filter = NULL; 293 294 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 295 if (!filter) 296 return NULL; 297 298 filter->priv = priv; 299 filter->rxq_index = rxq_index; 300 INIT_WORK(&filter->work, mlx4_en_filter_work); 301 302 filter->src_ip = src_ip; 303 filter->dst_ip = dst_ip; 304 filter->ip_proto = ip_proto; 305 filter->src_port = src_port; 306 filter->dst_port = dst_port; 307 308 filter->flow_id = flow_id; 309 310 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 311 312 list_add_tail(&filter->next, &priv->filters); 313 hlist_add_head(&filter->filter_chain, 314 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 315 dst_port)); 316 317 return filter; 318 } 319 320 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 321 { 322 struct mlx4_en_priv *priv = filter->priv; 323 int rc; 324 325 list_del(&filter->next); 326 327 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 328 if (rc && rc != -ENOENT) 329 en_err(priv, "Error detaching flow. rc = %d\n", rc); 330 331 kfree(filter); 332 } 333 334 static inline struct mlx4_en_filter * 335 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 336 u8 ip_proto, __be16 src_port, __be16 dst_port) 337 { 338 struct mlx4_en_filter *filter; 339 struct mlx4_en_filter *ret = NULL; 340 341 hlist_for_each_entry(filter, 342 filter_hash_bucket(priv, src_ip, dst_ip, 343 src_port, dst_port), 344 filter_chain) { 345 if (filter->src_ip == src_ip && 346 filter->dst_ip == dst_ip && 347 filter->ip_proto == ip_proto && 348 filter->src_port == src_port && 349 filter->dst_port == dst_port) { 350 ret = filter; 351 break; 352 } 353 } 354 355 return ret; 356 } 357 358 static int 359 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 360 u16 rxq_index, u32 flow_id) 361 { 362 struct mlx4_en_priv *priv = netdev_priv(net_dev); 363 struct mlx4_en_filter *filter; 364 const struct iphdr *ip; 365 const __be16 *ports; 366 u8 ip_proto; 367 __be32 src_ip; 368 __be32 dst_ip; 369 __be16 src_port; 370 __be16 dst_port; 371 int nhoff = skb_network_offset(skb); 372 int ret = 0; 373 374 if (skb->protocol != htons(ETH_P_IP)) 375 return -EPROTONOSUPPORT; 376 377 ip = (const struct iphdr *)(skb->data + nhoff); 378 if (ip_is_fragment(ip)) 379 return -EPROTONOSUPPORT; 380 381 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 382 return -EPROTONOSUPPORT; 383 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 384 385 ip_proto = ip->protocol; 386 src_ip = ip->saddr; 387 dst_ip = ip->daddr; 388 src_port = ports[0]; 389 dst_port = ports[1]; 390 391 spin_lock_bh(&priv->filters_lock); 392 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 393 src_port, dst_port); 394 if (filter) { 395 if (filter->rxq_index == rxq_index) 396 goto out; 397 398 filter->rxq_index = rxq_index; 399 } else { 400 filter = mlx4_en_filter_alloc(priv, rxq_index, 401 src_ip, dst_ip, ip_proto, 402 src_port, dst_port, flow_id); 403 if (!filter) { 404 ret = -ENOMEM; 405 goto err; 406 } 407 } 408 409 queue_work(priv->mdev->workqueue, &filter->work); 410 411 out: 412 ret = filter->id; 413 err: 414 spin_unlock_bh(&priv->filters_lock); 415 416 return ret; 417 } 418 419 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 420 { 421 struct mlx4_en_filter *filter, *tmp; 422 LIST_HEAD(del_list); 423 424 spin_lock_bh(&priv->filters_lock); 425 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 426 list_move(&filter->next, &del_list); 427 hlist_del(&filter->filter_chain); 428 } 429 spin_unlock_bh(&priv->filters_lock); 430 431 list_for_each_entry_safe(filter, tmp, &del_list, next) { 432 cancel_work_sync(&filter->work); 433 mlx4_en_filter_free(filter); 434 } 435 } 436 437 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 438 { 439 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 440 LIST_HEAD(del_list); 441 int i = 0; 442 443 spin_lock_bh(&priv->filters_lock); 444 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 445 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 446 break; 447 448 if (filter->activated && 449 !work_pending(&filter->work) && 450 rps_may_expire_flow(priv->dev, 451 filter->rxq_index, filter->flow_id, 452 filter->id)) { 453 list_move(&filter->next, &del_list); 454 hlist_del(&filter->filter_chain); 455 } else 456 last_filter = filter; 457 458 i++; 459 } 460 461 if (last_filter && (&last_filter->next != priv->filters.next)) 462 list_move(&priv->filters, &last_filter->next); 463 464 spin_unlock_bh(&priv->filters_lock); 465 466 list_for_each_entry_safe(filter, tmp, &del_list, next) 467 mlx4_en_filter_free(filter); 468 } 469 #endif 470 471 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 472 __be16 proto, u16 vid) 473 { 474 struct mlx4_en_priv *priv = netdev_priv(dev); 475 struct mlx4_en_dev *mdev = priv->mdev; 476 int err; 477 int idx; 478 479 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 480 481 set_bit(vid, priv->active_vlans); 482 483 /* Add VID to port VLAN filter */ 484 mutex_lock(&mdev->state_lock); 485 if (mdev->device_up && priv->port_up) { 486 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 487 if (err) { 488 en_err(priv, "Failed configuring VLAN filter\n"); 489 goto out; 490 } 491 } 492 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); 493 if (err) 494 en_dbg(HW, priv, "Failed adding vlan %d\n", vid); 495 496 out: 497 mutex_unlock(&mdev->state_lock); 498 return err; 499 } 500 501 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 502 __be16 proto, u16 vid) 503 { 504 struct mlx4_en_priv *priv = netdev_priv(dev); 505 struct mlx4_en_dev *mdev = priv->mdev; 506 int err = 0; 507 508 en_dbg(HW, priv, "Killing VID:%d\n", vid); 509 510 clear_bit(vid, priv->active_vlans); 511 512 /* Remove VID from port VLAN filter */ 513 mutex_lock(&mdev->state_lock); 514 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 515 516 if (mdev->device_up && priv->port_up) { 517 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 518 if (err) 519 en_err(priv, "Failed configuring VLAN filter\n"); 520 } 521 mutex_unlock(&mdev->state_lock); 522 523 return err; 524 } 525 526 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 527 { 528 int i; 529 for (i = ETH_ALEN - 1; i >= 0; --i) { 530 dst_mac[i] = src_mac & 0xff; 531 src_mac >>= 8; 532 } 533 memset(&dst_mac[ETH_ALEN], 0, 2); 534 } 535 536 537 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 538 int qpn, u64 *reg_id) 539 { 540 int err; 541 542 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 543 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 544 return 0; /* do nothing */ 545 546 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 547 MLX4_DOMAIN_NIC, reg_id); 548 if (err) { 549 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 550 return err; 551 } 552 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); 553 return 0; 554 } 555 556 557 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 558 unsigned char *mac, int *qpn, u64 *reg_id) 559 { 560 struct mlx4_en_dev *mdev = priv->mdev; 561 struct mlx4_dev *dev = mdev->dev; 562 int err; 563 564 switch (dev->caps.steering_mode) { 565 case MLX4_STEERING_MODE_B0: { 566 struct mlx4_qp qp; 567 u8 gid[16] = {0}; 568 569 qp.qpn = *qpn; 570 memcpy(&gid[10], mac, ETH_ALEN); 571 gid[5] = priv->port; 572 573 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 574 break; 575 } 576 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 577 struct mlx4_spec_list spec_eth = { {NULL} }; 578 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 579 580 struct mlx4_net_trans_rule rule = { 581 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 582 .exclusive = 0, 583 .allow_loopback = 1, 584 .promisc_mode = MLX4_FS_REGULAR, 585 .priority = MLX4_DOMAIN_NIC, 586 }; 587 588 rule.port = priv->port; 589 rule.qpn = *qpn; 590 INIT_LIST_HEAD(&rule.list); 591 592 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 593 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 594 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 595 list_add_tail(&spec_eth.list, &rule.list); 596 597 err = mlx4_flow_attach(dev, &rule, reg_id); 598 break; 599 } 600 default: 601 return -EINVAL; 602 } 603 if (err) 604 en_warn(priv, "Failed Attaching Unicast\n"); 605 606 return err; 607 } 608 609 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 610 unsigned char *mac, int qpn, u64 reg_id) 611 { 612 struct mlx4_en_dev *mdev = priv->mdev; 613 struct mlx4_dev *dev = mdev->dev; 614 615 switch (dev->caps.steering_mode) { 616 case MLX4_STEERING_MODE_B0: { 617 struct mlx4_qp qp; 618 u8 gid[16] = {0}; 619 620 qp.qpn = qpn; 621 memcpy(&gid[10], mac, ETH_ALEN); 622 gid[5] = priv->port; 623 624 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 625 break; 626 } 627 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 628 mlx4_flow_detach(dev, reg_id); 629 break; 630 } 631 default: 632 en_err(priv, "Invalid steering mode.\n"); 633 } 634 } 635 636 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 637 { 638 struct mlx4_en_dev *mdev = priv->mdev; 639 struct mlx4_dev *dev = mdev->dev; 640 int index = 0; 641 int err = 0; 642 int *qpn = &priv->base_qpn; 643 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 644 645 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 646 priv->dev->dev_addr); 647 index = mlx4_register_mac(dev, priv->port, mac); 648 if (index < 0) { 649 err = index; 650 en_err(priv, "Failed adding MAC: %pM\n", 651 priv->dev->dev_addr); 652 return err; 653 } 654 655 en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode); 656 657 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 658 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 659 *qpn = base_qpn + index; 660 return 0; 661 } 662 663 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP, 664 MLX4_RES_USAGE_DRIVER); 665 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 666 if (err) { 667 en_err(priv, "Failed to reserve qp for mac registration\n"); 668 mlx4_unregister_mac(dev, priv->port, mac); 669 return err; 670 } 671 672 return 0; 673 } 674 675 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 676 { 677 struct mlx4_en_dev *mdev = priv->mdev; 678 struct mlx4_dev *dev = mdev->dev; 679 int qpn = priv->base_qpn; 680 681 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 682 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 683 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 684 priv->dev->dev_addr); 685 mlx4_unregister_mac(dev, priv->port, mac); 686 } else { 687 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 688 priv->port, qpn); 689 mlx4_qp_release_range(dev, qpn, 1); 690 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 691 } 692 } 693 694 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 695 unsigned char *new_mac, unsigned char *prev_mac) 696 { 697 struct mlx4_en_dev *mdev = priv->mdev; 698 struct mlx4_dev *dev = mdev->dev; 699 int err = 0; 700 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); 701 702 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 703 struct hlist_head *bucket; 704 unsigned int mac_hash; 705 struct mlx4_mac_entry *entry; 706 struct hlist_node *tmp; 707 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); 708 709 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 710 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 711 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 712 mlx4_en_uc_steer_release(priv, entry->mac, 713 qpn, entry->reg_id); 714 mlx4_unregister_mac(dev, priv->port, 715 prev_mac_u64); 716 hlist_del_rcu(&entry->hlist); 717 synchronize_rcu(); 718 memcpy(entry->mac, new_mac, ETH_ALEN); 719 entry->reg_id = 0; 720 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 721 hlist_add_head_rcu(&entry->hlist, 722 &priv->mac_hash[mac_hash]); 723 mlx4_register_mac(dev, priv->port, new_mac_u64); 724 err = mlx4_en_uc_steer_add(priv, new_mac, 725 &qpn, 726 &entry->reg_id); 727 if (err) 728 return err; 729 if (priv->tunnel_reg_id) { 730 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 731 priv->tunnel_reg_id = 0; 732 } 733 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, 734 &priv->tunnel_reg_id); 735 return err; 736 } 737 } 738 return -EINVAL; 739 } 740 741 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 742 } 743 744 static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv, 745 unsigned char new_mac[ETH_ALEN + 2]) 746 { 747 struct mlx4_en_dev *mdev = priv->mdev; 748 int err; 749 750 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN)) 751 return; 752 753 err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac); 754 if (err) 755 en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n", 756 new_mac, priv->port, err); 757 } 758 759 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, 760 unsigned char new_mac[ETH_ALEN + 2]) 761 { 762 int err = 0; 763 764 if (priv->port_up) { 765 /* Remove old MAC and insert the new one */ 766 err = mlx4_en_replace_mac(priv, priv->base_qpn, 767 new_mac, priv->current_mac); 768 if (err) 769 en_err(priv, "Failed changing HW MAC address\n"); 770 } else 771 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 772 773 if (!err) 774 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); 775 776 return err; 777 } 778 779 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 780 { 781 struct mlx4_en_priv *priv = netdev_priv(dev); 782 struct mlx4_en_dev *mdev = priv->mdev; 783 struct sockaddr *saddr = addr; 784 unsigned char new_mac[ETH_ALEN + 2]; 785 int err; 786 787 if (!is_valid_ether_addr(saddr->sa_data)) 788 return -EADDRNOTAVAIL; 789 790 mutex_lock(&mdev->state_lock); 791 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 792 err = mlx4_en_do_set_mac(priv, new_mac); 793 if (err) 794 goto out; 795 796 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 797 mlx4_en_update_user_mac(priv, new_mac); 798 out: 799 mutex_unlock(&mdev->state_lock); 800 801 return err; 802 } 803 804 static void mlx4_en_clear_list(struct net_device *dev) 805 { 806 struct mlx4_en_priv *priv = netdev_priv(dev); 807 struct mlx4_en_mc_list *tmp, *mc_to_del; 808 809 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 810 list_del(&mc_to_del->list); 811 kfree(mc_to_del); 812 } 813 } 814 815 static void mlx4_en_cache_mclist(struct net_device *dev) 816 { 817 struct mlx4_en_priv *priv = netdev_priv(dev); 818 struct netdev_hw_addr *ha; 819 struct mlx4_en_mc_list *tmp; 820 821 mlx4_en_clear_list(dev); 822 netdev_for_each_mc_addr(ha, dev) { 823 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 824 if (!tmp) { 825 mlx4_en_clear_list(dev); 826 return; 827 } 828 memcpy(tmp->addr, ha->addr, ETH_ALEN); 829 list_add_tail(&tmp->list, &priv->mc_list); 830 } 831 } 832 833 static void update_mclist_flags(struct mlx4_en_priv *priv, 834 struct list_head *dst, 835 struct list_head *src) 836 { 837 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 838 bool found; 839 840 /* Find all the entries that should be removed from dst, 841 * These are the entries that are not found in src 842 */ 843 list_for_each_entry(dst_tmp, dst, list) { 844 found = false; 845 list_for_each_entry(src_tmp, src, list) { 846 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 847 found = true; 848 break; 849 } 850 } 851 if (!found) 852 dst_tmp->action = MCLIST_REM; 853 } 854 855 /* Add entries that exist in src but not in dst 856 * mark them as need to add 857 */ 858 list_for_each_entry(src_tmp, src, list) { 859 found = false; 860 list_for_each_entry(dst_tmp, dst, list) { 861 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 862 dst_tmp->action = MCLIST_NONE; 863 found = true; 864 break; 865 } 866 } 867 if (!found) { 868 new_mc = kmemdup(src_tmp, 869 sizeof(struct mlx4_en_mc_list), 870 GFP_KERNEL); 871 if (!new_mc) 872 return; 873 874 new_mc->action = MCLIST_ADD; 875 list_add_tail(&new_mc->list, dst); 876 } 877 } 878 } 879 880 static void mlx4_en_set_rx_mode(struct net_device *dev) 881 { 882 struct mlx4_en_priv *priv = netdev_priv(dev); 883 884 if (!priv->port_up) 885 return; 886 887 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 888 } 889 890 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 891 struct mlx4_en_dev *mdev) 892 { 893 int err = 0; 894 895 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 896 if (netif_msg_rx_status(priv)) 897 en_warn(priv, "Entering promiscuous mode\n"); 898 priv->flags |= MLX4_EN_FLAG_PROMISC; 899 900 /* Enable promiscouos mode */ 901 switch (mdev->dev->caps.steering_mode) { 902 case MLX4_STEERING_MODE_DEVICE_MANAGED: 903 err = mlx4_flow_steer_promisc_add(mdev->dev, 904 priv->port, 905 priv->base_qpn, 906 MLX4_FS_ALL_DEFAULT); 907 if (err) 908 en_err(priv, "Failed enabling promiscuous mode\n"); 909 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 910 break; 911 912 case MLX4_STEERING_MODE_B0: 913 err = mlx4_unicast_promisc_add(mdev->dev, 914 priv->base_qpn, 915 priv->port); 916 if (err) 917 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 918 919 /* Add the default qp number as multicast 920 * promisc 921 */ 922 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 923 err = mlx4_multicast_promisc_add(mdev->dev, 924 priv->base_qpn, 925 priv->port); 926 if (err) 927 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 928 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 929 } 930 break; 931 932 case MLX4_STEERING_MODE_A0: 933 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 934 priv->port, 935 priv->base_qpn, 936 1); 937 if (err) 938 en_err(priv, "Failed enabling promiscuous mode\n"); 939 break; 940 } 941 942 /* Disable port multicast filter (unconditionally) */ 943 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 944 0, MLX4_MCAST_DISABLE); 945 if (err) 946 en_err(priv, "Failed disabling multicast filter\n"); 947 } 948 } 949 950 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 951 struct mlx4_en_dev *mdev) 952 { 953 int err = 0; 954 955 if (netif_msg_rx_status(priv)) 956 en_warn(priv, "Leaving promiscuous mode\n"); 957 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 958 959 /* Disable promiscouos mode */ 960 switch (mdev->dev->caps.steering_mode) { 961 case MLX4_STEERING_MODE_DEVICE_MANAGED: 962 err = mlx4_flow_steer_promisc_remove(mdev->dev, 963 priv->port, 964 MLX4_FS_ALL_DEFAULT); 965 if (err) 966 en_err(priv, "Failed disabling promiscuous mode\n"); 967 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 968 break; 969 970 case MLX4_STEERING_MODE_B0: 971 err = mlx4_unicast_promisc_remove(mdev->dev, 972 priv->base_qpn, 973 priv->port); 974 if (err) 975 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 976 /* Disable Multicast promisc */ 977 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 978 err = mlx4_multicast_promisc_remove(mdev->dev, 979 priv->base_qpn, 980 priv->port); 981 if (err) 982 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 983 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 984 } 985 break; 986 987 case MLX4_STEERING_MODE_A0: 988 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 989 priv->port, 990 priv->base_qpn, 0); 991 if (err) 992 en_err(priv, "Failed disabling promiscuous mode\n"); 993 break; 994 } 995 } 996 997 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 998 struct net_device *dev, 999 struct mlx4_en_dev *mdev) 1000 { 1001 struct mlx4_en_mc_list *mclist, *tmp; 1002 u64 mcast_addr = 0; 1003 u8 mc_list[16] = {0}; 1004 int err = 0; 1005 1006 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 1007 if (dev->flags & IFF_ALLMULTI) { 1008 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1009 0, MLX4_MCAST_DISABLE); 1010 if (err) 1011 en_err(priv, "Failed disabling multicast filter\n"); 1012 1013 /* Add the default qp number as multicast promisc */ 1014 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 1015 switch (mdev->dev->caps.steering_mode) { 1016 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1017 err = mlx4_flow_steer_promisc_add(mdev->dev, 1018 priv->port, 1019 priv->base_qpn, 1020 MLX4_FS_MC_DEFAULT); 1021 break; 1022 1023 case MLX4_STEERING_MODE_B0: 1024 err = mlx4_multicast_promisc_add(mdev->dev, 1025 priv->base_qpn, 1026 priv->port); 1027 break; 1028 1029 case MLX4_STEERING_MODE_A0: 1030 break; 1031 } 1032 if (err) 1033 en_err(priv, "Failed entering multicast promisc mode\n"); 1034 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 1035 } 1036 } else { 1037 /* Disable Multicast promisc */ 1038 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1039 switch (mdev->dev->caps.steering_mode) { 1040 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1041 err = mlx4_flow_steer_promisc_remove(mdev->dev, 1042 priv->port, 1043 MLX4_FS_MC_DEFAULT); 1044 break; 1045 1046 case MLX4_STEERING_MODE_B0: 1047 err = mlx4_multicast_promisc_remove(mdev->dev, 1048 priv->base_qpn, 1049 priv->port); 1050 break; 1051 1052 case MLX4_STEERING_MODE_A0: 1053 break; 1054 } 1055 if (err) 1056 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 1057 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1058 } 1059 1060 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1061 0, MLX4_MCAST_DISABLE); 1062 if (err) 1063 en_err(priv, "Failed disabling multicast filter\n"); 1064 1065 /* Flush mcast filter and init it with broadcast address */ 1066 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 1067 1, MLX4_MCAST_CONFIG); 1068 1069 /* Update multicast list - we cache all addresses so they won't 1070 * change while HW is updated holding the command semaphor */ 1071 netif_addr_lock_bh(dev); 1072 mlx4_en_cache_mclist(dev); 1073 netif_addr_unlock_bh(dev); 1074 list_for_each_entry(mclist, &priv->mc_list, list) { 1075 mcast_addr = mlx4_mac_to_u64(mclist->addr); 1076 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 1077 mcast_addr, 0, MLX4_MCAST_CONFIG); 1078 } 1079 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1080 0, MLX4_MCAST_ENABLE); 1081 if (err) 1082 en_err(priv, "Failed enabling multicast filter\n"); 1083 1084 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 1085 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1086 if (mclist->action == MCLIST_REM) { 1087 /* detach this address and delete from list */ 1088 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1089 mc_list[5] = priv->port; 1090 err = mlx4_multicast_detach(mdev->dev, 1091 priv->rss_map.indir_qp, 1092 mc_list, 1093 MLX4_PROT_ETH, 1094 mclist->reg_id); 1095 if (err) 1096 en_err(priv, "Fail to detach multicast address\n"); 1097 1098 if (mclist->tunnel_reg_id) { 1099 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); 1100 if (err) 1101 en_err(priv, "Failed to detach multicast address\n"); 1102 } 1103 1104 /* remove from list */ 1105 list_del(&mclist->list); 1106 kfree(mclist); 1107 } else if (mclist->action == MCLIST_ADD) { 1108 /* attach the address */ 1109 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1110 /* needed for B0 steering support */ 1111 mc_list[5] = priv->port; 1112 err = mlx4_multicast_attach(mdev->dev, 1113 priv->rss_map.indir_qp, 1114 mc_list, 1115 priv->port, 0, 1116 MLX4_PROT_ETH, 1117 &mclist->reg_id); 1118 if (err) 1119 en_err(priv, "Fail to attach multicast address\n"); 1120 1121 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 1122 &mclist->tunnel_reg_id); 1123 if (err) 1124 en_err(priv, "Failed to attach multicast address\n"); 1125 } 1126 } 1127 } 1128 } 1129 1130 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1131 struct net_device *dev, 1132 struct mlx4_en_dev *mdev) 1133 { 1134 struct netdev_hw_addr *ha; 1135 struct mlx4_mac_entry *entry; 1136 struct hlist_node *tmp; 1137 bool found; 1138 u64 mac; 1139 int err = 0; 1140 struct hlist_head *bucket; 1141 unsigned int i; 1142 int removed = 0; 1143 u32 prev_flags; 1144 1145 /* Note that we do not need to protect our mac_hash traversal with rcu, 1146 * since all modification code is protected by mdev->state_lock 1147 */ 1148 1149 /* find what to remove */ 1150 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1151 bucket = &priv->mac_hash[i]; 1152 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1153 found = false; 1154 netdev_for_each_uc_addr(ha, dev) { 1155 if (ether_addr_equal_64bits(entry->mac, 1156 ha->addr)) { 1157 found = true; 1158 break; 1159 } 1160 } 1161 1162 /* MAC address of the port is not in uc list */ 1163 if (ether_addr_equal_64bits(entry->mac, 1164 priv->current_mac)) 1165 found = true; 1166 1167 if (!found) { 1168 mac = mlx4_mac_to_u64(entry->mac); 1169 mlx4_en_uc_steer_release(priv, entry->mac, 1170 priv->base_qpn, 1171 entry->reg_id); 1172 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1173 1174 hlist_del_rcu(&entry->hlist); 1175 kfree_rcu(entry, rcu); 1176 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1177 entry->mac, priv->port); 1178 ++removed; 1179 } 1180 } 1181 } 1182 1183 /* if we didn't remove anything, there is no use in trying to add 1184 * again once we are in a forced promisc mode state 1185 */ 1186 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1187 return; 1188 1189 prev_flags = priv->flags; 1190 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1191 1192 /* find what to add */ 1193 netdev_for_each_uc_addr(ha, dev) { 1194 found = false; 1195 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1196 hlist_for_each_entry(entry, bucket, hlist) { 1197 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1198 found = true; 1199 break; 1200 } 1201 } 1202 1203 if (!found) { 1204 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1205 if (!entry) { 1206 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1207 ha->addr, priv->port); 1208 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1209 break; 1210 } 1211 mac = mlx4_mac_to_u64(ha->addr); 1212 memcpy(entry->mac, ha->addr, ETH_ALEN); 1213 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1214 if (err < 0) { 1215 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1216 ha->addr, priv->port, err); 1217 kfree(entry); 1218 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1219 break; 1220 } 1221 err = mlx4_en_uc_steer_add(priv, ha->addr, 1222 &priv->base_qpn, 1223 &entry->reg_id); 1224 if (err) { 1225 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1226 ha->addr, priv->port, err); 1227 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1228 kfree(entry); 1229 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1230 break; 1231 } else { 1232 unsigned int mac_hash; 1233 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1234 ha->addr, priv->port); 1235 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1236 bucket = &priv->mac_hash[mac_hash]; 1237 hlist_add_head_rcu(&entry->hlist, bucket); 1238 } 1239 } 1240 } 1241 1242 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1243 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1244 priv->port); 1245 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1246 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1247 priv->port); 1248 } 1249 } 1250 1251 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1252 { 1253 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1254 rx_mode_task); 1255 struct mlx4_en_dev *mdev = priv->mdev; 1256 struct net_device *dev = priv->dev; 1257 1258 mutex_lock(&mdev->state_lock); 1259 if (!mdev->device_up) { 1260 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1261 goto out; 1262 } 1263 if (!priv->port_up) { 1264 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1265 goto out; 1266 } 1267 1268 if (!netif_carrier_ok(dev)) { 1269 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1270 if (priv->port_state.link_state) { 1271 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1272 netif_carrier_on(dev); 1273 en_dbg(LINK, priv, "Link Up\n"); 1274 } 1275 } 1276 } 1277 1278 if (dev->priv_flags & IFF_UNICAST_FLT) 1279 mlx4_en_do_uc_filter(priv, dev, mdev); 1280 1281 /* Promsicuous mode: disable all filters */ 1282 if ((dev->flags & IFF_PROMISC) || 1283 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1284 mlx4_en_set_promisc_mode(priv, mdev); 1285 goto out; 1286 } 1287 1288 /* Not in promiscuous mode */ 1289 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1290 mlx4_en_clear_promisc_mode(priv, mdev); 1291 1292 mlx4_en_do_multicast(priv, dev, mdev); 1293 out: 1294 mutex_unlock(&mdev->state_lock); 1295 } 1296 1297 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1298 { 1299 u64 reg_id; 1300 int err = 0; 1301 int *qpn = &priv->base_qpn; 1302 struct mlx4_mac_entry *entry; 1303 1304 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 1305 if (err) 1306 return err; 1307 1308 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, 1309 &priv->tunnel_reg_id); 1310 if (err) 1311 goto tunnel_err; 1312 1313 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1314 if (!entry) { 1315 err = -ENOMEM; 1316 goto alloc_err; 1317 } 1318 1319 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 1320 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); 1321 entry->reg_id = reg_id; 1322 hlist_add_head_rcu(&entry->hlist, 1323 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 1324 1325 return 0; 1326 1327 alloc_err: 1328 if (priv->tunnel_reg_id) 1329 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1330 1331 tunnel_err: 1332 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 1333 return err; 1334 } 1335 1336 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) 1337 { 1338 u64 mac; 1339 unsigned int i; 1340 int qpn = priv->base_qpn; 1341 struct hlist_head *bucket; 1342 struct hlist_node *tmp; 1343 struct mlx4_mac_entry *entry; 1344 1345 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1346 bucket = &priv->mac_hash[i]; 1347 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1348 mac = mlx4_mac_to_u64(entry->mac); 1349 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", 1350 entry->mac); 1351 mlx4_en_uc_steer_release(priv, entry->mac, 1352 qpn, entry->reg_id); 1353 1354 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); 1355 hlist_del_rcu(&entry->hlist); 1356 kfree_rcu(entry, rcu); 1357 } 1358 } 1359 1360 if (priv->tunnel_reg_id) { 1361 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1362 priv->tunnel_reg_id = 0; 1363 } 1364 } 1365 1366 static void mlx4_en_tx_timeout(struct net_device *dev) 1367 { 1368 struct mlx4_en_priv *priv = netdev_priv(dev); 1369 struct mlx4_en_dev *mdev = priv->mdev; 1370 int i; 1371 1372 if (netif_msg_timer(priv)) 1373 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1374 1375 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 1376 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i]; 1377 1378 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 1379 continue; 1380 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1381 i, tx_ring->qpn, tx_ring->sp_cqn, 1382 tx_ring->cons, tx_ring->prod); 1383 } 1384 1385 priv->port_stats.tx_timeout++; 1386 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1387 queue_work(mdev->workqueue, &priv->watchdog_task); 1388 } 1389 1390 1391 static void 1392 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1393 { 1394 struct mlx4_en_priv *priv = netdev_priv(dev); 1395 1396 spin_lock_bh(&priv->stats_lock); 1397 mlx4_en_fold_software_stats(dev); 1398 netdev_stats_to_stats64(stats, &dev->stats); 1399 spin_unlock_bh(&priv->stats_lock); 1400 } 1401 1402 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1403 { 1404 struct mlx4_en_cq *cq; 1405 int i, t; 1406 1407 /* If we haven't received a specific coalescing setting 1408 * (module param), we set the moderation parameters as follows: 1409 * - moder_cnt is set to the number of mtu sized packets to 1410 * satisfy our coalescing target. 1411 * - moder_time is set to a fixed value. 1412 */ 1413 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1414 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1415 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1416 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1417 en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1418 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1419 1420 /* Setup cq moderation params */ 1421 for (i = 0; i < priv->rx_ring_num; i++) { 1422 cq = priv->rx_cq[i]; 1423 cq->moder_cnt = priv->rx_frames; 1424 cq->moder_time = priv->rx_usecs; 1425 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1426 priv->last_moder_packets[i] = 0; 1427 priv->last_moder_bytes[i] = 0; 1428 } 1429 1430 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1431 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1432 cq = priv->tx_cq[t][i]; 1433 cq->moder_cnt = priv->tx_frames; 1434 cq->moder_time = priv->tx_usecs; 1435 } 1436 } 1437 1438 /* Reset auto-moderation params */ 1439 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1440 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1441 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1442 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1443 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1444 priv->adaptive_rx_coal = 1; 1445 priv->last_moder_jiffies = 0; 1446 priv->last_moder_tx_packets = 0; 1447 } 1448 1449 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1450 { 1451 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1452 u32 pkt_rate_high, pkt_rate_low; 1453 struct mlx4_en_cq *cq; 1454 unsigned long packets; 1455 unsigned long rate; 1456 unsigned long avg_pkt_size; 1457 unsigned long rx_packets; 1458 unsigned long rx_bytes; 1459 unsigned long rx_pkt_diff; 1460 int moder_time; 1461 int ring, err; 1462 1463 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1464 return; 1465 1466 pkt_rate_low = READ_ONCE(priv->pkt_rate_low); 1467 pkt_rate_high = READ_ONCE(priv->pkt_rate_high); 1468 1469 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1470 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); 1471 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); 1472 1473 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring]; 1474 packets = rx_pkt_diff; 1475 rate = packets * HZ / period; 1476 avg_pkt_size = packets ? (rx_bytes - 1477 priv->last_moder_bytes[ring]) / packets : 0; 1478 1479 /* Apply auto-moderation only when packet rate 1480 * exceeds a rate that it matters */ 1481 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1482 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1483 if (rate <= pkt_rate_low) 1484 moder_time = priv->rx_usecs_low; 1485 else if (rate >= pkt_rate_high) 1486 moder_time = priv->rx_usecs_high; 1487 else 1488 moder_time = (rate - pkt_rate_low) * 1489 (priv->rx_usecs_high - priv->rx_usecs_low) / 1490 (pkt_rate_high - pkt_rate_low) + 1491 priv->rx_usecs_low; 1492 } else { 1493 moder_time = priv->rx_usecs_low; 1494 } 1495 1496 cq = priv->rx_cq[ring]; 1497 if (moder_time != priv->last_moder_time[ring] || 1498 cq->moder_cnt != priv->rx_frames) { 1499 priv->last_moder_time[ring] = moder_time; 1500 cq->moder_time = moder_time; 1501 cq->moder_cnt = priv->rx_frames; 1502 err = mlx4_en_set_cq_moder(priv, cq); 1503 if (err) 1504 en_err(priv, "Failed modifying moderation for cq:%d\n", 1505 ring); 1506 } 1507 priv->last_moder_packets[ring] = rx_packets; 1508 priv->last_moder_bytes[ring] = rx_bytes; 1509 } 1510 1511 priv->last_moder_jiffies = jiffies; 1512 } 1513 1514 static void mlx4_en_do_get_stats(struct work_struct *work) 1515 { 1516 struct delayed_work *delay = to_delayed_work(work); 1517 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1518 stats_task); 1519 struct mlx4_en_dev *mdev = priv->mdev; 1520 int err; 1521 1522 mutex_lock(&mdev->state_lock); 1523 if (mdev->device_up) { 1524 if (priv->port_up) { 1525 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1526 if (err) 1527 en_dbg(HW, priv, "Could not update stats\n"); 1528 1529 mlx4_en_auto_moderation(priv); 1530 } 1531 1532 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1533 } 1534 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1535 mlx4_en_do_set_mac(priv, priv->current_mac); 1536 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1537 } 1538 mutex_unlock(&mdev->state_lock); 1539 } 1540 1541 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1542 * periodically 1543 */ 1544 static void mlx4_en_service_task(struct work_struct *work) 1545 { 1546 struct delayed_work *delay = to_delayed_work(work); 1547 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1548 service_task); 1549 struct mlx4_en_dev *mdev = priv->mdev; 1550 1551 mutex_lock(&mdev->state_lock); 1552 if (mdev->device_up) { 1553 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1554 mlx4_en_ptp_overflow_check(mdev); 1555 1556 mlx4_en_recover_from_oom(priv); 1557 queue_delayed_work(mdev->workqueue, &priv->service_task, 1558 SERVICE_TASK_DELAY); 1559 } 1560 mutex_unlock(&mdev->state_lock); 1561 } 1562 1563 static void mlx4_en_linkstate(struct work_struct *work) 1564 { 1565 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1566 linkstate_task); 1567 struct mlx4_en_dev *mdev = priv->mdev; 1568 int linkstate = priv->link_state; 1569 1570 mutex_lock(&mdev->state_lock); 1571 /* If observable port state changed set carrier state and 1572 * report to system log */ 1573 if (priv->last_link_state != linkstate) { 1574 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1575 en_info(priv, "Link Down\n"); 1576 netif_carrier_off(priv->dev); 1577 } else { 1578 en_info(priv, "Link Up\n"); 1579 netif_carrier_on(priv->dev); 1580 } 1581 } 1582 priv->last_link_state = linkstate; 1583 mutex_unlock(&mdev->state_lock); 1584 } 1585 1586 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1587 { 1588 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1589 int numa_node = priv->mdev->dev->numa_node; 1590 1591 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1592 return -ENOMEM; 1593 1594 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1595 ring->affinity_mask); 1596 return 0; 1597 } 1598 1599 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1600 { 1601 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); 1602 } 1603 1604 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, 1605 int tx_ring_idx) 1606 { 1607 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx]; 1608 int rr_index = tx_ring_idx; 1609 1610 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; 1611 tx_ring->recycle_ring = priv->rx_ring[rr_index]; 1612 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n", 1613 TX_XDP, tx_ring_idx, rr_index); 1614 } 1615 1616 int mlx4_en_start_port(struct net_device *dev) 1617 { 1618 struct mlx4_en_priv *priv = netdev_priv(dev); 1619 struct mlx4_en_dev *mdev = priv->mdev; 1620 struct mlx4_en_cq *cq; 1621 struct mlx4_en_tx_ring *tx_ring; 1622 int rx_index = 0; 1623 int err = 0; 1624 int i, t; 1625 int j; 1626 u8 mc_list[16] = {0}; 1627 1628 if (priv->port_up) { 1629 en_dbg(DRV, priv, "start port called while port already up\n"); 1630 return 0; 1631 } 1632 1633 INIT_LIST_HEAD(&priv->mc_list); 1634 INIT_LIST_HEAD(&priv->curr_list); 1635 INIT_LIST_HEAD(&priv->ethtool_list); 1636 memset(&priv->ethtool_rules[0], 0, 1637 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1638 1639 /* Calculate Rx buf size */ 1640 dev->mtu = min(dev->mtu, priv->max_mtu); 1641 mlx4_en_calc_rx_buf(dev); 1642 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1643 1644 /* Configure rx cq's and rings */ 1645 err = mlx4_en_activate_rx_rings(priv); 1646 if (err) { 1647 en_err(priv, "Failed to activate RX rings\n"); 1648 return err; 1649 } 1650 for (i = 0; i < priv->rx_ring_num; i++) { 1651 cq = priv->rx_cq[i]; 1652 1653 err = mlx4_en_init_affinity_hint(priv, i); 1654 if (err) { 1655 en_err(priv, "Failed preparing IRQ affinity hint\n"); 1656 goto cq_err; 1657 } 1658 1659 err = mlx4_en_activate_cq(priv, cq, i); 1660 if (err) { 1661 en_err(priv, "Failed activating Rx CQ\n"); 1662 mlx4_en_free_affinity_hint(priv, i); 1663 goto cq_err; 1664 } 1665 1666 for (j = 0; j < cq->size; j++) { 1667 struct mlx4_cqe *cqe = NULL; 1668 1669 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + 1670 priv->cqe_factor; 1671 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1672 } 1673 1674 err = mlx4_en_set_cq_moder(priv, cq); 1675 if (err) { 1676 en_err(priv, "Failed setting cq moderation parameters\n"); 1677 mlx4_en_deactivate_cq(priv, cq); 1678 mlx4_en_free_affinity_hint(priv, i); 1679 goto cq_err; 1680 } 1681 mlx4_en_arm_cq(priv, cq); 1682 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1683 ++rx_index; 1684 } 1685 1686 /* Set qp number */ 1687 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1688 err = mlx4_en_get_qp(priv); 1689 if (err) { 1690 en_err(priv, "Failed getting eth qp\n"); 1691 goto cq_err; 1692 } 1693 mdev->mac_removed[priv->port] = 0; 1694 1695 priv->counter_index = 1696 mlx4_get_default_counter_index(mdev->dev, priv->port); 1697 1698 err = mlx4_en_config_rss_steer(priv); 1699 if (err) { 1700 en_err(priv, "Failed configuring rss steering\n"); 1701 goto mac_err; 1702 } 1703 1704 err = mlx4_en_create_drop_qp(priv); 1705 if (err) 1706 goto rss_err; 1707 1708 /* Configure tx cq's and rings */ 1709 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1710 u8 num_tx_rings_p_up = t == TX ? 1711 priv->num_tx_rings_p_up : priv->tx_ring_num[t]; 1712 1713 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1714 /* Configure cq */ 1715 cq = priv->tx_cq[t][i]; 1716 err = mlx4_en_activate_cq(priv, cq, i); 1717 if (err) { 1718 en_err(priv, "Failed allocating Tx CQ\n"); 1719 goto tx_err; 1720 } 1721 err = mlx4_en_set_cq_moder(priv, cq); 1722 if (err) { 1723 en_err(priv, "Failed setting cq moderation parameters\n"); 1724 mlx4_en_deactivate_cq(priv, cq); 1725 goto tx_err; 1726 } 1727 en_dbg(DRV, priv, 1728 "Resetting index of collapsed CQ:%d to -1\n", i); 1729 cq->buf->wqe_index = cpu_to_be16(0xffff); 1730 1731 /* Configure ring */ 1732 tx_ring = priv->tx_ring[t][i]; 1733 err = mlx4_en_activate_tx_ring(priv, tx_ring, 1734 cq->mcq.cqn, 1735 i / num_tx_rings_p_up); 1736 if (err) { 1737 en_err(priv, "Failed allocating Tx ring\n"); 1738 mlx4_en_deactivate_cq(priv, cq); 1739 goto tx_err; 1740 } 1741 if (t != TX_XDP) { 1742 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1743 tx_ring->recycle_ring = NULL; 1744 1745 /* Arm CQ for TX completions */ 1746 mlx4_en_arm_cq(priv, cq); 1747 1748 } else { 1749 mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring); 1750 mlx4_en_init_recycle_ring(priv, i); 1751 /* XDP TX CQ should never be armed */ 1752 } 1753 1754 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1755 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1756 *((u32 *)(tx_ring->buf + j)) = 0xffffffff; 1757 } 1758 } 1759 1760 /* Configure port */ 1761 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1762 priv->rx_skb_size + ETH_FCS_LEN, 1763 priv->prof->tx_pause, 1764 priv->prof->tx_ppp, 1765 priv->prof->rx_pause, 1766 priv->prof->rx_ppp); 1767 if (err) { 1768 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1769 priv->port, err); 1770 goto tx_err; 1771 } 1772 1773 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu); 1774 if (err) { 1775 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n", 1776 dev->mtu, priv->port, err); 1777 goto tx_err; 1778 } 1779 1780 /* Set default qp number */ 1781 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1782 if (err) { 1783 en_err(priv, "Failed setting default qp numbers\n"); 1784 goto tx_err; 1785 } 1786 1787 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1788 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 1789 if (err) { 1790 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1791 err); 1792 goto tx_err; 1793 } 1794 } 1795 1796 /* Init port */ 1797 en_dbg(HW, priv, "Initializing port\n"); 1798 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1799 if (err) { 1800 en_err(priv, "Failed Initializing port\n"); 1801 goto tx_err; 1802 } 1803 1804 /* Set Unicast and VXLAN steering rules */ 1805 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && 1806 mlx4_en_set_rss_steer_rules(priv)) 1807 mlx4_warn(mdev, "Failed setting steering rules\n"); 1808 1809 /* Attach rx QP to bradcast address */ 1810 eth_broadcast_addr(&mc_list[10]); 1811 mc_list[5] = priv->port; /* needed for B0 steering support */ 1812 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list, 1813 priv->port, 0, MLX4_PROT_ETH, 1814 &priv->broadcast_id)) 1815 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1816 1817 /* Must redo promiscuous mode setup. */ 1818 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1819 1820 /* Schedule multicast task to populate multicast list */ 1821 queue_work(mdev->workqueue, &priv->rx_mode_task); 1822 1823 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1824 udp_tunnel_get_rx_info(dev); 1825 1826 priv->port_up = true; 1827 1828 /* Process all completions if exist to prevent 1829 * the queues freezing if they are full 1830 */ 1831 for (i = 0; i < priv->rx_ring_num; i++) { 1832 local_bh_disable(); 1833 napi_schedule(&priv->rx_cq[i]->napi); 1834 local_bh_enable(); 1835 } 1836 1837 netif_tx_start_all_queues(dev); 1838 netif_device_attach(dev); 1839 1840 return 0; 1841 1842 tx_err: 1843 if (t == MLX4_EN_NUM_TX_TYPES) { 1844 t--; 1845 i = priv->tx_ring_num[t]; 1846 } 1847 while (t >= 0) { 1848 while (i--) { 1849 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1850 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1851 } 1852 if (!t--) 1853 break; 1854 i = priv->tx_ring_num[t]; 1855 } 1856 mlx4_en_destroy_drop_qp(priv); 1857 rss_err: 1858 mlx4_en_release_rss_steer(priv); 1859 mac_err: 1860 mlx4_en_put_qp(priv); 1861 cq_err: 1862 while (rx_index--) { 1863 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1864 mlx4_en_free_affinity_hint(priv, rx_index); 1865 } 1866 for (i = 0; i < priv->rx_ring_num; i++) 1867 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1868 1869 return err; /* need to close devices */ 1870 } 1871 1872 1873 void mlx4_en_stop_port(struct net_device *dev, int detach) 1874 { 1875 struct mlx4_en_priv *priv = netdev_priv(dev); 1876 struct mlx4_en_dev *mdev = priv->mdev; 1877 struct mlx4_en_mc_list *mclist, *tmp; 1878 struct ethtool_flow_id *flow, *tmp_flow; 1879 int i, t; 1880 u8 mc_list[16] = {0}; 1881 1882 if (!priv->port_up) { 1883 en_dbg(DRV, priv, "stop port called while port already down\n"); 1884 return; 1885 } 1886 1887 /* close port*/ 1888 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1889 1890 /* Synchronize with tx routine */ 1891 netif_tx_lock_bh(dev); 1892 if (detach) 1893 netif_device_detach(dev); 1894 netif_tx_stop_all_queues(dev); 1895 netif_tx_unlock_bh(dev); 1896 1897 netif_tx_disable(dev); 1898 1899 spin_lock_bh(&priv->stats_lock); 1900 mlx4_en_fold_software_stats(dev); 1901 /* Set port as not active */ 1902 priv->port_up = false; 1903 spin_unlock_bh(&priv->stats_lock); 1904 1905 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1906 1907 /* Promsicuous mode */ 1908 if (mdev->dev->caps.steering_mode == 1909 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1910 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1911 MLX4_EN_FLAG_MC_PROMISC); 1912 mlx4_flow_steer_promisc_remove(mdev->dev, 1913 priv->port, 1914 MLX4_FS_ALL_DEFAULT); 1915 mlx4_flow_steer_promisc_remove(mdev->dev, 1916 priv->port, 1917 MLX4_FS_MC_DEFAULT); 1918 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1919 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1920 1921 /* Disable promiscouos mode */ 1922 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1923 priv->port); 1924 1925 /* Disable Multicast promisc */ 1926 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1927 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1928 priv->port); 1929 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1930 } 1931 } 1932 1933 /* Detach All multicasts */ 1934 eth_broadcast_addr(&mc_list[10]); 1935 mc_list[5] = priv->port; /* needed for B0 steering support */ 1936 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list, 1937 MLX4_PROT_ETH, priv->broadcast_id); 1938 list_for_each_entry(mclist, &priv->curr_list, list) { 1939 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1940 mc_list[5] = priv->port; 1941 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, 1942 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1943 if (mclist->tunnel_reg_id) 1944 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); 1945 } 1946 mlx4_en_clear_list(dev); 1947 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1948 list_del(&mclist->list); 1949 kfree(mclist); 1950 } 1951 1952 /* Flush multicast filter */ 1953 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1954 1955 /* Remove flow steering rules for the port*/ 1956 if (mdev->dev->caps.steering_mode == 1957 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1958 ASSERT_RTNL(); 1959 list_for_each_entry_safe(flow, tmp_flow, 1960 &priv->ethtool_list, list) { 1961 mlx4_flow_detach(mdev->dev, flow->id); 1962 list_del(&flow->list); 1963 } 1964 } 1965 1966 mlx4_en_destroy_drop_qp(priv); 1967 1968 /* Free TX Rings */ 1969 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 1970 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1971 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); 1972 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); 1973 } 1974 } 1975 msleep(10); 1976 1977 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) 1978 for (i = 0; i < priv->tx_ring_num[t]; i++) 1979 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]); 1980 1981 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1982 mlx4_en_delete_rss_steer_rules(priv); 1983 1984 /* Free RSS qps */ 1985 mlx4_en_release_rss_steer(priv); 1986 1987 /* Unregister Mac address for the port */ 1988 mlx4_en_put_qp(priv); 1989 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1990 mdev->mac_removed[priv->port] = 1; 1991 1992 /* Free RX Rings */ 1993 for (i = 0; i < priv->rx_ring_num; i++) { 1994 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1995 1996 napi_synchronize(&cq->napi); 1997 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1998 mlx4_en_deactivate_cq(priv, cq); 1999 2000 mlx4_en_free_affinity_hint(priv, i); 2001 } 2002 } 2003 2004 static void mlx4_en_restart(struct work_struct *work) 2005 { 2006 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2007 watchdog_task); 2008 struct mlx4_en_dev *mdev = priv->mdev; 2009 struct net_device *dev = priv->dev; 2010 2011 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 2012 2013 rtnl_lock(); 2014 mutex_lock(&mdev->state_lock); 2015 if (priv->port_up) { 2016 mlx4_en_stop_port(dev, 1); 2017 if (mlx4_en_start_port(dev)) 2018 en_err(priv, "Failed restarting port %d\n", priv->port); 2019 } 2020 mutex_unlock(&mdev->state_lock); 2021 rtnl_unlock(); 2022 } 2023 2024 static void mlx4_en_clear_stats(struct net_device *dev) 2025 { 2026 struct mlx4_en_priv *priv = netdev_priv(dev); 2027 struct mlx4_en_dev *mdev = priv->mdev; 2028 struct mlx4_en_tx_ring **tx_ring; 2029 int i; 2030 2031 if (!mlx4_is_slave(mdev->dev)) 2032 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 2033 en_dbg(HW, priv, "Failed dumping statistics\n"); 2034 2035 memset(&priv->pstats, 0, sizeof(priv->pstats)); 2036 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 2037 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 2038 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); 2039 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); 2040 memset(&priv->rx_priority_flowstats, 0, 2041 sizeof(priv->rx_priority_flowstats)); 2042 memset(&priv->tx_priority_flowstats, 0, 2043 sizeof(priv->tx_priority_flowstats)); 2044 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); 2045 2046 tx_ring = priv->tx_ring[TX]; 2047 for (i = 0; i < priv->tx_ring_num[TX]; i++) { 2048 tx_ring[i]->bytes = 0; 2049 tx_ring[i]->packets = 0; 2050 tx_ring[i]->tx_csum = 0; 2051 tx_ring[i]->tx_dropped = 0; 2052 tx_ring[i]->queue_stopped = 0; 2053 tx_ring[i]->wake_queue = 0; 2054 tx_ring[i]->tso_packets = 0; 2055 tx_ring[i]->xmit_more = 0; 2056 } 2057 for (i = 0; i < priv->rx_ring_num; i++) { 2058 priv->rx_ring[i]->bytes = 0; 2059 priv->rx_ring[i]->packets = 0; 2060 priv->rx_ring[i]->csum_ok = 0; 2061 priv->rx_ring[i]->csum_none = 0; 2062 priv->rx_ring[i]->csum_complete = 0; 2063 } 2064 } 2065 2066 static int mlx4_en_open(struct net_device *dev) 2067 { 2068 struct mlx4_en_priv *priv = netdev_priv(dev); 2069 struct mlx4_en_dev *mdev = priv->mdev; 2070 int err = 0; 2071 2072 mutex_lock(&mdev->state_lock); 2073 2074 if (!mdev->device_up) { 2075 en_err(priv, "Cannot open - device down/disabled\n"); 2076 err = -EBUSY; 2077 goto out; 2078 } 2079 2080 /* Reset HW statistics and SW counters */ 2081 mlx4_en_clear_stats(dev); 2082 2083 err = mlx4_en_start_port(dev); 2084 if (err) 2085 en_err(priv, "Failed starting port:%d\n", priv->port); 2086 2087 out: 2088 mutex_unlock(&mdev->state_lock); 2089 return err; 2090 } 2091 2092 2093 static int mlx4_en_close(struct net_device *dev) 2094 { 2095 struct mlx4_en_priv *priv = netdev_priv(dev); 2096 struct mlx4_en_dev *mdev = priv->mdev; 2097 2098 en_dbg(IFDOWN, priv, "Close port called\n"); 2099 2100 mutex_lock(&mdev->state_lock); 2101 2102 mlx4_en_stop_port(dev, 0); 2103 netif_carrier_off(dev); 2104 2105 mutex_unlock(&mdev->state_lock); 2106 return 0; 2107 } 2108 2109 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 2110 { 2111 int i, t; 2112 2113 #ifdef CONFIG_RFS_ACCEL 2114 priv->dev->rx_cpu_rmap = NULL; 2115 #endif 2116 2117 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2118 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2119 if (priv->tx_ring[t] && priv->tx_ring[t][i]) 2120 mlx4_en_destroy_tx_ring(priv, 2121 &priv->tx_ring[t][i]); 2122 if (priv->tx_cq[t] && priv->tx_cq[t][i]) 2123 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2124 } 2125 kfree(priv->tx_ring[t]); 2126 kfree(priv->tx_cq[t]); 2127 } 2128 2129 for (i = 0; i < priv->rx_ring_num; i++) { 2130 if (priv->rx_ring[i]) 2131 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2132 priv->prof->rx_ring_size, priv->stride); 2133 if (priv->rx_cq[i]) 2134 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2135 } 2136 2137 } 2138 2139 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 2140 { 2141 struct mlx4_en_port_profile *prof = priv->prof; 2142 int i, t; 2143 int node; 2144 2145 /* Create tx Rings */ 2146 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2147 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2148 node = cpu_to_node(i % num_online_cpus()); 2149 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i], 2150 prof->tx_ring_size, i, t, node)) 2151 goto err; 2152 2153 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i], 2154 prof->tx_ring_size, 2155 TXBB_SIZE, node, i)) 2156 goto err; 2157 } 2158 } 2159 2160 /* Create rx Rings */ 2161 for (i = 0; i < priv->rx_ring_num; i++) { 2162 node = cpu_to_node(i % num_online_cpus()); 2163 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2164 prof->rx_ring_size, i, RX, node)) 2165 goto err; 2166 2167 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2168 prof->rx_ring_size, priv->stride, 2169 node, i)) 2170 goto err; 2171 2172 } 2173 2174 #ifdef CONFIG_RFS_ACCEL 2175 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2176 #endif 2177 2178 return 0; 2179 2180 err: 2181 en_err(priv, "Failed to allocate NIC resources\n"); 2182 for (i = 0; i < priv->rx_ring_num; i++) { 2183 if (priv->rx_ring[i]) 2184 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2185 prof->rx_ring_size, 2186 priv->stride); 2187 if (priv->rx_cq[i]) 2188 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2189 } 2190 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2191 for (i = 0; i < priv->tx_ring_num[t]; i++) { 2192 if (priv->tx_ring[t][i]) 2193 mlx4_en_destroy_tx_ring(priv, 2194 &priv->tx_ring[t][i]); 2195 if (priv->tx_cq[t][i]) 2196 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2197 } 2198 } 2199 return -ENOMEM; 2200 } 2201 2202 2203 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2204 struct mlx4_en_priv *src, 2205 struct mlx4_en_port_profile *prof) 2206 { 2207 int t; 2208 2209 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2210 sizeof(dst->hwtstamp_config)); 2211 dst->num_tx_rings_p_up = prof->num_tx_rings_p_up; 2212 dst->rx_ring_num = prof->rx_ring_num; 2213 dst->flags = prof->flags; 2214 dst->mdev = src->mdev; 2215 dst->port = src->port; 2216 dst->dev = src->dev; 2217 dst->prof = prof; 2218 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2219 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2220 2221 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2222 dst->tx_ring_num[t] = prof->tx_ring_num[t]; 2223 if (!dst->tx_ring_num[t]) 2224 continue; 2225 2226 dst->tx_ring[t] = kcalloc(MAX_TX_RINGS, 2227 sizeof(struct mlx4_en_tx_ring *), 2228 GFP_KERNEL); 2229 if (!dst->tx_ring[t]) 2230 goto err_free_tx; 2231 2232 dst->tx_cq[t] = kcalloc(MAX_TX_RINGS, 2233 sizeof(struct mlx4_en_cq *), 2234 GFP_KERNEL); 2235 if (!dst->tx_cq[t]) { 2236 kfree(dst->tx_ring[t]); 2237 goto err_free_tx; 2238 } 2239 } 2240 2241 return 0; 2242 2243 err_free_tx: 2244 while (t--) { 2245 kfree(dst->tx_ring[t]); 2246 kfree(dst->tx_cq[t]); 2247 } 2248 return -ENOMEM; 2249 } 2250 2251 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2252 struct mlx4_en_priv *src) 2253 { 2254 int t; 2255 memcpy(dst->rx_ring, src->rx_ring, 2256 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2257 memcpy(dst->rx_cq, src->rx_cq, 2258 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2259 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2260 sizeof(dst->hwtstamp_config)); 2261 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2262 dst->tx_ring_num[t] = src->tx_ring_num[t]; 2263 dst->tx_ring[t] = src->tx_ring[t]; 2264 dst->tx_cq[t] = src->tx_cq[t]; 2265 } 2266 dst->num_tx_rings_p_up = src->num_tx_rings_p_up; 2267 dst->rx_ring_num = src->rx_ring_num; 2268 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2269 } 2270 2271 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2272 struct mlx4_en_priv *tmp, 2273 struct mlx4_en_port_profile *prof, 2274 bool carry_xdp_prog) 2275 { 2276 struct bpf_prog *xdp_prog; 2277 int i, t; 2278 2279 mlx4_en_copy_priv(tmp, priv, prof); 2280 2281 if (mlx4_en_alloc_resources(tmp)) { 2282 en_warn(priv, 2283 "%s: Resource allocation failed, using previous configuration\n", 2284 __func__); 2285 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2286 kfree(tmp->tx_ring[t]); 2287 kfree(tmp->tx_cq[t]); 2288 } 2289 return -ENOMEM; 2290 } 2291 2292 /* All rx_rings has the same xdp_prog. Pick the first one. */ 2293 xdp_prog = rcu_dereference_protected( 2294 priv->rx_ring[0]->xdp_prog, 2295 lockdep_is_held(&priv->mdev->state_lock)); 2296 2297 if (xdp_prog && carry_xdp_prog) { 2298 bpf_prog_add(xdp_prog, tmp->rx_ring_num); 2299 for (i = 0; i < tmp->rx_ring_num; i++) 2300 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, 2301 xdp_prog); 2302 } 2303 2304 return 0; 2305 } 2306 2307 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2308 struct mlx4_en_priv *tmp) 2309 { 2310 mlx4_en_free_resources(priv); 2311 mlx4_en_update_priv(priv, tmp); 2312 } 2313 2314 void mlx4_en_destroy_netdev(struct net_device *dev) 2315 { 2316 struct mlx4_en_priv *priv = netdev_priv(dev); 2317 struct mlx4_en_dev *mdev = priv->mdev; 2318 2319 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2320 2321 /* Unregister device - this will close the port if it was up */ 2322 if (priv->registered) { 2323 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2324 priv->port)); 2325 unregister_netdev(dev); 2326 } 2327 2328 if (priv->allocated) 2329 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2330 2331 cancel_delayed_work(&priv->stats_task); 2332 cancel_delayed_work(&priv->service_task); 2333 /* flush any pending task for this netdev */ 2334 flush_workqueue(mdev->workqueue); 2335 2336 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2337 mlx4_en_remove_timestamp(mdev); 2338 2339 /* Detach the netdev so tasks would not attempt to access it */ 2340 mutex_lock(&mdev->state_lock); 2341 mdev->pndev[priv->port] = NULL; 2342 mdev->upper[priv->port] = NULL; 2343 2344 #ifdef CONFIG_RFS_ACCEL 2345 mlx4_en_cleanup_filters(priv); 2346 #endif 2347 2348 mlx4_en_free_resources(priv); 2349 mutex_unlock(&mdev->state_lock); 2350 2351 free_netdev(dev); 2352 } 2353 2354 static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu) 2355 { 2356 struct mlx4_en_priv *priv = netdev_priv(dev); 2357 2358 if (mtu > MLX4_EN_MAX_XDP_MTU) { 2359 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n", 2360 mtu, MLX4_EN_MAX_XDP_MTU); 2361 return false; 2362 } 2363 2364 return true; 2365 } 2366 2367 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2368 { 2369 struct mlx4_en_priv *priv = netdev_priv(dev); 2370 struct mlx4_en_dev *mdev = priv->mdev; 2371 int err = 0; 2372 2373 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2374 dev->mtu, new_mtu); 2375 2376 if (priv->tx_ring_num[TX_XDP] && 2377 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2378 return -EOPNOTSUPP; 2379 2380 dev->mtu = new_mtu; 2381 2382 if (netif_running(dev)) { 2383 mutex_lock(&mdev->state_lock); 2384 if (!mdev->device_up) { 2385 /* NIC is probably restarting - let watchdog task reset 2386 * the port */ 2387 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2388 } else { 2389 mlx4_en_stop_port(dev, 1); 2390 err = mlx4_en_start_port(dev); 2391 if (err) { 2392 en_err(priv, "Failed restarting port:%d\n", 2393 priv->port); 2394 queue_work(mdev->workqueue, &priv->watchdog_task); 2395 } 2396 } 2397 mutex_unlock(&mdev->state_lock); 2398 } 2399 return 0; 2400 } 2401 2402 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2403 { 2404 struct mlx4_en_priv *priv = netdev_priv(dev); 2405 struct mlx4_en_dev *mdev = priv->mdev; 2406 struct hwtstamp_config config; 2407 2408 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2409 return -EFAULT; 2410 2411 /* reserved for future extensions */ 2412 if (config.flags) 2413 return -EINVAL; 2414 2415 /* device doesn't support time stamping */ 2416 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2417 return -EINVAL; 2418 2419 /* TX HW timestamp */ 2420 switch (config.tx_type) { 2421 case HWTSTAMP_TX_OFF: 2422 case HWTSTAMP_TX_ON: 2423 break; 2424 default: 2425 return -ERANGE; 2426 } 2427 2428 /* RX HW timestamp */ 2429 switch (config.rx_filter) { 2430 case HWTSTAMP_FILTER_NONE: 2431 break; 2432 case HWTSTAMP_FILTER_ALL: 2433 case HWTSTAMP_FILTER_SOME: 2434 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2435 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2436 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2437 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2438 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2439 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2440 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2441 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2442 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2443 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2444 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2445 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2446 case HWTSTAMP_FILTER_NTP_ALL: 2447 config.rx_filter = HWTSTAMP_FILTER_ALL; 2448 break; 2449 default: 2450 return -ERANGE; 2451 } 2452 2453 if (mlx4_en_reset_config(dev, config, dev->features)) { 2454 config.tx_type = HWTSTAMP_TX_OFF; 2455 config.rx_filter = HWTSTAMP_FILTER_NONE; 2456 } 2457 2458 return copy_to_user(ifr->ifr_data, &config, 2459 sizeof(config)) ? -EFAULT : 0; 2460 } 2461 2462 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2463 { 2464 struct mlx4_en_priv *priv = netdev_priv(dev); 2465 2466 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2467 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; 2468 } 2469 2470 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2471 { 2472 switch (cmd) { 2473 case SIOCSHWTSTAMP: 2474 return mlx4_en_hwtstamp_set(dev, ifr); 2475 case SIOCGHWTSTAMP: 2476 return mlx4_en_hwtstamp_get(dev, ifr); 2477 default: 2478 return -EOPNOTSUPP; 2479 } 2480 } 2481 2482 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2483 netdev_features_t features) 2484 { 2485 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2486 struct mlx4_en_dev *mdev = en_priv->mdev; 2487 2488 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2489 * enable/disable make sure S-TAG flag is always in same state as 2490 * C-TAG. 2491 */ 2492 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2493 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2494 features |= NETIF_F_HW_VLAN_STAG_RX; 2495 else 2496 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2497 2498 return features; 2499 } 2500 2501 static int mlx4_en_set_features(struct net_device *netdev, 2502 netdev_features_t features) 2503 { 2504 struct mlx4_en_priv *priv = netdev_priv(netdev); 2505 bool reset = false; 2506 int ret = 0; 2507 2508 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2509 en_info(priv, "Turn %s RX-FCS\n", 2510 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2511 reset = true; 2512 } 2513 2514 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2515 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2516 2517 en_info(priv, "Turn %s RX-ALL\n", 2518 ignore_fcs_value ? "ON" : "OFF"); 2519 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2520 priv->port, ignore_fcs_value); 2521 if (ret) 2522 return ret; 2523 } 2524 2525 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2526 en_info(priv, "Turn %s RX vlan strip offload\n", 2527 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2528 reset = true; 2529 } 2530 2531 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2532 en_info(priv, "Turn %s TX vlan strip offload\n", 2533 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2534 2535 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2536 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2537 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2538 2539 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2540 en_info(priv, "Turn %s loopback\n", 2541 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); 2542 mlx4_en_update_loopback_state(netdev, features); 2543 } 2544 2545 if (reset) { 2546 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2547 features); 2548 if (ret) 2549 return ret; 2550 } 2551 2552 return 0; 2553 } 2554 2555 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2556 { 2557 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2558 struct mlx4_en_dev *mdev = en_priv->mdev; 2559 2560 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac); 2561 } 2562 2563 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 2564 __be16 vlan_proto) 2565 { 2566 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2567 struct mlx4_en_dev *mdev = en_priv->mdev; 2568 2569 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos, 2570 vlan_proto); 2571 } 2572 2573 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2574 int max_tx_rate) 2575 { 2576 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2577 struct mlx4_en_dev *mdev = en_priv->mdev; 2578 2579 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2580 max_tx_rate); 2581 } 2582 2583 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2584 { 2585 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2586 struct mlx4_en_dev *mdev = en_priv->mdev; 2587 2588 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2589 } 2590 2591 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2592 { 2593 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2594 struct mlx4_en_dev *mdev = en_priv->mdev; 2595 2596 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2597 } 2598 2599 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2600 { 2601 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2602 struct mlx4_en_dev *mdev = en_priv->mdev; 2603 2604 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2605 } 2606 2607 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2608 struct ifla_vf_stats *vf_stats) 2609 { 2610 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2611 struct mlx4_en_dev *mdev = en_priv->mdev; 2612 2613 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2614 } 2615 2616 #define PORT_ID_BYTE_LEN 8 2617 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2618 struct netdev_phys_item_id *ppid) 2619 { 2620 struct mlx4_en_priv *priv = netdev_priv(dev); 2621 struct mlx4_dev *mdev = priv->mdev->dev; 2622 int i; 2623 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2624 2625 if (!phys_port_id) 2626 return -EOPNOTSUPP; 2627 2628 ppid->id_len = sizeof(phys_port_id); 2629 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2630 ppid->id[i] = phys_port_id & 0xff; 2631 phys_port_id >>= 8; 2632 } 2633 return 0; 2634 } 2635 2636 static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2637 { 2638 int ret; 2639 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2640 vxlan_add_task); 2641 2642 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2643 if (ret) 2644 goto out; 2645 2646 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2647 VXLAN_STEER_BY_OUTER_MAC, 1); 2648 out: 2649 if (ret) { 2650 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2651 return; 2652 } 2653 } 2654 2655 static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2656 { 2657 int ret; 2658 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2659 vxlan_del_task); 2660 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2661 VXLAN_STEER_BY_OUTER_MAC, 0); 2662 if (ret) 2663 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2664 2665 priv->vxlan_port = 0; 2666 } 2667 2668 static void mlx4_en_add_vxlan_port(struct net_device *dev, 2669 struct udp_tunnel_info *ti) 2670 { 2671 struct mlx4_en_priv *priv = netdev_priv(dev); 2672 __be16 port = ti->port; 2673 __be16 current_port; 2674 2675 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2676 return; 2677 2678 if (ti->sa_family != AF_INET) 2679 return; 2680 2681 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2682 return; 2683 2684 current_port = priv->vxlan_port; 2685 if (current_port && current_port != port) { 2686 en_warn(priv, "vxlan port %d configured, can't add port %d\n", 2687 ntohs(current_port), ntohs(port)); 2688 return; 2689 } 2690 2691 priv->vxlan_port = port; 2692 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); 2693 } 2694 2695 static void mlx4_en_del_vxlan_port(struct net_device *dev, 2696 struct udp_tunnel_info *ti) 2697 { 2698 struct mlx4_en_priv *priv = netdev_priv(dev); 2699 __be16 port = ti->port; 2700 __be16 current_port; 2701 2702 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2703 return; 2704 2705 if (ti->sa_family != AF_INET) 2706 return; 2707 2708 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2709 return; 2710 2711 current_port = priv->vxlan_port; 2712 if (current_port != port) { 2713 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); 2714 return; 2715 } 2716 2717 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2718 } 2719 2720 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2721 struct net_device *dev, 2722 netdev_features_t features) 2723 { 2724 features = vlan_features_check(skb, features); 2725 features = vxlan_features_check(skb, features); 2726 2727 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2728 * support inner IPv6 checksums and segmentation so we need to 2729 * strip that feature if this is an IPv6 encapsulated frame. 2730 */ 2731 if (skb->encapsulation && 2732 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2733 struct mlx4_en_priv *priv = netdev_priv(dev); 2734 2735 if (!priv->vxlan_port || 2736 (ip_hdr(skb)->version != 4) || 2737 (udp_hdr(skb)->dest != priv->vxlan_port)) 2738 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2739 } 2740 2741 return features; 2742 } 2743 2744 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2745 { 2746 struct mlx4_en_priv *priv = netdev_priv(dev); 2747 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index]; 2748 struct mlx4_update_qp_params params; 2749 int err; 2750 2751 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2752 return -EOPNOTSUPP; 2753 2754 /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ 2755 if (maxrate >> 12) { 2756 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2757 params.rate_val = maxrate / 1000; 2758 } else if (maxrate) { 2759 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2760 params.rate_val = maxrate; 2761 } else { /* zero serves to revoke the QP rate-limitation */ 2762 params.rate_unit = 0; 2763 params.rate_val = 0; 2764 } 2765 2766 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2767 ¶ms); 2768 return err; 2769 } 2770 2771 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) 2772 { 2773 struct mlx4_en_priv *priv = netdev_priv(dev); 2774 struct mlx4_en_dev *mdev = priv->mdev; 2775 struct mlx4_en_port_profile new_prof; 2776 struct bpf_prog *old_prog; 2777 struct mlx4_en_priv *tmp; 2778 int tx_changed = 0; 2779 int xdp_ring_num; 2780 int port_up = 0; 2781 int err; 2782 int i; 2783 2784 xdp_ring_num = prog ? priv->rx_ring_num : 0; 2785 2786 /* No need to reconfigure buffers when simply swapping the 2787 * program for a new one. 2788 */ 2789 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { 2790 if (prog) 2791 bpf_prog_add(prog, priv->rx_ring_num - 1); 2792 2793 mutex_lock(&mdev->state_lock); 2794 for (i = 0; i < priv->rx_ring_num; i++) { 2795 old_prog = rcu_dereference_protected( 2796 priv->rx_ring[i]->xdp_prog, 2797 lockdep_is_held(&mdev->state_lock)); 2798 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2799 if (old_prog) 2800 bpf_prog_put(old_prog); 2801 } 2802 mutex_unlock(&mdev->state_lock); 2803 return 0; 2804 } 2805 2806 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu)) 2807 return -EOPNOTSUPP; 2808 2809 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2810 if (!tmp) 2811 return -ENOMEM; 2812 2813 if (prog) 2814 bpf_prog_add(prog, priv->rx_ring_num - 1); 2815 2816 mutex_lock(&mdev->state_lock); 2817 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 2818 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num; 2819 2820 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) { 2821 tx_changed = 1; 2822 new_prof.tx_ring_num[TX] = 2823 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up); 2824 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); 2825 } 2826 2827 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false); 2828 if (err) { 2829 if (prog) 2830 bpf_prog_sub(prog, priv->rx_ring_num - 1); 2831 goto unlock_out; 2832 } 2833 2834 if (priv->port_up) { 2835 port_up = 1; 2836 mlx4_en_stop_port(dev, 1); 2837 } 2838 2839 mlx4_en_safe_replace_resources(priv, tmp); 2840 if (tx_changed) 2841 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 2842 2843 for (i = 0; i < priv->rx_ring_num; i++) { 2844 old_prog = rcu_dereference_protected( 2845 priv->rx_ring[i]->xdp_prog, 2846 lockdep_is_held(&mdev->state_lock)); 2847 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2848 if (old_prog) 2849 bpf_prog_put(old_prog); 2850 } 2851 2852 if (port_up) { 2853 err = mlx4_en_start_port(dev); 2854 if (err) { 2855 en_err(priv, "Failed starting port %d for XDP change\n", 2856 priv->port); 2857 queue_work(mdev->workqueue, &priv->watchdog_task); 2858 } 2859 } 2860 2861 unlock_out: 2862 mutex_unlock(&mdev->state_lock); 2863 kfree(tmp); 2864 return err; 2865 } 2866 2867 static u32 mlx4_xdp_query(struct net_device *dev) 2868 { 2869 struct mlx4_en_priv *priv = netdev_priv(dev); 2870 struct mlx4_en_dev *mdev = priv->mdev; 2871 const struct bpf_prog *xdp_prog; 2872 u32 prog_id = 0; 2873 2874 if (!priv->tx_ring_num[TX_XDP]) 2875 return prog_id; 2876 2877 mutex_lock(&mdev->state_lock); 2878 xdp_prog = rcu_dereference_protected( 2879 priv->rx_ring[0]->xdp_prog, 2880 lockdep_is_held(&mdev->state_lock)); 2881 if (xdp_prog) 2882 prog_id = xdp_prog->aux->id; 2883 mutex_unlock(&mdev->state_lock); 2884 2885 return prog_id; 2886 } 2887 2888 static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2889 { 2890 switch (xdp->command) { 2891 case XDP_SETUP_PROG: 2892 return mlx4_xdp_set(dev, xdp->prog); 2893 case XDP_QUERY_PROG: 2894 xdp->prog_id = mlx4_xdp_query(dev); 2895 return 0; 2896 default: 2897 return -EINVAL; 2898 } 2899 } 2900 2901 static const struct net_device_ops mlx4_netdev_ops = { 2902 .ndo_open = mlx4_en_open, 2903 .ndo_stop = mlx4_en_close, 2904 .ndo_start_xmit = mlx4_en_xmit, 2905 .ndo_select_queue = mlx4_en_select_queue, 2906 .ndo_get_stats64 = mlx4_en_get_stats64, 2907 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2908 .ndo_set_mac_address = mlx4_en_set_mac, 2909 .ndo_validate_addr = eth_validate_addr, 2910 .ndo_change_mtu = mlx4_en_change_mtu, 2911 .ndo_do_ioctl = mlx4_en_ioctl, 2912 .ndo_tx_timeout = mlx4_en_tx_timeout, 2913 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2914 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2915 .ndo_set_features = mlx4_en_set_features, 2916 .ndo_fix_features = mlx4_en_fix_features, 2917 .ndo_setup_tc = __mlx4_en_setup_tc, 2918 #ifdef CONFIG_RFS_ACCEL 2919 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2920 #endif 2921 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2922 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2923 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2924 .ndo_features_check = mlx4_en_features_check, 2925 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2926 .ndo_bpf = mlx4_xdp, 2927 }; 2928 2929 static const struct net_device_ops mlx4_netdev_ops_master = { 2930 .ndo_open = mlx4_en_open, 2931 .ndo_stop = mlx4_en_close, 2932 .ndo_start_xmit = mlx4_en_xmit, 2933 .ndo_select_queue = mlx4_en_select_queue, 2934 .ndo_get_stats64 = mlx4_en_get_stats64, 2935 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2936 .ndo_set_mac_address = mlx4_en_set_mac, 2937 .ndo_validate_addr = eth_validate_addr, 2938 .ndo_change_mtu = mlx4_en_change_mtu, 2939 .ndo_tx_timeout = mlx4_en_tx_timeout, 2940 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2941 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2942 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2943 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2944 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2945 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2946 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2947 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2948 .ndo_get_vf_config = mlx4_en_get_vf_config, 2949 .ndo_set_features = mlx4_en_set_features, 2950 .ndo_fix_features = mlx4_en_fix_features, 2951 .ndo_setup_tc = __mlx4_en_setup_tc, 2952 #ifdef CONFIG_RFS_ACCEL 2953 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2954 #endif 2955 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2956 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2957 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2958 .ndo_features_check = mlx4_en_features_check, 2959 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2960 .ndo_bpf = mlx4_xdp, 2961 }; 2962 2963 struct mlx4_en_bond { 2964 struct work_struct work; 2965 struct mlx4_en_priv *priv; 2966 int is_bonded; 2967 struct mlx4_port_map port_map; 2968 }; 2969 2970 static void mlx4_en_bond_work(struct work_struct *work) 2971 { 2972 struct mlx4_en_bond *bond = container_of(work, 2973 struct mlx4_en_bond, 2974 work); 2975 int err = 0; 2976 struct mlx4_dev *dev = bond->priv->mdev->dev; 2977 2978 if (bond->is_bonded) { 2979 if (!mlx4_is_bonded(dev)) { 2980 err = mlx4_bond(dev); 2981 if (err) 2982 en_err(bond->priv, "Fail to bond device\n"); 2983 } 2984 if (!err) { 2985 err = mlx4_port_map_set(dev, &bond->port_map); 2986 if (err) 2987 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", 2988 bond->port_map.port1, 2989 bond->port_map.port2, 2990 err); 2991 } 2992 } else if (mlx4_is_bonded(dev)) { 2993 err = mlx4_unbond(dev); 2994 if (err) 2995 en_err(bond->priv, "Fail to unbond device\n"); 2996 } 2997 dev_put(bond->priv->dev); 2998 kfree(bond); 2999 } 3000 3001 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 3002 u8 v2p_p1, u8 v2p_p2) 3003 { 3004 struct mlx4_en_bond *bond = NULL; 3005 3006 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 3007 if (!bond) 3008 return -ENOMEM; 3009 3010 INIT_WORK(&bond->work, mlx4_en_bond_work); 3011 bond->priv = priv; 3012 bond->is_bonded = is_bonded; 3013 bond->port_map.port1 = v2p_p1; 3014 bond->port_map.port2 = v2p_p2; 3015 dev_hold(priv->dev); 3016 queue_work(priv->mdev->workqueue, &bond->work); 3017 return 0; 3018 } 3019 3020 int mlx4_en_netdev_event(struct notifier_block *this, 3021 unsigned long event, void *ptr) 3022 { 3023 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 3024 u8 port = 0; 3025 struct mlx4_en_dev *mdev; 3026 struct mlx4_dev *dev; 3027 int i, num_eth_ports = 0; 3028 bool do_bond = true; 3029 struct mlx4_en_priv *priv; 3030 u8 v2p_port1 = 0; 3031 u8 v2p_port2 = 0; 3032 3033 if (!net_eq(dev_net(ndev), &init_net)) 3034 return NOTIFY_DONE; 3035 3036 mdev = container_of(this, struct mlx4_en_dev, nb); 3037 dev = mdev->dev; 3038 3039 /* Go into this mode only when two network devices set on two ports 3040 * of the same mlx4 device are slaves of the same bonding master 3041 */ 3042 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 3043 ++num_eth_ports; 3044 if (!port && (mdev->pndev[i] == ndev)) 3045 port = i; 3046 mdev->upper[i] = mdev->pndev[i] ? 3047 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 3048 /* condition not met: network device is a slave */ 3049 if (!mdev->upper[i]) 3050 do_bond = false; 3051 if (num_eth_ports < 2) 3052 continue; 3053 /* condition not met: same master */ 3054 if (mdev->upper[i] != mdev->upper[i-1]) 3055 do_bond = false; 3056 } 3057 /* condition not met: 2 salves */ 3058 do_bond = (num_eth_ports == 2) ? do_bond : false; 3059 3060 /* handle only events that come with enough info */ 3061 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 3062 return NOTIFY_DONE; 3063 3064 priv = netdev_priv(ndev); 3065 if (do_bond) { 3066 struct netdev_notifier_bonding_info *notifier_info = ptr; 3067 struct netdev_bonding_info *bonding_info = 3068 ¬ifier_info->bonding_info; 3069 3070 /* required mode 1, 2 or 4 */ 3071 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 3072 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 3073 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 3074 do_bond = false; 3075 3076 /* require exactly 2 slaves */ 3077 if (bonding_info->master.num_slaves != 2) 3078 do_bond = false; 3079 3080 /* calc v2p */ 3081 if (do_bond) { 3082 if (bonding_info->master.bond_mode == 3083 BOND_MODE_ACTIVEBACKUP) { 3084 /* in active-backup mode virtual ports are 3085 * mapped to the physical port of the active 3086 * slave */ 3087 if (bonding_info->slave.state == 3088 BOND_STATE_BACKUP) { 3089 if (port == 1) { 3090 v2p_port1 = 2; 3091 v2p_port2 = 2; 3092 } else { 3093 v2p_port1 = 1; 3094 v2p_port2 = 1; 3095 } 3096 } else { /* BOND_STATE_ACTIVE */ 3097 if (port == 1) { 3098 v2p_port1 = 1; 3099 v2p_port2 = 1; 3100 } else { 3101 v2p_port1 = 2; 3102 v2p_port2 = 2; 3103 } 3104 } 3105 } else { /* Active-Active */ 3106 /* in active-active mode a virtual port is 3107 * mapped to the native physical port if and only 3108 * if the physical port is up */ 3109 __s8 link = bonding_info->slave.link; 3110 3111 if (port == 1) 3112 v2p_port2 = 2; 3113 else 3114 v2p_port1 = 1; 3115 if ((link == BOND_LINK_UP) || 3116 (link == BOND_LINK_FAIL)) { 3117 if (port == 1) 3118 v2p_port1 = 1; 3119 else 3120 v2p_port2 = 2; 3121 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 3122 if (port == 1) 3123 v2p_port1 = 2; 3124 else 3125 v2p_port2 = 1; 3126 } 3127 } 3128 } 3129 } 3130 3131 mlx4_en_queue_bond_work(priv, do_bond, 3132 v2p_port1, v2p_port2); 3133 3134 return NOTIFY_DONE; 3135 } 3136 3137 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 3138 struct mlx4_en_stats_bitmap *stats_bitmap, 3139 u8 rx_ppp, u8 rx_pause, 3140 u8 tx_ppp, u8 tx_pause) 3141 { 3142 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 3143 3144 if (!mlx4_is_slave(dev) && 3145 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 3146 mutex_lock(&stats_bitmap->mutex); 3147 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 3148 3149 if (rx_ppp) 3150 bitmap_set(stats_bitmap->bitmap, last_i, 3151 NUM_FLOW_PRIORITY_STATS_RX); 3152 last_i += NUM_FLOW_PRIORITY_STATS_RX; 3153 3154 if (rx_pause && !(rx_ppp)) 3155 bitmap_set(stats_bitmap->bitmap, last_i, 3156 NUM_FLOW_STATS_RX); 3157 last_i += NUM_FLOW_STATS_RX; 3158 3159 if (tx_ppp) 3160 bitmap_set(stats_bitmap->bitmap, last_i, 3161 NUM_FLOW_PRIORITY_STATS_TX); 3162 last_i += NUM_FLOW_PRIORITY_STATS_TX; 3163 3164 if (tx_pause && !(tx_ppp)) 3165 bitmap_set(stats_bitmap->bitmap, last_i, 3166 NUM_FLOW_STATS_TX); 3167 last_i += NUM_FLOW_STATS_TX; 3168 3169 mutex_unlock(&stats_bitmap->mutex); 3170 } 3171 } 3172 3173 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 3174 struct mlx4_en_stats_bitmap *stats_bitmap, 3175 u8 rx_ppp, u8 rx_pause, 3176 u8 tx_ppp, u8 tx_pause) 3177 { 3178 int last_i = 0; 3179 3180 mutex_init(&stats_bitmap->mutex); 3181 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 3182 3183 if (mlx4_is_slave(dev)) { 3184 bitmap_set(stats_bitmap->bitmap, last_i + 3185 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 3186 bitmap_set(stats_bitmap->bitmap, last_i + 3187 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 3188 bitmap_set(stats_bitmap->bitmap, last_i + 3189 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 3190 bitmap_set(stats_bitmap->bitmap, last_i + 3191 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 3192 bitmap_set(stats_bitmap->bitmap, last_i + 3193 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 3194 bitmap_set(stats_bitmap->bitmap, last_i + 3195 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 3196 } else { 3197 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 3198 } 3199 last_i += NUM_MAIN_STATS; 3200 3201 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 3202 last_i += NUM_PORT_STATS; 3203 3204 if (mlx4_is_master(dev)) 3205 bitmap_set(stats_bitmap->bitmap, last_i, 3206 NUM_PF_STATS); 3207 last_i += NUM_PF_STATS; 3208 3209 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 3210 rx_ppp, rx_pause, 3211 tx_ppp, tx_pause); 3212 last_i += NUM_FLOW_STATS; 3213 3214 if (!mlx4_is_slave(dev)) 3215 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 3216 last_i += NUM_PKT_STATS; 3217 3218 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); 3219 last_i += NUM_XDP_STATS; 3220 3221 if (!mlx4_is_slave(dev)) 3222 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS); 3223 last_i += NUM_PHY_STATS; 3224 } 3225 3226 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 3227 struct mlx4_en_port_profile *prof) 3228 { 3229 struct net_device *dev; 3230 struct mlx4_en_priv *priv; 3231 int i, t; 3232 int err; 3233 3234 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3235 MAX_TX_RINGS, MAX_RX_RINGS); 3236 if (dev == NULL) 3237 return -ENOMEM; 3238 3239 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]); 3240 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 3241 3242 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 3243 dev->dev_port = port - 1; 3244 3245 /* 3246 * Initialize driver private data 3247 */ 3248 3249 priv = netdev_priv(dev); 3250 memset(priv, 0, sizeof(struct mlx4_en_priv)); 3251 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3252 spin_lock_init(&priv->stats_lock); 3253 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3254 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3255 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3256 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3257 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3258 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 3259 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 3260 #ifdef CONFIG_RFS_ACCEL 3261 INIT_LIST_HEAD(&priv->filters); 3262 spin_lock_init(&priv->filters_lock); 3263 #endif 3264 3265 priv->dev = dev; 3266 priv->mdev = mdev; 3267 priv->ddev = &mdev->pdev->dev; 3268 priv->prof = prof; 3269 priv->port = port; 3270 priv->port_up = false; 3271 priv->flags = prof->flags; 3272 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 3273 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 3274 MLX4_WQE_CTRL_SOLICITED); 3275 priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up; 3276 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 3277 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 3278 3279 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 3280 priv->tx_ring_num[t] = prof->tx_ring_num[t]; 3281 if (!priv->tx_ring_num[t]) 3282 continue; 3283 3284 priv->tx_ring[t] = kcalloc(MAX_TX_RINGS, 3285 sizeof(struct mlx4_en_tx_ring *), 3286 GFP_KERNEL); 3287 if (!priv->tx_ring[t]) { 3288 err = -ENOMEM; 3289 goto out; 3290 } 3291 priv->tx_cq[t] = kcalloc(MAX_TX_RINGS, 3292 sizeof(struct mlx4_en_cq *), 3293 GFP_KERNEL); 3294 if (!priv->tx_cq[t]) { 3295 err = -ENOMEM; 3296 goto out; 3297 } 3298 } 3299 priv->rx_ring_num = prof->rx_ring_num; 3300 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 3301 priv->cqe_size = mdev->dev->caps.cqe_size; 3302 priv->mac_index = -1; 3303 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3304 #ifdef CONFIG_MLX4_EN_DCB 3305 if (!mlx4_is_slave(priv->mdev->dev)) { 3306 u8 prio; 3307 3308 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { 3309 priv->ets.prio_tc[prio] = prio; 3310 priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; 3311 } 3312 3313 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | 3314 DCB_CAP_DCBX_VER_IEEE; 3315 priv->flags |= MLX4_EN_DCB_ENABLED; 3316 priv->cee_config.pfc_state = false; 3317 3318 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++) 3319 priv->cee_config.dcb_pfc[i] = pfc_disabled; 3320 3321 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3322 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3323 } else { 3324 en_info(priv, "enabling only PFC DCB ops\n"); 3325 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 3326 } 3327 } 3328 #endif 3329 3330 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 3331 INIT_HLIST_HEAD(&priv->mac_hash[i]); 3332 3333 /* Query for default mac and max mtu */ 3334 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 3335 3336 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 3337 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 3338 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3339 3340 /* Set default MAC */ 3341 dev->addr_len = ETH_ALEN; 3342 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3343 if (!is_valid_ether_addr(dev->dev_addr)) { 3344 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", 3345 priv->port, dev->dev_addr); 3346 err = -EINVAL; 3347 goto out; 3348 } else if (mlx4_is_slave(priv->mdev->dev) && 3349 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3350 /* Random MAC was assigned in mlx4_slave_cap 3351 * in mlx4_core module 3352 */ 3353 dev->addr_assign_type |= NET_ADDR_RANDOM; 3354 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3355 } 3356 3357 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3358 3359 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3360 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3361 err = mlx4_en_alloc_resources(priv); 3362 if (err) 3363 goto out; 3364 3365 /* Initialize time stamping config */ 3366 priv->hwtstamp_config.flags = 0; 3367 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3368 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3369 3370 /* Allocate page for receive rings */ 3371 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3372 MLX4_EN_PAGE_SIZE); 3373 if (err) { 3374 en_err(priv, "Failed to allocate page for rx qps\n"); 3375 goto out; 3376 } 3377 priv->allocated = 1; 3378 3379 /* 3380 * Initialize netdev entry points 3381 */ 3382 if (mlx4_is_master(priv->mdev->dev)) 3383 dev->netdev_ops = &mlx4_netdev_ops_master; 3384 else 3385 dev->netdev_ops = &mlx4_netdev_ops; 3386 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3387 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); 3388 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3389 3390 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3391 3392 /* 3393 * Set driver features 3394 */ 3395 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3396 if (mdev->LSO_support) 3397 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3398 3399 if (mdev->dev->caps.tunnel_offload_mode == 3400 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3401 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3402 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3403 NETIF_F_GSO_PARTIAL; 3404 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3405 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3406 NETIF_F_GSO_PARTIAL; 3407 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3408 dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3409 NETIF_F_RXCSUM | 3410 NETIF_F_TSO | NETIF_F_TSO6 | 3411 NETIF_F_GSO_UDP_TUNNEL | 3412 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3413 NETIF_F_GSO_PARTIAL; 3414 } 3415 3416 dev->vlan_features = dev->hw_features; 3417 3418 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3419 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3420 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3421 NETIF_F_HW_VLAN_CTAG_FILTER; 3422 dev->hw_features |= NETIF_F_LOOPBACK | 3423 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3424 3425 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3426 dev->features |= NETIF_F_HW_VLAN_STAG_RX | 3427 NETIF_F_HW_VLAN_STAG_FILTER; 3428 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3429 } 3430 3431 if (mlx4_is_slave(mdev->dev)) { 3432 bool vlan_offload_disabled; 3433 int phv; 3434 3435 err = get_phv_bit(mdev->dev, port, &phv); 3436 if (!err && phv) { 3437 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3438 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3439 } 3440 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port, 3441 &vlan_offload_disabled); 3442 if (!err && vlan_offload_disabled) { 3443 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3444 NETIF_F_HW_VLAN_CTAG_RX | 3445 NETIF_F_HW_VLAN_STAG_TX | 3446 NETIF_F_HW_VLAN_STAG_RX); 3447 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3448 NETIF_F_HW_VLAN_CTAG_RX | 3449 NETIF_F_HW_VLAN_STAG_TX | 3450 NETIF_F_HW_VLAN_STAG_RX); 3451 } 3452 } else { 3453 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3454 !(mdev->dev->caps.flags2 & 3455 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3456 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3457 } 3458 3459 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3460 dev->hw_features |= NETIF_F_RXFCS; 3461 3462 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3463 dev->hw_features |= NETIF_F_RXALL; 3464 3465 if (mdev->dev->caps.steering_mode == 3466 MLX4_STEERING_MODE_DEVICE_MANAGED && 3467 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3468 dev->hw_features |= NETIF_F_NTUPLE; 3469 3470 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3471 dev->priv_flags |= IFF_UNICAST_FLT; 3472 3473 /* Setting a default hash function value */ 3474 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3475 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3476 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3477 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3478 } else { 3479 en_warn(priv, 3480 "No RSS hash capabilities exposed, using Toeplitz\n"); 3481 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3482 } 3483 3484 /* MTU range: 68 - hw-specific max */ 3485 dev->min_mtu = ETH_MIN_MTU; 3486 dev->max_mtu = priv->max_mtu; 3487 3488 mdev->pndev[port] = dev; 3489 mdev->upper[port] = NULL; 3490 3491 netif_carrier_off(dev); 3492 mlx4_en_set_default_moderation(priv); 3493 3494 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]); 3495 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3496 3497 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3498 3499 /* Configure port */ 3500 mlx4_en_calc_rx_buf(dev); 3501 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3502 priv->rx_skb_size + ETH_FCS_LEN, 3503 prof->tx_pause, prof->tx_ppp, 3504 prof->rx_pause, prof->rx_ppp); 3505 if (err) { 3506 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3507 priv->port, err); 3508 goto out; 3509 } 3510 3511 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3512 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3513 if (err) { 3514 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3515 err); 3516 goto out; 3517 } 3518 } 3519 3520 /* Init port */ 3521 en_warn(priv, "Initializing port\n"); 3522 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3523 if (err) { 3524 en_err(priv, "Failed Initializing port\n"); 3525 goto out; 3526 } 3527 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3528 3529 /* Initialize time stamp mechanism */ 3530 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3531 mlx4_en_init_timestamp(mdev); 3532 3533 queue_delayed_work(mdev->workqueue, &priv->service_task, 3534 SERVICE_TASK_DELAY); 3535 3536 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3537 mdev->profile.prof[priv->port].rx_ppp, 3538 mdev->profile.prof[priv->port].rx_pause, 3539 mdev->profile.prof[priv->port].tx_ppp, 3540 mdev->profile.prof[priv->port].tx_pause); 3541 3542 err = register_netdev(dev); 3543 if (err) { 3544 en_err(priv, "Netdev registration failed for port %d\n", port); 3545 goto out; 3546 } 3547 3548 priv->registered = 1; 3549 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3550 dev); 3551 3552 return 0; 3553 3554 out: 3555 mlx4_en_destroy_netdev(dev); 3556 return err; 3557 } 3558 3559 int mlx4_en_reset_config(struct net_device *dev, 3560 struct hwtstamp_config ts_config, 3561 netdev_features_t features) 3562 { 3563 struct mlx4_en_priv *priv = netdev_priv(dev); 3564 struct mlx4_en_dev *mdev = priv->mdev; 3565 struct mlx4_en_port_profile new_prof; 3566 struct mlx4_en_priv *tmp; 3567 int port_up = 0; 3568 int err = 0; 3569 3570 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3571 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3572 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3573 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3574 return 0; /* Nothing to change */ 3575 3576 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3577 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3578 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3579 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3580 return -EINVAL; 3581 } 3582 3583 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3584 if (!tmp) 3585 return -ENOMEM; 3586 3587 mutex_lock(&mdev->state_lock); 3588 3589 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3590 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3591 3592 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); 3593 if (err) 3594 goto out; 3595 3596 if (priv->port_up) { 3597 port_up = 1; 3598 mlx4_en_stop_port(dev, 1); 3599 } 3600 3601 mlx4_en_safe_replace_resources(priv, tmp); 3602 3603 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3604 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3605 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3606 else 3607 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3608 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3609 /* RX time-stamping is OFF, update the RX vlan offload 3610 * to the latest wanted state 3611 */ 3612 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3613 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3614 else 3615 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3616 } 3617 3618 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3619 if (features & NETIF_F_RXFCS) 3620 dev->features |= NETIF_F_RXFCS; 3621 else 3622 dev->features &= ~NETIF_F_RXFCS; 3623 } 3624 3625 /* RX vlan offload and RX time-stamping can't co-exist ! 3626 * Regardless of the caller's choice, 3627 * Turn Off RX vlan offload in case of time-stamping is ON 3628 */ 3629 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3630 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3631 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3632 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3633 } 3634 3635 if (port_up) { 3636 err = mlx4_en_start_port(dev); 3637 if (err) 3638 en_err(priv, "Failed starting port\n"); 3639 } 3640 3641 out: 3642 mutex_unlock(&mdev->state_lock); 3643 kfree(tmp); 3644 if (!err) 3645 netdev_features_change(dev); 3646 return err; 3647 } 3648