/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id
mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
};

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
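
/* Hash the flow's 4-tuple (addresses xor'd with the ports) into one of the
 * driver's RFS filter buckets; the same bucket is used for both lookup and
 * insertion of struct mlx4_en_filter entries.
 */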
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}


static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}


static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
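
/* VLAN filtering: the two callbacks below add/remove a VLAN ID in the
 * per-port VLAN filter and (un)register it with the firmware while the
 * device and port are up, all under mdev->state_lock.
 */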
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
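
/* Replace the MAC steering traffic to the given QP: in non-A0 steering
 * modes the previous address is unregistered and its unicast/tunnel rules
 * are torn down before the new address is registered and re-attached.
 */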
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
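
/* Worker for the rx_mode_task: refreshes carrier state and reapplies
 * unicast filtering, promiscuous mode and the multicast list under
 * mdev->state_lock.
 */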
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct rtnl_link_stats64 *
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);

	return stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
						priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
					priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
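
/* Set up an IRQ affinity hint for an RX ring, spreading rings across the
 * CPUs local to the device's NUMA node.
 */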
static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
	int rr_index;

	rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
	if (rr_index >= 0) {
		tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
		tx_ring->recycle_ring = priv->rx_ring[rr_index];
		en_dbg(DRV, priv,
		       "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
		       tx_ring_idx, rr_index);
	} else {
		tx_ring->recycle_ring = NULL;
	}
}

int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		mlx4_en_init_recycle_ring(priv, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++)
		napi_schedule(&priv->rx_cq[i]->napi);

	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}


void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port*/
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->tx_dropped = 0;
		priv->tx_ring[i]->queue_stopped = 0;
		priv->tx_ring[i]->wake_queue = 0;
		priv->tx_ring[i]->tso_packets = 0;
		priv->tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
priv->rx_ring[i]->packets = 0; 1948 priv->rx_ring[i]->csum_ok = 0; 1949 priv->rx_ring[i]->csum_none = 0; 1950 priv->rx_ring[i]->csum_complete = 0; 1951 } 1952 } 1953 1954 static int mlx4_en_open(struct net_device *dev) 1955 { 1956 struct mlx4_en_priv *priv = netdev_priv(dev); 1957 struct mlx4_en_dev *mdev = priv->mdev; 1958 int err = 0; 1959 1960 mutex_lock(&mdev->state_lock); 1961 1962 if (!mdev->device_up) { 1963 en_err(priv, "Cannot open - device down/disabled\n"); 1964 err = -EBUSY; 1965 goto out; 1966 } 1967 1968 /* Reset HW statistics and SW counters */ 1969 mlx4_en_clear_stats(dev); 1970 1971 err = mlx4_en_start_port(dev); 1972 if (err) 1973 en_err(priv, "Failed starting port:%d\n", priv->port); 1974 1975 out: 1976 mutex_unlock(&mdev->state_lock); 1977 return err; 1978 } 1979 1980 1981 static int mlx4_en_close(struct net_device *dev) 1982 { 1983 struct mlx4_en_priv *priv = netdev_priv(dev); 1984 struct mlx4_en_dev *mdev = priv->mdev; 1985 1986 en_dbg(IFDOWN, priv, "Close port called\n"); 1987 1988 mutex_lock(&mdev->state_lock); 1989 1990 mlx4_en_stop_port(dev, 0); 1991 netif_carrier_off(dev); 1992 1993 mutex_unlock(&mdev->state_lock); 1994 return 0; 1995 } 1996 1997 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1998 { 1999 int i; 2000 2001 #ifdef CONFIG_RFS_ACCEL 2002 priv->dev->rx_cpu_rmap = NULL; 2003 #endif 2004 2005 for (i = 0; i < priv->tx_ring_num; i++) { 2006 if (priv->tx_ring && priv->tx_ring[i]) 2007 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 2008 if (priv->tx_cq && priv->tx_cq[i]) 2009 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 2010 } 2011 2012 for (i = 0; i < priv->rx_ring_num; i++) { 2013 if (priv->rx_ring[i]) 2014 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2015 priv->prof->rx_ring_size, priv->stride); 2016 if (priv->rx_cq[i]) 2017 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2018 } 2019 2020 } 2021 2022 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 2023 { 2024 struct mlx4_en_port_profile *prof = priv->prof; 2025 int i; 2026 int node; 2027 2028 /* Create tx Rings */ 2029 for (i = 0; i < priv->tx_ring_num; i++) { 2030 node = cpu_to_node(i % num_online_cpus()); 2031 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 2032 prof->tx_ring_size, i, TX, node)) 2033 goto err; 2034 2035 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 2036 prof->tx_ring_size, TXBB_SIZE, 2037 node, i)) 2038 goto err; 2039 } 2040 2041 /* Create rx Rings */ 2042 for (i = 0; i < priv->rx_ring_num; i++) { 2043 node = cpu_to_node(i % num_online_cpus()); 2044 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2045 prof->rx_ring_size, i, RX, node)) 2046 goto err; 2047 2048 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2049 prof->rx_ring_size, priv->stride, 2050 node)) 2051 goto err; 2052 } 2053 2054 #ifdef CONFIG_RFS_ACCEL 2055 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2056 #endif 2057 2058 return 0; 2059 2060 err: 2061 en_err(priv, "Failed to allocate NIC resources\n"); 2062 for (i = 0; i < priv->rx_ring_num; i++) { 2063 if (priv->rx_ring[i]) 2064 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2065 prof->rx_ring_size, 2066 priv->stride); 2067 if (priv->rx_cq[i]) 2068 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2069 } 2070 for (i = 0; i < priv->tx_ring_num; i++) { 2071 if (priv->tx_ring[i]) 2072 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 2073 if (priv->tx_cq[i]) 2074 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 2075 } 2076 return -ENOMEM; 2077 } 2078 2079 static void mlx4_en_shutdown(struct net_device *dev) 2080 { 2081 
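/* System shutdown path: take RTNL, detach the netdev and close the port
 * so that no further transmit or configuration callbacks run while the
 * machine goes down.
 */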
rtnl_lock(); 2082 netif_device_detach(dev); 2083 mlx4_en_close(dev); 2084 rtnl_unlock(); 2085 } 2086 2087 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2088 struct mlx4_en_priv *src, 2089 struct mlx4_en_port_profile *prof) 2090 { 2091 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2092 sizeof(dst->hwtstamp_config)); 2093 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; 2094 dst->tx_ring_num = prof->tx_ring_num; 2095 dst->rx_ring_num = prof->rx_ring_num; 2096 dst->flags = prof->flags; 2097 dst->mdev = src->mdev; 2098 dst->port = src->port; 2099 dst->dev = src->dev; 2100 dst->prof = prof; 2101 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2102 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2103 2104 dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2105 GFP_KERNEL); 2106 if (!dst->tx_ring) 2107 return -ENOMEM; 2108 2109 dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, 2110 GFP_KERNEL); 2111 if (!dst->tx_cq) { 2112 kfree(dst->tx_ring); 2113 return -ENOMEM; 2114 } 2115 return 0; 2116 } 2117 2118 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2119 struct mlx4_en_priv *src) 2120 { 2121 memcpy(dst->rx_ring, src->rx_ring, 2122 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2123 memcpy(dst->rx_cq, src->rx_cq, 2124 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2125 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2126 sizeof(dst->hwtstamp_config)); 2127 dst->tx_ring_num = src->tx_ring_num; 2128 dst->rx_ring_num = src->rx_ring_num; 2129 dst->tx_ring = src->tx_ring; 2130 dst->tx_cq = src->tx_cq; 2131 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2132 } 2133 2134 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2135 struct mlx4_en_priv *tmp, 2136 struct mlx4_en_port_profile *prof) 2137 { 2138 mlx4_en_copy_priv(tmp, priv, prof); 2139 2140 if (mlx4_en_alloc_resources(tmp)) { 2141 en_warn(priv, 2142 "%s: Resource allocation failed, using previous configuration\n", 2143 __func__); 2144 kfree(tmp->tx_ring); 2145 kfree(tmp->tx_cq); 2146 return -ENOMEM; 2147 } 2148 return 0; 2149 } 2150 2151 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2152 struct mlx4_en_priv *tmp) 2153 { 2154 mlx4_en_free_resources(priv); 2155 mlx4_en_update_priv(priv, tmp); 2156 } 2157 2158 void mlx4_en_destroy_netdev(struct net_device *dev) 2159 { 2160 struct mlx4_en_priv *priv = netdev_priv(dev); 2161 struct mlx4_en_dev *mdev = priv->mdev; 2162 bool shutdown = mdev->dev->persist->interface_state & 2163 MLX4_INTERFACE_STATE_SHUTDOWN; 2164 2165 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2166 2167 /* Unregister device - this will close the port if it was up */ 2168 if (priv->registered) { 2169 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2170 priv->port)); 2171 if (shutdown) 2172 mlx4_en_shutdown(dev); 2173 else 2174 unregister_netdev(dev); 2175 } 2176 2177 if (priv->allocated) 2178 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2179 2180 cancel_delayed_work(&priv->stats_task); 2181 cancel_delayed_work(&priv->service_task); 2182 /* flush any pending task for this netdev */ 2183 flush_workqueue(mdev->workqueue); 2184 2185 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2186 mlx4_en_remove_timestamp(mdev); 2187 2188 /* Detach the netdev so tasks would not attempt to access it */ 2189 mutex_lock(&mdev->state_lock); 2190 mdev->pndev[priv->port] = NULL; 2191 mdev->upper[priv->port] = NULL; 2192 mutex_unlock(&mdev->state_lock); 2193 2194 #ifdef 
CONFIG_RFS_ACCEL 2195 mlx4_en_cleanup_filters(priv); 2196 #endif 2197 2198 mlx4_en_free_resources(priv); 2199 2200 kfree(priv->tx_ring); 2201 kfree(priv->tx_cq); 2202 2203 if (!shutdown) 2204 free_netdev(dev); 2205 dev->ethtool_ops = NULL; 2206 } 2207 2208 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2209 { 2210 struct mlx4_en_priv *priv = netdev_priv(dev); 2211 struct mlx4_en_dev *mdev = priv->mdev; 2212 int err = 0; 2213 2214 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2215 dev->mtu, new_mtu); 2216 2217 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 2218 en_err(priv, "Bad MTU size:%d.\n", new_mtu); 2219 return -EPERM; 2220 } 2221 if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { 2222 en_err(priv, "MTU size:%d requires frags but XDP running\n", 2223 new_mtu); 2224 return -EOPNOTSUPP; 2225 } 2226 dev->mtu = new_mtu; 2227 2228 if (netif_running(dev)) { 2229 mutex_lock(&mdev->state_lock); 2230 if (!mdev->device_up) { 2231 /* NIC is probably restarting - let watchdog task reset 2232 * the port */ 2233 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2234 } else { 2235 mlx4_en_stop_port(dev, 1); 2236 err = mlx4_en_start_port(dev); 2237 if (err) { 2238 en_err(priv, "Failed restarting port:%d\n", 2239 priv->port); 2240 queue_work(mdev->workqueue, &priv->watchdog_task); 2241 } 2242 } 2243 mutex_unlock(&mdev->state_lock); 2244 } 2245 return 0; 2246 } 2247 2248 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2249 { 2250 struct mlx4_en_priv *priv = netdev_priv(dev); 2251 struct mlx4_en_dev *mdev = priv->mdev; 2252 struct hwtstamp_config config; 2253 2254 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2255 return -EFAULT; 2256 2257 /* reserved for future extensions */ 2258 if (config.flags) 2259 return -EINVAL; 2260 2261 /* device doesn't support time stamping */ 2262 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2263 return -EINVAL; 2264 2265 /* TX HW timestamp */ 2266 switch (config.tx_type) { 2267 case HWTSTAMP_TX_OFF: 2268 case HWTSTAMP_TX_ON: 2269 break; 2270 default: 2271 return -ERANGE; 2272 } 2273 2274 /* RX HW timestamp */ 2275 switch (config.rx_filter) { 2276 case HWTSTAMP_FILTER_NONE: 2277 break; 2278 case HWTSTAMP_FILTER_ALL: 2279 case HWTSTAMP_FILTER_SOME: 2280 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2281 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2282 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2283 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2284 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2285 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2286 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2287 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2288 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2289 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2290 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2291 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2292 config.rx_filter = HWTSTAMP_FILTER_ALL; 2293 break; 2294 default: 2295 return -ERANGE; 2296 } 2297 2298 if (mlx4_en_reset_config(dev, config, dev->features)) { 2299 config.tx_type = HWTSTAMP_TX_OFF; 2300 config.rx_filter = HWTSTAMP_FILTER_NONE; 2301 } 2302 2303 return copy_to_user(ifr->ifr_data, &config, 2304 sizeof(config)) ? -EFAULT : 0; 2305 } 2306 2307 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2308 { 2309 struct mlx4_en_priv *priv = netdev_priv(dev); 2310 2311 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2312 sizeof(priv->hwtstamp_config)) ? 
-EFAULT : 0; 2313 } 2314 2315 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2316 { 2317 switch (cmd) { 2318 case SIOCSHWTSTAMP: 2319 return mlx4_en_hwtstamp_set(dev, ifr); 2320 case SIOCGHWTSTAMP: 2321 return mlx4_en_hwtstamp_get(dev, ifr); 2322 default: 2323 return -EOPNOTSUPP; 2324 } 2325 } 2326 2327 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2328 netdev_features_t features) 2329 { 2330 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2331 struct mlx4_en_dev *mdev = en_priv->mdev; 2332 2333 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2334 * enable/disable make sure S-TAG flag is always in same state as 2335 * C-TAG. 2336 */ 2337 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2338 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2339 features |= NETIF_F_HW_VLAN_STAG_RX; 2340 else 2341 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2342 2343 return features; 2344 } 2345 2346 static int mlx4_en_set_features(struct net_device *netdev, 2347 netdev_features_t features) 2348 { 2349 struct mlx4_en_priv *priv = netdev_priv(netdev); 2350 bool reset = false; 2351 int ret = 0; 2352 2353 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2354 en_info(priv, "Turn %s RX-FCS\n", 2355 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2356 reset = true; 2357 } 2358 2359 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2360 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2361 2362 en_info(priv, "Turn %s RX-ALL\n", 2363 ignore_fcs_value ? "ON" : "OFF"); 2364 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2365 priv->port, ignore_fcs_value); 2366 if (ret) 2367 return ret; 2368 } 2369 2370 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2371 en_info(priv, "Turn %s RX vlan strip offload\n", 2372 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2373 reset = true; 2374 } 2375 2376 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2377 en_info(priv, "Turn %s TX vlan strip offload\n", 2378 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2379 2380 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2381 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2382 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2383 2384 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2385 en_info(priv, "Turn %s loopback\n", 2386 (features & NETIF_F_LOOPBACK) ? 
"ON" : "OFF"); 2387 mlx4_en_update_loopback_state(netdev, features); 2388 } 2389 2390 if (reset) { 2391 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2392 features); 2393 if (ret) 2394 return ret; 2395 } 2396 2397 return 0; 2398 } 2399 2400 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2401 { 2402 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2403 struct mlx4_en_dev *mdev = en_priv->mdev; 2404 u64 mac_u64 = mlx4_mac_to_u64(mac); 2405 2406 if (is_multicast_ether_addr(mac)) 2407 return -EINVAL; 2408 2409 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2410 } 2411 2412 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 2413 __be16 vlan_proto) 2414 { 2415 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2416 struct mlx4_en_dev *mdev = en_priv->mdev; 2417 2418 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos, 2419 vlan_proto); 2420 } 2421 2422 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2423 int max_tx_rate) 2424 { 2425 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2426 struct mlx4_en_dev *mdev = en_priv->mdev; 2427 2428 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2429 max_tx_rate); 2430 } 2431 2432 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2433 { 2434 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2435 struct mlx4_en_dev *mdev = en_priv->mdev; 2436 2437 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2438 } 2439 2440 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2441 { 2442 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2443 struct mlx4_en_dev *mdev = en_priv->mdev; 2444 2445 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2446 } 2447 2448 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2449 { 2450 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2451 struct mlx4_en_dev *mdev = en_priv->mdev; 2452 2453 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2454 } 2455 2456 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2457 struct ifla_vf_stats *vf_stats) 2458 { 2459 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2460 struct mlx4_en_dev *mdev = en_priv->mdev; 2461 2462 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2463 } 2464 2465 #define PORT_ID_BYTE_LEN 8 2466 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2467 struct netdev_phys_item_id *ppid) 2468 { 2469 struct mlx4_en_priv *priv = netdev_priv(dev); 2470 struct mlx4_dev *mdev = priv->mdev->dev; 2471 int i; 2472 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2473 2474 if (!phys_port_id) 2475 return -EOPNOTSUPP; 2476 2477 ppid->id_len = sizeof(phys_port_id); 2478 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2479 ppid->id[i] = phys_port_id & 0xff; 2480 phys_port_id >>= 8; 2481 } 2482 return 0; 2483 } 2484 2485 static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2486 { 2487 int ret; 2488 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2489 vxlan_add_task); 2490 2491 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2492 if (ret) 2493 goto out; 2494 2495 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2496 VXLAN_STEER_BY_OUTER_MAC, 1); 2497 out: 2498 if (ret) { 2499 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2500 return; 2501 } 2502 2503 /* set offloads */ 2504 
priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2505 NETIF_F_RXCSUM | 2506 NETIF_F_TSO | NETIF_F_TSO6 | 2507 NETIF_F_GSO_UDP_TUNNEL | 2508 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2509 NETIF_F_GSO_PARTIAL; 2510 } 2511 2512 static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2513 { 2514 int ret; 2515 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2516 vxlan_del_task); 2517 /* unset offloads */ 2518 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2519 NETIF_F_RXCSUM | 2520 NETIF_F_TSO | NETIF_F_TSO6 | 2521 NETIF_F_GSO_UDP_TUNNEL | 2522 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2523 NETIF_F_GSO_PARTIAL); 2524 2525 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2526 VXLAN_STEER_BY_OUTER_MAC, 0); 2527 if (ret) 2528 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2529 2530 priv->vxlan_port = 0; 2531 } 2532 2533 static void mlx4_en_add_vxlan_port(struct net_device *dev, 2534 struct udp_tunnel_info *ti) 2535 { 2536 struct mlx4_en_priv *priv = netdev_priv(dev); 2537 __be16 port = ti->port; 2538 __be16 current_port; 2539 2540 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2541 return; 2542 2543 if (ti->sa_family != AF_INET) 2544 return; 2545 2546 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2547 return; 2548 2549 current_port = priv->vxlan_port; 2550 if (current_port && current_port != port) { 2551 en_warn(priv, "vxlan port %d configured, can't add port %d\n", 2552 ntohs(current_port), ntohs(port)); 2553 return; 2554 } 2555 2556 priv->vxlan_port = port; 2557 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); 2558 } 2559 2560 static void mlx4_en_del_vxlan_port(struct net_device *dev, 2561 struct udp_tunnel_info *ti) 2562 { 2563 struct mlx4_en_priv *priv = netdev_priv(dev); 2564 __be16 port = ti->port; 2565 __be16 current_port; 2566 2567 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2568 return; 2569 2570 if (ti->sa_family != AF_INET) 2571 return; 2572 2573 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2574 return; 2575 2576 current_port = priv->vxlan_port; 2577 if (current_port != port) { 2578 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); 2579 return; 2580 } 2581 2582 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2583 } 2584 2585 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2586 struct net_device *dev, 2587 netdev_features_t features) 2588 { 2589 features = vlan_features_check(skb, features); 2590 features = vxlan_features_check(skb, features); 2591 2592 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2593 * support inner IPv6 checksums and segmentation so we need to 2594 * strip that feature if this is an IPv6 encapsulated frame. 
2595 */ 2596 if (skb->encapsulation && 2597 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2598 struct mlx4_en_priv *priv = netdev_priv(dev); 2599 2600 if (!priv->vxlan_port || 2601 (ip_hdr(skb)->version != 4) || 2602 (udp_hdr(skb)->dest != priv->vxlan_port)) 2603 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2604 } 2605 2606 return features; 2607 } 2608 2609 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2610 { 2611 struct mlx4_en_priv *priv = netdev_priv(dev); 2612 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; 2613 struct mlx4_update_qp_params params; 2614 int err; 2615 2616 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2617 return -EOPNOTSUPP; 2618 2619 /* rate provided to us in Mb/s, check if it fits into 12 bits, if not use Gb/s */ 2620 if (maxrate >> 12) { 2621 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2622 params.rate_val = maxrate / 1000; 2623 } else if (maxrate) { 2624 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2625 params.rate_val = maxrate; 2626 } else { /* zero serves to revoke the QP rate-limitation */ 2627 params.rate_unit = 0; 2628 params.rate_val = 0; 2629 } 2630 2631 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2632 &params); 2633 return err; 2634 } 2635 2636 static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) 2637 { 2638 struct mlx4_en_priv *priv = netdev_priv(dev); 2639 struct mlx4_en_dev *mdev = priv->mdev; 2640 struct bpf_prog *old_prog; 2641 int xdp_ring_num; 2642 int port_up = 0; 2643 int err; 2644 int i; 2645 2646 xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0; 2647 2648 /* No need to reconfigure buffers when simply swapping the 2649 * program for a new one. 2650 */ 2651 if (priv->xdp_ring_num == xdp_ring_num) { 2652 if (prog) { 2653 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2654 if (IS_ERR(prog)) 2655 return PTR_ERR(prog); 2656 } 2657 mutex_lock(&mdev->state_lock); 2658 for (i = 0; i < priv->rx_ring_num; i++) { 2659 old_prog = rcu_dereference_protected( 2660 priv->rx_ring[i]->xdp_prog, 2661 lockdep_is_held(&mdev->state_lock)); 2662 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2663 if (old_prog) 2664 bpf_prog_put(old_prog); 2665 } 2666 mutex_unlock(&mdev->state_lock); 2667 return 0; 2668 } 2669 2670 if (priv->num_frags > 1) { 2671 en_err(priv, "Cannot set XDP if MTU requires multiple frags\n"); 2672 return -EOPNOTSUPP; 2673 } 2674 2675 if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) { 2676 en_err(priv, 2677 "Minimum %d tx channels required to run XDP\n", 2678 (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP); 2679 return -EINVAL; 2680 } 2681 2682 if (prog) { 2683 prog = bpf_prog_add(prog, priv->rx_ring_num - 1); 2684 if (IS_ERR(prog)) 2685 return PTR_ERR(prog); 2686 } 2687 2688 mutex_lock(&mdev->state_lock); 2689 if (priv->port_up) { 2690 port_up = 1; 2691 mlx4_en_stop_port(dev, 1); 2692 } 2693 2694 priv->xdp_ring_num = xdp_ring_num; 2695 netif_set_real_num_tx_queues(dev, priv->tx_ring_num - 2696 priv->xdp_ring_num); 2697 2698 for (i = 0; i < priv->rx_ring_num; i++) { 2699 old_prog = rcu_dereference_protected( 2700 priv->rx_ring[i]->xdp_prog, 2701 lockdep_is_held(&mdev->state_lock)); 2702 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog); 2703 if (old_prog) 2704 bpf_prog_put(old_prog); 2705 } 2706 2707 if (port_up) { 2708 err = mlx4_en_start_port(dev); 2709 if (err) { 2710 en_err(priv, "Failed starting port %d for XDP change\n", 2711 priv->port); 2712 queue_work(mdev->workqueue,
&priv->watchdog_task); 2713 } 2714 } 2715 2716 mutex_unlock(&mdev->state_lock); 2717 return 0; 2718 } 2719 2720 static bool mlx4_xdp_attached(struct net_device *dev) 2721 { 2722 struct mlx4_en_priv *priv = netdev_priv(dev); 2723 2724 return !!priv->xdp_ring_num; 2725 } 2726 2727 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) 2728 { 2729 switch (xdp->command) { 2730 case XDP_SETUP_PROG: 2731 return mlx4_xdp_set(dev, xdp->prog); 2732 case XDP_QUERY_PROG: 2733 xdp->prog_attached = mlx4_xdp_attached(dev); 2734 return 0; 2735 default: 2736 return -EINVAL; 2737 } 2738 } 2739 2740 static const struct net_device_ops mlx4_netdev_ops = { 2741 .ndo_open = mlx4_en_open, 2742 .ndo_stop = mlx4_en_close, 2743 .ndo_start_xmit = mlx4_en_xmit, 2744 .ndo_select_queue = mlx4_en_select_queue, 2745 .ndo_get_stats64 = mlx4_en_get_stats64, 2746 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2747 .ndo_set_mac_address = mlx4_en_set_mac, 2748 .ndo_validate_addr = eth_validate_addr, 2749 .ndo_change_mtu = mlx4_en_change_mtu, 2750 .ndo_do_ioctl = mlx4_en_ioctl, 2751 .ndo_tx_timeout = mlx4_en_tx_timeout, 2752 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2753 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2754 #ifdef CONFIG_NET_POLL_CONTROLLER 2755 .ndo_poll_controller = mlx4_en_netpoll, 2756 #endif 2757 .ndo_set_features = mlx4_en_set_features, 2758 .ndo_fix_features = mlx4_en_fix_features, 2759 .ndo_setup_tc = __mlx4_en_setup_tc, 2760 #ifdef CONFIG_RFS_ACCEL 2761 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2762 #endif 2763 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2764 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2765 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2766 .ndo_features_check = mlx4_en_features_check, 2767 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2768 .ndo_xdp = mlx4_xdp, 2769 }; 2770 2771 static const struct net_device_ops mlx4_netdev_ops_master = { 2772 .ndo_open = mlx4_en_open, 2773 .ndo_stop = mlx4_en_close, 2774 .ndo_start_xmit = mlx4_en_xmit, 2775 .ndo_select_queue = mlx4_en_select_queue, 2776 .ndo_get_stats64 = mlx4_en_get_stats64, 2777 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2778 .ndo_set_mac_address = mlx4_en_set_mac, 2779 .ndo_validate_addr = eth_validate_addr, 2780 .ndo_change_mtu = mlx4_en_change_mtu, 2781 .ndo_tx_timeout = mlx4_en_tx_timeout, 2782 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2783 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2784 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2785 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2786 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2787 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2788 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2789 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2790 .ndo_get_vf_config = mlx4_en_get_vf_config, 2791 #ifdef CONFIG_NET_POLL_CONTROLLER 2792 .ndo_poll_controller = mlx4_en_netpoll, 2793 #endif 2794 .ndo_set_features = mlx4_en_set_features, 2795 .ndo_fix_features = mlx4_en_fix_features, 2796 .ndo_setup_tc = __mlx4_en_setup_tc, 2797 #ifdef CONFIG_RFS_ACCEL 2798 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2799 #endif 2800 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2801 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, 2802 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, 2803 .ndo_features_check = mlx4_en_features_check, 2804 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2805 .ndo_xdp = mlx4_xdp, 2806 }; 2807 2808 struct mlx4_en_bond { 2809 struct work_struct work; 2810 struct mlx4_en_priv *priv; 2811 int is_bonded; 2812 struct mlx4_port_map port_map; 2813 }; 2814 2815 static void 
mlx4_en_bond_work(struct work_struct *work) 2816 { 2817 struct mlx4_en_bond *bond = container_of(work, 2818 struct mlx4_en_bond, 2819 work); 2820 int err = 0; 2821 struct mlx4_dev *dev = bond->priv->mdev->dev; 2822 2823 if (bond->is_bonded) { 2824 if (!mlx4_is_bonded(dev)) { 2825 err = mlx4_bond(dev); 2826 if (err) 2827 en_err(bond->priv, "Failed to bond device\n"); 2828 } 2829 if (!err) { 2830 err = mlx4_port_map_set(dev, &bond->port_map); 2831 if (err) 2832 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n", 2833 bond->port_map.port1, 2834 bond->port_map.port2, 2835 err); 2836 } 2837 } else if (mlx4_is_bonded(dev)) { 2838 err = mlx4_unbond(dev); 2839 if (err) 2840 en_err(bond->priv, "Failed to unbond device\n"); 2841 } 2842 dev_put(bond->priv->dev); 2843 kfree(bond); 2844 } 2845 2846 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 2847 u8 v2p_p1, u8 v2p_p2) 2848 { 2849 struct mlx4_en_bond *bond = NULL; 2850 2851 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 2852 if (!bond) 2853 return -ENOMEM; 2854 2855 INIT_WORK(&bond->work, mlx4_en_bond_work); 2856 bond->priv = priv; 2857 bond->is_bonded = is_bonded; 2858 bond->port_map.port1 = v2p_p1; 2859 bond->port_map.port2 = v2p_p2; 2860 dev_hold(priv->dev); 2861 queue_work(priv->mdev->workqueue, &bond->work); 2862 return 0; 2863 } 2864 2865 int mlx4_en_netdev_event(struct notifier_block *this, 2866 unsigned long event, void *ptr) 2867 { 2868 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2869 u8 port = 0; 2870 struct mlx4_en_dev *mdev; 2871 struct mlx4_dev *dev; 2872 int i, num_eth_ports = 0; 2873 bool do_bond = true; 2874 struct mlx4_en_priv *priv; 2875 u8 v2p_port1 = 0; 2876 u8 v2p_port2 = 0; 2877 2878 if (!net_eq(dev_net(ndev), &init_net)) 2879 return NOTIFY_DONE; 2880 2881 mdev = container_of(this, struct mlx4_en_dev, nb); 2882 dev = mdev->dev; 2883 2884 /* Go into this mode only when two network devices set on two ports 2885 * of the same mlx4 device are slaves of the same bonding master 2886 */ 2887 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 2888 ++num_eth_ports; 2889 if (!port && (mdev->pndev[i] == ndev)) 2890 port = i; 2891 mdev->upper[i] = mdev->pndev[i] ? 2892 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 2893 /* condition not met: network device is a slave */ 2894 if (!mdev->upper[i]) 2895 do_bond = false; 2896 if (num_eth_ports < 2) 2897 continue; 2898 /* condition not met: same master */ 2899 if (mdev->upper[i] != mdev->upper[i-1]) 2900 do_bond = false; 2901 } 2902 /* condition not met: 2 slaves */ 2903 do_bond = (num_eth_ports == 2) ?
do_bond : false; 2904 2905 /* handle only events that come with enough info */ 2906 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 2907 return NOTIFY_DONE; 2908 2909 priv = netdev_priv(ndev); 2910 if (do_bond) { 2911 struct netdev_notifier_bonding_info *notifier_info = ptr; 2912 struct netdev_bonding_info *bonding_info = 2913 &notifier_info->bonding_info; 2914 2915 /* required mode 1, 2 or 4 */ 2916 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 2917 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 2918 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 2919 do_bond = false; 2920 2921 /* require exactly 2 slaves */ 2922 if (bonding_info->master.num_slaves != 2) 2923 do_bond = false; 2924 2925 /* calc v2p */ 2926 if (do_bond) { 2927 if (bonding_info->master.bond_mode == 2928 BOND_MODE_ACTIVEBACKUP) { 2929 /* in active-backup mode virtual ports are 2930 * mapped to the physical port of the active 2931 * slave */ 2932 if (bonding_info->slave.state == 2933 BOND_STATE_BACKUP) { 2934 if (port == 1) { 2935 v2p_port1 = 2; 2936 v2p_port2 = 2; 2937 } else { 2938 v2p_port1 = 1; 2939 v2p_port2 = 1; 2940 } 2941 } else { /* BOND_STATE_ACTIVE */ 2942 if (port == 1) { 2943 v2p_port1 = 1; 2944 v2p_port2 = 1; 2945 } else { 2946 v2p_port1 = 2; 2947 v2p_port2 = 2; 2948 } 2949 } 2950 } else { /* Active-Active */ 2951 /* in active-active mode a virtual port is 2952 * mapped to the native physical port if and only 2953 * if the physical port is up */ 2954 __s8 link = bonding_info->slave.link; 2955 2956 if (port == 1) 2957 v2p_port2 = 2; 2958 else 2959 v2p_port1 = 1; 2960 if ((link == BOND_LINK_UP) || 2961 (link == BOND_LINK_FAIL)) { 2962 if (port == 1) 2963 v2p_port1 = 1; 2964 else 2965 v2p_port2 = 2; 2966 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 2967 if (port == 1) 2968 v2p_port1 = 2; 2969 else 2970 v2p_port2 = 1; 2971 } 2972 } 2973 } 2974 } 2975 2976 mlx4_en_queue_bond_work(priv, do_bond, 2977 v2p_port1, v2p_port2); 2978 2979 return NOTIFY_DONE; 2980 } 2981 2982 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 2983 struct mlx4_en_stats_bitmap *stats_bitmap, 2984 u8 rx_ppp, u8 rx_pause, 2985 u8 tx_ppp, u8 tx_pause) 2986 { 2987 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 2988 2989 if (!mlx4_is_slave(dev) && 2990 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 2991 mutex_lock(&stats_bitmap->mutex); 2992 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 2993 2994 if (rx_ppp) 2995 bitmap_set(stats_bitmap->bitmap, last_i, 2996 NUM_FLOW_PRIORITY_STATS_RX); 2997 last_i += NUM_FLOW_PRIORITY_STATS_RX; 2998 2999 if (rx_pause && !(rx_ppp)) 3000 bitmap_set(stats_bitmap->bitmap, last_i, 3001 NUM_FLOW_STATS_RX); 3002 last_i += NUM_FLOW_STATS_RX; 3003 3004 if (tx_ppp) 3005 bitmap_set(stats_bitmap->bitmap, last_i, 3006 NUM_FLOW_PRIORITY_STATS_TX); 3007 last_i += NUM_FLOW_PRIORITY_STATS_TX; 3008 3009 if (tx_pause && !(tx_ppp)) 3010 bitmap_set(stats_bitmap->bitmap, last_i, 3011 NUM_FLOW_STATS_TX); 3012 last_i += NUM_FLOW_STATS_TX; 3013 3014 mutex_unlock(&stats_bitmap->mutex); 3015 } 3016 } 3017 3018 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 3019 struct mlx4_en_stats_bitmap *stats_bitmap, 3020 u8 rx_ppp, u8 rx_pause, 3021 u8 tx_ppp, u8 tx_pause) 3022 { 3023 int last_i = 0; 3024 3025 mutex_init(&stats_bitmap->mutex); 3026 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 3027 3028 if (mlx4_is_slave(dev)) { 3029 bitmap_set(stats_bitmap->bitmap, last_i + 3030 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 3031 bitmap_set(stats_bitmap->bitmap,
last_i + 3032 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 3033 bitmap_set(stats_bitmap->bitmap, last_i + 3034 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 3035 bitmap_set(stats_bitmap->bitmap, last_i + 3036 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 3037 bitmap_set(stats_bitmap->bitmap, last_i + 3038 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 3039 bitmap_set(stats_bitmap->bitmap, last_i + 3040 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 3041 } else { 3042 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 3043 } 3044 last_i += NUM_MAIN_STATS; 3045 3046 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 3047 last_i += NUM_PORT_STATS; 3048 3049 if (mlx4_is_master(dev)) 3050 bitmap_set(stats_bitmap->bitmap, last_i, 3051 NUM_PF_STATS); 3052 last_i += NUM_PF_STATS; 3053 3054 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 3055 rx_ppp, rx_pause, 3056 tx_ppp, tx_pause); 3057 last_i += NUM_FLOW_STATS; 3058 3059 if (!mlx4_is_slave(dev)) 3060 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 3061 } 3062 3063 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 3064 struct mlx4_en_port_profile *prof) 3065 { 3066 struct net_device *dev; 3067 struct mlx4_en_priv *priv; 3068 int i; 3069 int err; 3070 3071 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3072 MAX_TX_RINGS, MAX_RX_RINGS); 3073 if (dev == NULL) 3074 return -ENOMEM; 3075 3076 netif_set_real_num_tx_queues(dev, prof->tx_ring_num); 3077 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 3078 3079 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 3080 dev->dev_port = port - 1; 3081 3082 /* 3083 * Initialize driver private data 3084 */ 3085 3086 priv = netdev_priv(dev); 3087 memset(priv, 0, sizeof(struct mlx4_en_priv)); 3088 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 3089 spin_lock_init(&priv->stats_lock); 3090 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3091 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 3092 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3093 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3094 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3095 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 3096 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 3097 #ifdef CONFIG_RFS_ACCEL 3098 INIT_LIST_HEAD(&priv->filters); 3099 spin_lock_init(&priv->filters_lock); 3100 #endif 3101 3102 priv->dev = dev; 3103 priv->mdev = mdev; 3104 priv->ddev = &mdev->pdev->dev; 3105 priv->prof = prof; 3106 priv->port = port; 3107 priv->port_up = false; 3108 priv->flags = prof->flags; 3109 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 3110 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 3111 MLX4_WQE_CTRL_SOLICITED); 3112 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 3113 priv->tx_ring_num = prof->tx_ring_num; 3114 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 3115 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 3116 3117 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 3118 GFP_KERNEL); 3119 if (!priv->tx_ring) { 3120 err = -ENOMEM; 3121 goto out; 3122 } 3123 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, 3124 GFP_KERNEL); 3125 if (!priv->tx_cq) { 3126 err = -ENOMEM; 3127 goto out; 3128 } 3129 priv->rx_ring_num = prof->rx_ring_num; 3130 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 
1 : 0; 3131 priv->cqe_size = mdev->dev->caps.cqe_size; 3132 priv->mac_index = -1; 3133 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3134 #ifdef CONFIG_MLX4_EN_DCB 3135 if (!mlx4_is_slave(priv->mdev->dev)) { 3136 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | 3137 DCB_CAP_DCBX_VER_IEEE; 3138 priv->flags |= MLX4_EN_DCB_ENABLED; 3139 priv->cee_config.pfc_state = false; 3140 3141 for (i = 0; i < MLX4_EN_NUM_UP; i++) 3142 priv->cee_config.dcb_pfc[i] = pfc_disabled; 3143 3144 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3145 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3146 } else { 3147 en_info(priv, "enabling only PFC DCB ops\n"); 3148 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 3149 } 3150 } 3151 #endif 3152 3153 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 3154 INIT_HLIST_HEAD(&priv->mac_hash[i]); 3155 3156 /* Query for default MAC and max MTU */ 3157 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 3158 3159 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 3160 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 3161 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3162 3163 /* Set default MAC */ 3164 dev->addr_len = ETH_ALEN; 3165 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3166 if (!is_valid_ether_addr(dev->dev_addr)) { 3167 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", 3168 priv->port, dev->dev_addr); 3169 err = -EINVAL; 3170 goto out; 3171 } else if (mlx4_is_slave(priv->mdev->dev) && 3172 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3173 /* Random MAC was assigned in mlx4_slave_cap 3174 * in mlx4_core module 3175 */ 3176 dev->addr_assign_type |= NET_ADDR_RANDOM; 3177 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3178 } 3179 3180 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3181 3182 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3183 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3184 err = mlx4_en_alloc_resources(priv); 3185 if (err) 3186 goto out; 3187 3188 /* Initialize time stamping config */ 3189 priv->hwtstamp_config.flags = 0; 3190 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3191 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3192 3193 /* Allocate page for receive rings */ 3194 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3195 MLX4_EN_PAGE_SIZE); 3196 if (err) { 3197 en_err(priv, "Failed to allocate page for rx qps\n"); 3198 goto out; 3199 } 3200 priv->allocated = 1; 3201 3202 /* 3203 * Initialize netdev entry points 3204 */ 3205 if (mlx4_is_master(priv->mdev->dev)) 3206 dev->netdev_ops = &mlx4_netdev_ops_master; 3207 else 3208 dev->netdev_ops = &mlx4_netdev_ops; 3209 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3210 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 3211 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3212 3213 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3214 3215 /* 3216 * Set driver features 3217 */ 3218 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3219 if (mdev->LSO_support) 3220 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3221 3222 dev->vlan_features = dev->hw_features; 3223 3224 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3225 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3226 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3227 NETIF_F_HW_VLAN_CTAG_FILTER; 3228 dev->hw_features |= NETIF_F_LOOPBACK | 3229 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3230 3231 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3232 dev->features |=
NETIF_F_HW_VLAN_STAG_RX | 3233 NETIF_F_HW_VLAN_STAG_FILTER; 3234 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3235 } 3236 3237 if (mlx4_is_slave(mdev->dev)) { 3238 bool vlan_offload_disabled; 3239 int phv; 3240 3241 err = get_phv_bit(mdev->dev, port, &phv); 3242 if (!err && phv) { 3243 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3244 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3245 } 3246 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port, 3247 &vlan_offload_disabled); 3248 if (!err && vlan_offload_disabled) { 3249 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3250 NETIF_F_HW_VLAN_CTAG_RX | 3251 NETIF_F_HW_VLAN_STAG_TX | 3252 NETIF_F_HW_VLAN_STAG_RX); 3253 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3254 NETIF_F_HW_VLAN_CTAG_RX | 3255 NETIF_F_HW_VLAN_STAG_TX | 3256 NETIF_F_HW_VLAN_STAG_RX); 3257 } 3258 } else { 3259 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3260 !(mdev->dev->caps.flags2 & 3261 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3262 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3263 } 3264 3265 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3266 dev->hw_features |= NETIF_F_RXFCS; 3267 3268 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3269 dev->hw_features |= NETIF_F_RXALL; 3270 3271 if (mdev->dev->caps.steering_mode == 3272 MLX4_STEERING_MODE_DEVICE_MANAGED && 3273 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3274 dev->hw_features |= NETIF_F_NTUPLE; 3275 3276 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3277 dev->priv_flags |= IFF_UNICAST_FLT; 3278 3279 /* Setting a default hash function value */ 3280 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3281 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3282 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3283 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3284 } else { 3285 en_warn(priv, 3286 "No RSS hash capabilities exposed, using Toeplitz\n"); 3287 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3288 } 3289 3290 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3291 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3292 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3293 NETIF_F_GSO_PARTIAL; 3294 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3295 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3296 NETIF_F_GSO_PARTIAL; 3297 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3298 } 3299 3300 mdev->pndev[port] = dev; 3301 mdev->upper[port] = NULL; 3302 3303 netif_carrier_off(dev); 3304 mlx4_en_set_default_moderation(priv); 3305 3306 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 3307 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3308 3309 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3310 3311 /* Configure port */ 3312 mlx4_en_calc_rx_buf(dev); 3313 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3314 priv->rx_skb_size + ETH_FCS_LEN, 3315 prof->tx_pause, prof->tx_ppp, 3316 prof->rx_pause, prof->rx_ppp); 3317 if (err) { 3318 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3319 priv->port, err); 3320 goto out; 3321 } 3322 3323 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3324 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3325 if (err) { 3326 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3327 err); 3328 goto out; 3329 } 3330 } 3331 3332 /* Init port */ 3333 en_warn(priv, "Initializing port\n"); 3334 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3335 if (err) { 3336 en_err(priv, "Failed Initializing 
port\n"); 3337 goto out; 3338 } 3339 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3340 3341 /* Initialize time stamp mechanism */ 3342 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3343 mlx4_en_init_timestamp(mdev); 3344 3345 queue_delayed_work(mdev->workqueue, &priv->service_task, 3346 SERVICE_TASK_DELAY); 3347 3348 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3349 mdev->profile.prof[priv->port].rx_ppp, 3350 mdev->profile.prof[priv->port].rx_pause, 3351 mdev->profile.prof[priv->port].tx_ppp, 3352 mdev->profile.prof[priv->port].tx_pause); 3353 3354 err = register_netdev(dev); 3355 if (err) { 3356 en_err(priv, "Netdev registration failed for port %d\n", port); 3357 goto out; 3358 } 3359 3360 priv->registered = 1; 3361 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3362 dev); 3363 3364 return 0; 3365 3366 out: 3367 mlx4_en_destroy_netdev(dev); 3368 return err; 3369 } 3370 3371 int mlx4_en_reset_config(struct net_device *dev, 3372 struct hwtstamp_config ts_config, 3373 netdev_features_t features) 3374 { 3375 struct mlx4_en_priv *priv = netdev_priv(dev); 3376 struct mlx4_en_dev *mdev = priv->mdev; 3377 struct mlx4_en_port_profile new_prof; 3378 struct mlx4_en_priv *tmp; 3379 int port_up = 0; 3380 int err = 0; 3381 3382 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3383 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3384 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3385 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3386 return 0; /* Nothing to change */ 3387 3388 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3389 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3390 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3391 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3392 return -EINVAL; 3393 } 3394 3395 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3396 if (!tmp) 3397 return -ENOMEM; 3398 3399 mutex_lock(&mdev->state_lock); 3400 3401 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3402 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3403 3404 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 3405 if (err) 3406 goto out; 3407 3408 if (priv->port_up) { 3409 port_up = 1; 3410 mlx4_en_stop_port(dev, 1); 3411 } 3412 3413 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", 3414 ts_config.rx_filter, 3415 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3416 3417 mlx4_en_safe_replace_resources(priv, tmp); 3418 3419 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3420 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3421 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3422 else 3423 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3424 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3425 /* RX time-stamping is OFF, update the RX vlan offload 3426 * to the latest wanted state 3427 */ 3428 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3429 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3430 else 3431 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3432 } 3433 3434 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3435 if (features & NETIF_F_RXFCS) 3436 dev->features |= NETIF_F_RXFCS; 3437 else 3438 dev->features &= ~NETIF_F_RXFCS; 3439 } 3440 3441 /* RX vlan offload and RX time-stamping can't co-exist ! 
3442 * Regardless of the caller's choice, 3443 * turn off RX VLAN offload when time-stamping is ON 3444 */ 3445 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3446 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3447 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3448 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3449 } 3450 3451 if (port_up) { 3452 err = mlx4_en_start_port(dev); 3453 if (err) 3454 en_err(priv, "Failed starting port\n"); 3455 } 3456 3457 out: 3458 mutex_unlock(&mdev->state_lock); 3459 kfree(tmp); 3460 if (!err) 3461 netdev_features_change(dev); 3462 return err; 3463 } 3464
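/*
 * Illustrative userspace sketch (not part of this driver): the kind of
 * SIOCSHWTSTAMP request that lands in mlx4_en_hwtstamp_set() and, via
 * mlx4_en_reset_config() above, forces RX VLAN stripping off while RX
 * time-stamping is enabled. "eth2" is a placeholder interface name.
 *
 *	struct hwtstamp_config cfg = { 0 };	// flags must stay 0
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	// driver maps PTP filters to ALL
 *	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */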