1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34 #include <linux/etherdevice.h> 35 #include <linux/tcp.h> 36 #include <linux/if_vlan.h> 37 #include <linux/delay.h> 38 #include <linux/slab.h> 39 #include <linux/hash.h> 40 #include <net/ip.h> 41 #include <net/busy_poll.h> 42 #include <net/vxlan.h> 43 #include <net/devlink.h> 44 45 #include <linux/mlx4/driver.h> 46 #include <linux/mlx4/device.h> 47 #include <linux/mlx4/cmd.h> 48 #include <linux/mlx4/cq.h> 49 50 #include "mlx4_en.h" 51 #include "en_port.h" 52 53 int mlx4_en_setup_tc(struct net_device *dev, u8 up) 54 { 55 struct mlx4_en_priv *priv = netdev_priv(dev); 56 int i; 57 unsigned int offset = 0; 58 59 if (up && up != MLX4_EN_NUM_UP) 60 return -EINVAL; 61 62 netdev_set_num_tc(dev, up); 63 64 /* Partition Tx queues evenly amongst UP's */ 65 for (i = 0; i < up; i++) { 66 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); 67 offset += priv->num_tx_rings_p_up; 68 } 69 70 return 0; 71 } 72 73 static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 74 struct tc_to_netdev *tc) 75 { 76 if (tc->type != TC_SETUP_MQPRIO) 77 return -EINVAL; 78 79 return mlx4_en_setup_tc(dev, tc->tc); 80 } 81 82 #ifdef CONFIG_RFS_ACCEL 83 84 struct mlx4_en_filter { 85 struct list_head next; 86 struct work_struct work; 87 88 u8 ip_proto; 89 __be32 src_ip; 90 __be32 dst_ip; 91 __be16 src_port; 92 __be16 dst_port; 93 94 int rxq_index; 95 struct mlx4_en_priv *priv; 96 u32 flow_id; /* RFS infrastructure id */ 97 int id; /* mlx4_en driver id */ 98 u64 reg_id; /* Flow steering API id */ 99 u8 activated; /* Used to prevent expiry before filter 100 * is attached 101 */ 102 struct hlist_node filter_chain; 103 }; 104 105 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 106 107 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 108 { 109 switch (ip_proto) { 110 case IPPROTO_UDP: 111 return MLX4_NET_TRANS_RULE_ID_UDP; 112 case IPPROTO_TCP: 113 return MLX4_NET_TRANS_RULE_ID_TCP; 114 default: 115 return MLX4_NET_TRANS_RULE_NUM; 116 } 117 }; 118 119 static void mlx4_en_filter_work(struct work_struct *work) 120 { 121 struct mlx4_en_filter *filter = container_of(work, 122 struct mlx4_en_filter, 123 work); 124 struct mlx4_en_priv *priv = filter->priv; 125 struct mlx4_spec_list spec_tcp_udp = { 126 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 127 { 128 .tcp_udp = { 129 .dst_port = filter->dst_port, 130 .dst_port_msk = (__force __be16)-1, 131 .src_port = filter->src_port, 132 .src_port_msk = (__force __be16)-1, 133 }, 134 }, 135 }; 136 struct mlx4_spec_list spec_ip = { 137 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 138 { 139 .ipv4 = { 140 .dst_ip = filter->dst_ip, 141 .dst_ip_msk = (__force __be32)-1, 142 .src_ip = filter->src_ip, 143 .src_ip_msk = (__force __be32)-1, 144 }, 145 }, 146 }; 147 struct mlx4_spec_list spec_eth = { 148 .id = MLX4_NET_TRANS_RULE_ID_ETH, 149 }; 150 struct mlx4_net_trans_rule rule = { 151 .list = LIST_HEAD_INIT(rule.list), 152 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 153 .exclusive = 1, 154 .allow_loopback = 1, 155 .promisc_mode = MLX4_FS_REGULAR, 156 .port = priv->port, 157 .priority = MLX4_DOMAIN_RFS, 158 }; 159 int rc; 160 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 161 162 if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { 163 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 164 filter->ip_proto); 165 goto ignore; 166 } 167 list_add_tail(&spec_eth.list, &rule.list); 168 list_add_tail(&spec_ip.list, &rule.list); 169 list_add_tail(&spec_tcp_udp.list, &rule.list); 170 171 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 172 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 173 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 174 175 filter->activated = 0; 176 177 if (filter->reg_id) { 178 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 179 if (rc && rc != -ENOENT) 180 en_err(priv, "Error detaching flow. rc = %d\n", rc); 181 } 182 183 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 184 if (rc) 185 en_err(priv, "Error attaching flow. err = %d\n", rc); 186 187 ignore: 188 mlx4_en_filter_rfs_expire(priv); 189 190 filter->activated = 1; 191 } 192 193 static inline struct hlist_head * 194 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 195 __be16 src_port, __be16 dst_port) 196 { 197 unsigned long l; 198 int bucket_idx; 199 200 l = (__force unsigned long)src_port | 201 ((__force unsigned long)dst_port << 2); 202 l ^= (__force unsigned long)(src_ip ^ dst_ip); 203 204 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 205 206 return &priv->filter_hash[bucket_idx]; 207 } 208 209 static struct mlx4_en_filter * 210 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 211 __be32 dst_ip, u8 ip_proto, __be16 src_port, 212 __be16 dst_port, u32 flow_id) 213 { 214 struct mlx4_en_filter *filter = NULL; 215 216 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 217 if (!filter) 218 return NULL; 219 220 filter->priv = priv; 221 filter->rxq_index = rxq_index; 222 INIT_WORK(&filter->work, mlx4_en_filter_work); 223 224 filter->src_ip = src_ip; 225 filter->dst_ip = dst_ip; 226 filter->ip_proto = ip_proto; 227 filter->src_port = src_port; 228 filter->dst_port = dst_port; 229 230 filter->flow_id = flow_id; 231 232 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 233 234 list_add_tail(&filter->next, &priv->filters); 235 hlist_add_head(&filter->filter_chain, 236 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 237 dst_port)); 238 239 return filter; 240 } 241 242 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 243 { 244 struct mlx4_en_priv *priv = filter->priv; 245 int rc; 246 247 list_del(&filter->next); 248 249 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 250 if (rc && rc != -ENOENT) 251 en_err(priv, "Error detaching flow. rc = %d\n", rc); 252 253 kfree(filter); 254 } 255 256 static inline struct mlx4_en_filter * 257 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 258 u8 ip_proto, __be16 src_port, __be16 dst_port) 259 { 260 struct mlx4_en_filter *filter; 261 struct mlx4_en_filter *ret = NULL; 262 263 hlist_for_each_entry(filter, 264 filter_hash_bucket(priv, src_ip, dst_ip, 265 src_port, dst_port), 266 filter_chain) { 267 if (filter->src_ip == src_ip && 268 filter->dst_ip == dst_ip && 269 filter->ip_proto == ip_proto && 270 filter->src_port == src_port && 271 filter->dst_port == dst_port) { 272 ret = filter; 273 break; 274 } 275 } 276 277 return ret; 278 } 279 280 static int 281 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 282 u16 rxq_index, u32 flow_id) 283 { 284 struct mlx4_en_priv *priv = netdev_priv(net_dev); 285 struct mlx4_en_filter *filter; 286 const struct iphdr *ip; 287 const __be16 *ports; 288 u8 ip_proto; 289 __be32 src_ip; 290 __be32 dst_ip; 291 __be16 src_port; 292 __be16 dst_port; 293 int nhoff = skb_network_offset(skb); 294 int ret = 0; 295 296 if (skb->protocol != htons(ETH_P_IP)) 297 return -EPROTONOSUPPORT; 298 299 ip = (const struct iphdr *)(skb->data + nhoff); 300 if (ip_is_fragment(ip)) 301 return -EPROTONOSUPPORT; 302 303 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 304 return -EPROTONOSUPPORT; 305 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 306 307 ip_proto = ip->protocol; 308 src_ip = ip->saddr; 309 dst_ip = ip->daddr; 310 src_port = ports[0]; 311 dst_port = ports[1]; 312 313 spin_lock_bh(&priv->filters_lock); 314 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 315 src_port, dst_port); 316 if (filter) { 317 if (filter->rxq_index == rxq_index) 318 goto out; 319 320 filter->rxq_index = rxq_index; 321 } else { 322 filter = mlx4_en_filter_alloc(priv, rxq_index, 323 src_ip, dst_ip, ip_proto, 324 src_port, dst_port, flow_id); 325 if (!filter) { 326 ret = -ENOMEM; 327 goto err; 328 } 329 } 330 331 queue_work(priv->mdev->workqueue, &filter->work); 332 333 out: 334 ret = filter->id; 335 err: 336 spin_unlock_bh(&priv->filters_lock); 337 338 return ret; 339 } 340 341 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 342 { 343 struct mlx4_en_filter *filter, *tmp; 344 LIST_HEAD(del_list); 345 346 spin_lock_bh(&priv->filters_lock); 347 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 348 list_move(&filter->next, &del_list); 349 hlist_del(&filter->filter_chain); 350 } 351 spin_unlock_bh(&priv->filters_lock); 352 353 list_for_each_entry_safe(filter, tmp, &del_list, next) { 354 cancel_work_sync(&filter->work); 355 mlx4_en_filter_free(filter); 356 } 357 } 358 359 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 360 { 361 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 362 LIST_HEAD(del_list); 363 int i = 0; 364 365 spin_lock_bh(&priv->filters_lock); 366 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 367 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 368 break; 369 370 if (filter->activated && 371 !work_pending(&filter->work) && 372 rps_may_expire_flow(priv->dev, 373 filter->rxq_index, filter->flow_id, 374 filter->id)) { 375 list_move(&filter->next, &del_list); 376 hlist_del(&filter->filter_chain); 377 } else 378 last_filter = filter; 379 380 i++; 381 } 382 383 if (last_filter && (&last_filter->next != priv->filters.next)) 384 list_move(&priv->filters, &last_filter->next); 385 386 spin_unlock_bh(&priv->filters_lock); 387 388 list_for_each_entry_safe(filter, tmp, &del_list, next) 389 mlx4_en_filter_free(filter); 390 } 391 #endif 392 393 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 394 __be16 proto, u16 vid) 395 { 396 struct mlx4_en_priv *priv = netdev_priv(dev); 397 struct mlx4_en_dev *mdev = priv->mdev; 398 int err; 399 int idx; 400 401 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 402 403 set_bit(vid, priv->active_vlans); 404 405 /* Add VID to port VLAN filter */ 406 mutex_lock(&mdev->state_lock); 407 if (mdev->device_up && priv->port_up) { 408 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 409 if (err) { 410 en_err(priv, "Failed configuring VLAN filter\n"); 411 goto out; 412 } 413 } 414 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); 415 if (err) 416 en_dbg(HW, priv, "Failed adding vlan %d\n", vid); 417 418 out: 419 mutex_unlock(&mdev->state_lock); 420 return err; 421 } 422 423 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 424 __be16 proto, u16 vid) 425 { 426 struct mlx4_en_priv *priv = netdev_priv(dev); 427 struct mlx4_en_dev *mdev = priv->mdev; 428 int err = 0; 429 430 en_dbg(HW, priv, "Killing VID:%d\n", vid); 431 432 clear_bit(vid, priv->active_vlans); 433 434 /* Remove VID from port VLAN filter */ 435 mutex_lock(&mdev->state_lock); 436 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 437 438 if (mdev->device_up && priv->port_up) { 439 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 440 if (err) 441 en_err(priv, "Failed configuring VLAN filter\n"); 442 } 443 mutex_unlock(&mdev->state_lock); 444 445 return err; 446 } 447 448 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 449 { 450 int i; 451 for (i = ETH_ALEN - 1; i >= 0; --i) { 452 dst_mac[i] = src_mac & 0xff; 453 src_mac >>= 8; 454 } 455 memset(&dst_mac[ETH_ALEN], 0, 2); 456 } 457 458 459 static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, 460 int qpn, u64 *reg_id) 461 { 462 int err; 463 464 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || 465 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) 466 return 0; /* do nothing */ 467 468 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 469 MLX4_DOMAIN_NIC, reg_id); 470 if (err) { 471 en_err(priv, "failed to add vxlan steering rule, err %d\n", err); 472 return err; 473 } 474 en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); 475 return 0; 476 } 477 478 479 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 480 unsigned char *mac, int *qpn, u64 *reg_id) 481 { 482 struct mlx4_en_dev *mdev = priv->mdev; 483 struct mlx4_dev *dev = mdev->dev; 484 int err; 485 486 switch (dev->caps.steering_mode) { 487 case MLX4_STEERING_MODE_B0: { 488 struct mlx4_qp qp; 489 u8 gid[16] = {0}; 490 491 qp.qpn = *qpn; 492 memcpy(&gid[10], mac, ETH_ALEN); 493 gid[5] = priv->port; 494 495 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 496 break; 497 } 498 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 499 struct mlx4_spec_list spec_eth = { {NULL} }; 500 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 501 502 struct mlx4_net_trans_rule rule = { 503 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 504 .exclusive = 0, 505 .allow_loopback = 1, 506 .promisc_mode = MLX4_FS_REGULAR, 507 .priority = MLX4_DOMAIN_NIC, 508 }; 509 510 rule.port = priv->port; 511 rule.qpn = *qpn; 512 INIT_LIST_HEAD(&rule.list); 513 514 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 515 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 516 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 517 list_add_tail(&spec_eth.list, &rule.list); 518 519 err = mlx4_flow_attach(dev, &rule, reg_id); 520 break; 521 } 522 default: 523 return -EINVAL; 524 } 525 if (err) 526 en_warn(priv, "Failed Attaching Unicast\n"); 527 528 return err; 529 } 530 531 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 532 unsigned char *mac, int qpn, u64 reg_id) 533 { 534 struct mlx4_en_dev *mdev = priv->mdev; 535 struct mlx4_dev *dev = mdev->dev; 536 537 switch (dev->caps.steering_mode) { 538 case MLX4_STEERING_MODE_B0: { 539 struct mlx4_qp qp; 540 u8 gid[16] = {0}; 541 542 qp.qpn = qpn; 543 memcpy(&gid[10], mac, ETH_ALEN); 544 gid[5] = priv->port; 545 546 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 547 break; 548 } 549 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 550 mlx4_flow_detach(dev, reg_id); 551 break; 552 } 553 default: 554 en_err(priv, "Invalid steering mode.\n"); 555 } 556 } 557 558 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 559 { 560 struct mlx4_en_dev *mdev = priv->mdev; 561 struct mlx4_dev *dev = mdev->dev; 562 int index = 0; 563 int err = 0; 564 int *qpn = &priv->base_qpn; 565 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 566 567 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 568 priv->dev->dev_addr); 569 index = mlx4_register_mac(dev, priv->port, mac); 570 if (index < 0) { 571 err = index; 572 en_err(priv, "Failed adding MAC: %pM\n", 573 priv->dev->dev_addr); 574 return err; 575 } 576 577 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 578 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 579 *qpn = base_qpn + index; 580 return 0; 581 } 582 583 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); 584 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 585 if (err) { 586 en_err(priv, "Failed to reserve qp for mac registration\n"); 587 mlx4_unregister_mac(dev, priv->port, mac); 588 return err; 589 } 590 591 return 0; 592 } 593 594 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 595 { 596 struct mlx4_en_dev *mdev = priv->mdev; 597 struct mlx4_dev *dev = mdev->dev; 598 int qpn = priv->base_qpn; 599 600 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 601 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); 602 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 603 priv->dev->dev_addr); 604 mlx4_unregister_mac(dev, priv->port, mac); 605 } else { 606 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 607 priv->port, qpn); 608 mlx4_qp_release_range(dev, qpn, 1); 609 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 610 } 611 } 612 613 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 614 unsigned char *new_mac, unsigned char *prev_mac) 615 { 616 struct mlx4_en_dev *mdev = priv->mdev; 617 struct mlx4_dev *dev = mdev->dev; 618 int err = 0; 619 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); 620 621 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 622 struct hlist_head *bucket; 623 unsigned int mac_hash; 624 struct mlx4_mac_entry *entry; 625 struct hlist_node *tmp; 626 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); 627 628 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 629 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 630 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 631 mlx4_en_uc_steer_release(priv, entry->mac, 632 qpn, entry->reg_id); 633 mlx4_unregister_mac(dev, priv->port, 634 prev_mac_u64); 635 hlist_del_rcu(&entry->hlist); 636 synchronize_rcu(); 637 memcpy(entry->mac, new_mac, ETH_ALEN); 638 entry->reg_id = 0; 639 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 640 hlist_add_head_rcu(&entry->hlist, 641 &priv->mac_hash[mac_hash]); 642 mlx4_register_mac(dev, priv->port, new_mac_u64); 643 err = mlx4_en_uc_steer_add(priv, new_mac, 644 &qpn, 645 &entry->reg_id); 646 if (err) 647 return err; 648 if (priv->tunnel_reg_id) { 649 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 650 priv->tunnel_reg_id = 0; 651 } 652 err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, 653 &priv->tunnel_reg_id); 654 return err; 655 } 656 } 657 return -EINVAL; 658 } 659 660 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 661 } 662 663 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, 664 unsigned char new_mac[ETH_ALEN + 2]) 665 { 666 int err = 0; 667 668 if (priv->port_up) { 669 /* Remove old MAC and insert the new one */ 670 err = mlx4_en_replace_mac(priv, priv->base_qpn, 671 new_mac, priv->current_mac); 672 if (err) 673 en_err(priv, "Failed changing HW MAC address\n"); 674 } else 675 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 676 677 if (!err) 678 memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); 679 680 return err; 681 } 682 683 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 684 { 685 struct mlx4_en_priv *priv = netdev_priv(dev); 686 struct mlx4_en_dev *mdev = priv->mdev; 687 struct sockaddr *saddr = addr; 688 unsigned char new_mac[ETH_ALEN + 2]; 689 int err; 690 691 if (!is_valid_ether_addr(saddr->sa_data)) 692 return -EADDRNOTAVAIL; 693 694 mutex_lock(&mdev->state_lock); 695 memcpy(new_mac, saddr->sa_data, ETH_ALEN); 696 err = mlx4_en_do_set_mac(priv, new_mac); 697 if (!err) 698 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 699 mutex_unlock(&mdev->state_lock); 700 701 return err; 702 } 703 704 static void mlx4_en_clear_list(struct net_device *dev) 705 { 706 struct mlx4_en_priv *priv = netdev_priv(dev); 707 struct mlx4_en_mc_list *tmp, *mc_to_del; 708 709 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 710 list_del(&mc_to_del->list); 711 kfree(mc_to_del); 712 } 713 } 714 715 static void mlx4_en_cache_mclist(struct net_device *dev) 716 { 717 struct mlx4_en_priv *priv = netdev_priv(dev); 718 struct netdev_hw_addr *ha; 719 struct mlx4_en_mc_list *tmp; 720 721 mlx4_en_clear_list(dev); 722 netdev_for_each_mc_addr(ha, dev) { 723 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 724 if (!tmp) { 725 mlx4_en_clear_list(dev); 726 return; 727 } 728 memcpy(tmp->addr, ha->addr, ETH_ALEN); 729 list_add_tail(&tmp->list, &priv->mc_list); 730 } 731 } 732 733 static void update_mclist_flags(struct mlx4_en_priv *priv, 734 struct list_head *dst, 735 struct list_head *src) 736 { 737 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 738 bool found; 739 740 /* Find all the entries that should be removed from dst, 741 * These are the entries that are not found in src 742 */ 743 list_for_each_entry(dst_tmp, dst, list) { 744 found = false; 745 list_for_each_entry(src_tmp, src, list) { 746 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 747 found = true; 748 break; 749 } 750 } 751 if (!found) 752 dst_tmp->action = MCLIST_REM; 753 } 754 755 /* Add entries that exist in src but not in dst 756 * mark them as need to add 757 */ 758 list_for_each_entry(src_tmp, src, list) { 759 found = false; 760 list_for_each_entry(dst_tmp, dst, list) { 761 if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { 762 dst_tmp->action = MCLIST_NONE; 763 found = true; 764 break; 765 } 766 } 767 if (!found) { 768 new_mc = kmemdup(src_tmp, 769 sizeof(struct mlx4_en_mc_list), 770 GFP_KERNEL); 771 if (!new_mc) 772 return; 773 774 new_mc->action = MCLIST_ADD; 775 list_add_tail(&new_mc->list, dst); 776 } 777 } 778 } 779 780 static void mlx4_en_set_rx_mode(struct net_device *dev) 781 { 782 struct mlx4_en_priv *priv = netdev_priv(dev); 783 784 if (!priv->port_up) 785 return; 786 787 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 788 } 789 790 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 791 struct mlx4_en_dev *mdev) 792 { 793 int err = 0; 794 795 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 796 if (netif_msg_rx_status(priv)) 797 en_warn(priv, "Entering promiscuous mode\n"); 798 priv->flags |= MLX4_EN_FLAG_PROMISC; 799 800 /* Enable promiscouos mode */ 801 switch (mdev->dev->caps.steering_mode) { 802 case MLX4_STEERING_MODE_DEVICE_MANAGED: 803 err = mlx4_flow_steer_promisc_add(mdev->dev, 804 priv->port, 805 priv->base_qpn, 806 MLX4_FS_ALL_DEFAULT); 807 if (err) 808 en_err(priv, "Failed enabling promiscuous mode\n"); 809 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 810 break; 811 812 case MLX4_STEERING_MODE_B0: 813 err = mlx4_unicast_promisc_add(mdev->dev, 814 priv->base_qpn, 815 priv->port); 816 if (err) 817 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 818 819 /* Add the default qp number as multicast 820 * promisc 821 */ 822 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 823 err = mlx4_multicast_promisc_add(mdev->dev, 824 priv->base_qpn, 825 priv->port); 826 if (err) 827 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 828 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 829 } 830 break; 831 832 case MLX4_STEERING_MODE_A0: 833 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 834 priv->port, 835 priv->base_qpn, 836 1); 837 if (err) 838 en_err(priv, "Failed enabling promiscuous mode\n"); 839 break; 840 } 841 842 /* Disable port multicast filter (unconditionally) */ 843 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 844 0, MLX4_MCAST_DISABLE); 845 if (err) 846 en_err(priv, "Failed disabling multicast filter\n"); 847 } 848 } 849 850 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 851 struct mlx4_en_dev *mdev) 852 { 853 int err = 0; 854 855 if (netif_msg_rx_status(priv)) 856 en_warn(priv, "Leaving promiscuous mode\n"); 857 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 858 859 /* Disable promiscouos mode */ 860 switch (mdev->dev->caps.steering_mode) { 861 case MLX4_STEERING_MODE_DEVICE_MANAGED: 862 err = mlx4_flow_steer_promisc_remove(mdev->dev, 863 priv->port, 864 MLX4_FS_ALL_DEFAULT); 865 if (err) 866 en_err(priv, "Failed disabling promiscuous mode\n"); 867 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 868 break; 869 870 case MLX4_STEERING_MODE_B0: 871 err = mlx4_unicast_promisc_remove(mdev->dev, 872 priv->base_qpn, 873 priv->port); 874 if (err) 875 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 876 /* Disable Multicast promisc */ 877 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 878 err = mlx4_multicast_promisc_remove(mdev->dev, 879 priv->base_qpn, 880 priv->port); 881 if (err) 882 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 883 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 884 } 885 break; 886 887 case MLX4_STEERING_MODE_A0: 888 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 889 priv->port, 890 priv->base_qpn, 0); 891 if (err) 892 en_err(priv, "Failed disabling promiscuous mode\n"); 893 break; 894 } 895 } 896 897 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 898 struct net_device *dev, 899 struct mlx4_en_dev *mdev) 900 { 901 struct mlx4_en_mc_list *mclist, *tmp; 902 u64 mcast_addr = 0; 903 u8 mc_list[16] = {0}; 904 int err = 0; 905 906 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 907 if (dev->flags & IFF_ALLMULTI) { 908 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 909 0, MLX4_MCAST_DISABLE); 910 if (err) 911 en_err(priv, "Failed disabling multicast filter\n"); 912 913 /* Add the default qp number as multicast promisc */ 914 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 915 switch (mdev->dev->caps.steering_mode) { 916 case MLX4_STEERING_MODE_DEVICE_MANAGED: 917 err = mlx4_flow_steer_promisc_add(mdev->dev, 918 priv->port, 919 priv->base_qpn, 920 MLX4_FS_MC_DEFAULT); 921 break; 922 923 case MLX4_STEERING_MODE_B0: 924 err = mlx4_multicast_promisc_add(mdev->dev, 925 priv->base_qpn, 926 priv->port); 927 break; 928 929 case MLX4_STEERING_MODE_A0: 930 break; 931 } 932 if (err) 933 en_err(priv, "Failed entering multicast promisc mode\n"); 934 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 935 } 936 } else { 937 /* Disable Multicast promisc */ 938 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 939 switch (mdev->dev->caps.steering_mode) { 940 case MLX4_STEERING_MODE_DEVICE_MANAGED: 941 err = mlx4_flow_steer_promisc_remove(mdev->dev, 942 priv->port, 943 MLX4_FS_MC_DEFAULT); 944 break; 945 946 case MLX4_STEERING_MODE_B0: 947 err = mlx4_multicast_promisc_remove(mdev->dev, 948 priv->base_qpn, 949 priv->port); 950 break; 951 952 case MLX4_STEERING_MODE_A0: 953 break; 954 } 955 if (err) 956 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 957 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 958 } 959 960 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 961 0, MLX4_MCAST_DISABLE); 962 if (err) 963 en_err(priv, "Failed disabling multicast filter\n"); 964 965 /* Flush mcast filter and init it with broadcast address */ 966 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 967 1, MLX4_MCAST_CONFIG); 968 969 /* Update multicast list - we cache all addresses so they won't 970 * change while HW is updated holding the command semaphor */ 971 netif_addr_lock_bh(dev); 972 mlx4_en_cache_mclist(dev); 973 netif_addr_unlock_bh(dev); 974 list_for_each_entry(mclist, &priv->mc_list, list) { 975 mcast_addr = mlx4_mac_to_u64(mclist->addr); 976 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 977 mcast_addr, 0, MLX4_MCAST_CONFIG); 978 } 979 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 980 0, MLX4_MCAST_ENABLE); 981 if (err) 982 en_err(priv, "Failed enabling multicast filter\n"); 983 984 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 985 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 986 if (mclist->action == MCLIST_REM) { 987 /* detach this address and delete from list */ 988 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 989 mc_list[5] = priv->port; 990 err = mlx4_multicast_detach(mdev->dev, 991 &priv->rss_map.indir_qp, 992 mc_list, 993 MLX4_PROT_ETH, 994 mclist->reg_id); 995 if (err) 996 en_err(priv, "Fail to detach multicast address\n"); 997 998 if (mclist->tunnel_reg_id) { 999 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); 1000 if (err) 1001 en_err(priv, "Failed to detach multicast address\n"); 1002 } 1003 1004 /* remove from list */ 1005 list_del(&mclist->list); 1006 kfree(mclist); 1007 } else if (mclist->action == MCLIST_ADD) { 1008 /* attach the address */ 1009 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1010 /* needed for B0 steering support */ 1011 mc_list[5] = priv->port; 1012 err = mlx4_multicast_attach(mdev->dev, 1013 &priv->rss_map.indir_qp, 1014 mc_list, 1015 priv->port, 0, 1016 MLX4_PROT_ETH, 1017 &mclist->reg_id); 1018 if (err) 1019 en_err(priv, "Fail to attach multicast address\n"); 1020 1021 err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, 1022 &mclist->tunnel_reg_id); 1023 if (err) 1024 en_err(priv, "Failed to attach multicast address\n"); 1025 } 1026 } 1027 } 1028 } 1029 1030 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1031 struct net_device *dev, 1032 struct mlx4_en_dev *mdev) 1033 { 1034 struct netdev_hw_addr *ha; 1035 struct mlx4_mac_entry *entry; 1036 struct hlist_node *tmp; 1037 bool found; 1038 u64 mac; 1039 int err = 0; 1040 struct hlist_head *bucket; 1041 unsigned int i; 1042 int removed = 0; 1043 u32 prev_flags; 1044 1045 /* Note that we do not need to protect our mac_hash traversal with rcu, 1046 * since all modification code is protected by mdev->state_lock 1047 */ 1048 1049 /* find what to remove */ 1050 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1051 bucket = &priv->mac_hash[i]; 1052 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1053 found = false; 1054 netdev_for_each_uc_addr(ha, dev) { 1055 if (ether_addr_equal_64bits(entry->mac, 1056 ha->addr)) { 1057 found = true; 1058 break; 1059 } 1060 } 1061 1062 /* MAC address of the port is not in uc list */ 1063 if (ether_addr_equal_64bits(entry->mac, 1064 priv->current_mac)) 1065 found = true; 1066 1067 if (!found) { 1068 mac = mlx4_mac_to_u64(entry->mac); 1069 mlx4_en_uc_steer_release(priv, entry->mac, 1070 priv->base_qpn, 1071 entry->reg_id); 1072 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1073 1074 hlist_del_rcu(&entry->hlist); 1075 kfree_rcu(entry, rcu); 1076 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1077 entry->mac, priv->port); 1078 ++removed; 1079 } 1080 } 1081 } 1082 1083 /* if we didn't remove anything, there is no use in trying to add 1084 * again once we are in a forced promisc mode state 1085 */ 1086 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1087 return; 1088 1089 prev_flags = priv->flags; 1090 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1091 1092 /* find what to add */ 1093 netdev_for_each_uc_addr(ha, dev) { 1094 found = false; 1095 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1096 hlist_for_each_entry(entry, bucket, hlist) { 1097 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1098 found = true; 1099 break; 1100 } 1101 } 1102 1103 if (!found) { 1104 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1105 if (!entry) { 1106 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1107 ha->addr, priv->port); 1108 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1109 break; 1110 } 1111 mac = mlx4_mac_to_u64(ha->addr); 1112 memcpy(entry->mac, ha->addr, ETH_ALEN); 1113 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1114 if (err < 0) { 1115 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1116 ha->addr, priv->port, err); 1117 kfree(entry); 1118 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1119 break; 1120 } 1121 err = mlx4_en_uc_steer_add(priv, ha->addr, 1122 &priv->base_qpn, 1123 &entry->reg_id); 1124 if (err) { 1125 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1126 ha->addr, priv->port, err); 1127 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1128 kfree(entry); 1129 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1130 break; 1131 } else { 1132 unsigned int mac_hash; 1133 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1134 ha->addr, priv->port); 1135 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1136 bucket = &priv->mac_hash[mac_hash]; 1137 hlist_add_head_rcu(&entry->hlist, bucket); 1138 } 1139 } 1140 } 1141 1142 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1143 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1144 priv->port); 1145 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1146 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1147 priv->port); 1148 } 1149 } 1150 1151 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1152 { 1153 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1154 rx_mode_task); 1155 struct mlx4_en_dev *mdev = priv->mdev; 1156 struct net_device *dev = priv->dev; 1157 1158 mutex_lock(&mdev->state_lock); 1159 if (!mdev->device_up) { 1160 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1161 goto out; 1162 } 1163 if (!priv->port_up) { 1164 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1165 goto out; 1166 } 1167 1168 if (!netif_carrier_ok(dev)) { 1169 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1170 if (priv->port_state.link_state) { 1171 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1172 netif_carrier_on(dev); 1173 en_dbg(LINK, priv, "Link Up\n"); 1174 } 1175 } 1176 } 1177 1178 if (dev->priv_flags & IFF_UNICAST_FLT) 1179 mlx4_en_do_uc_filter(priv, dev, mdev); 1180 1181 /* Promsicuous mode: disable all filters */ 1182 if ((dev->flags & IFF_PROMISC) || 1183 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1184 mlx4_en_set_promisc_mode(priv, mdev); 1185 goto out; 1186 } 1187 1188 /* Not in promiscuous mode */ 1189 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1190 mlx4_en_clear_promisc_mode(priv, mdev); 1191 1192 mlx4_en_do_multicast(priv, dev, mdev); 1193 out: 1194 mutex_unlock(&mdev->state_lock); 1195 } 1196 1197 #ifdef CONFIG_NET_POLL_CONTROLLER 1198 static void mlx4_en_netpoll(struct net_device *dev) 1199 { 1200 struct mlx4_en_priv *priv = netdev_priv(dev); 1201 struct mlx4_en_cq *cq; 1202 int i; 1203 1204 for (i = 0; i < priv->rx_ring_num; i++) { 1205 cq = priv->rx_cq[i]; 1206 napi_schedule(&cq->napi); 1207 } 1208 } 1209 #endif 1210 1211 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1212 { 1213 u64 reg_id; 1214 int err = 0; 1215 int *qpn = &priv->base_qpn; 1216 struct mlx4_mac_entry *entry; 1217 1218 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 1219 if (err) 1220 return err; 1221 1222 err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, 1223 &priv->tunnel_reg_id); 1224 if (err) 1225 goto tunnel_err; 1226 1227 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1228 if (!entry) { 1229 err = -ENOMEM; 1230 goto alloc_err; 1231 } 1232 1233 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 1234 memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); 1235 entry->reg_id = reg_id; 1236 hlist_add_head_rcu(&entry->hlist, 1237 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 1238 1239 return 0; 1240 1241 alloc_err: 1242 if (priv->tunnel_reg_id) 1243 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1244 1245 tunnel_err: 1246 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 1247 return err; 1248 } 1249 1250 static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv) 1251 { 1252 u64 mac; 1253 unsigned int i; 1254 int qpn = priv->base_qpn; 1255 struct hlist_head *bucket; 1256 struct hlist_node *tmp; 1257 struct mlx4_mac_entry *entry; 1258 1259 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1260 bucket = &priv->mac_hash[i]; 1261 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1262 mac = mlx4_mac_to_u64(entry->mac); 1263 en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n", 1264 entry->mac); 1265 mlx4_en_uc_steer_release(priv, entry->mac, 1266 qpn, entry->reg_id); 1267 1268 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); 1269 hlist_del_rcu(&entry->hlist); 1270 kfree_rcu(entry, rcu); 1271 } 1272 } 1273 1274 if (priv->tunnel_reg_id) { 1275 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); 1276 priv->tunnel_reg_id = 0; 1277 } 1278 } 1279 1280 static void mlx4_en_tx_timeout(struct net_device *dev) 1281 { 1282 struct mlx4_en_priv *priv = netdev_priv(dev); 1283 struct mlx4_en_dev *mdev = priv->mdev; 1284 int i; 1285 1286 if (netif_msg_timer(priv)) 1287 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1288 1289 for (i = 0; i < priv->tx_ring_num; i++) { 1290 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 1291 continue; 1292 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1293 i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, 1294 priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); 1295 } 1296 1297 priv->port_stats.tx_timeout++; 1298 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1299 queue_work(mdev->workqueue, &priv->watchdog_task); 1300 } 1301 1302 1303 static struct rtnl_link_stats64 * 1304 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1305 { 1306 struct mlx4_en_priv *priv = netdev_priv(dev); 1307 1308 spin_lock_bh(&priv->stats_lock); 1309 netdev_stats_to_stats64(stats, &dev->stats); 1310 spin_unlock_bh(&priv->stats_lock); 1311 1312 return stats; 1313 } 1314 1315 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1316 { 1317 struct mlx4_en_cq *cq; 1318 int i; 1319 1320 /* If we haven't received a specific coalescing setting 1321 * (module param), we set the moderation parameters as follows: 1322 * - moder_cnt is set to the number of mtu sized packets to 1323 * satisfy our coalescing target. 1324 * - moder_time is set to a fixed value. 1325 */ 1326 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1327 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1328 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1329 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1330 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1331 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1332 1333 /* Setup cq moderation params */ 1334 for (i = 0; i < priv->rx_ring_num; i++) { 1335 cq = priv->rx_cq[i]; 1336 cq->moder_cnt = priv->rx_frames; 1337 cq->moder_time = priv->rx_usecs; 1338 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1339 priv->last_moder_packets[i] = 0; 1340 priv->last_moder_bytes[i] = 0; 1341 } 1342 1343 for (i = 0; i < priv->tx_ring_num; i++) { 1344 cq = priv->tx_cq[i]; 1345 cq->moder_cnt = priv->tx_frames; 1346 cq->moder_time = priv->tx_usecs; 1347 } 1348 1349 /* Reset auto-moderation params */ 1350 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1351 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1352 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1353 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1354 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1355 priv->adaptive_rx_coal = 1; 1356 priv->last_moder_jiffies = 0; 1357 priv->last_moder_tx_packets = 0; 1358 } 1359 1360 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1361 { 1362 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1363 struct mlx4_en_cq *cq; 1364 unsigned long packets; 1365 unsigned long rate; 1366 unsigned long avg_pkt_size; 1367 unsigned long rx_packets; 1368 unsigned long rx_bytes; 1369 unsigned long rx_pkt_diff; 1370 int moder_time; 1371 int ring, err; 1372 1373 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1374 return; 1375 1376 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1377 spin_lock_bh(&priv->stats_lock); 1378 rx_packets = priv->rx_ring[ring]->packets; 1379 rx_bytes = priv->rx_ring[ring]->bytes; 1380 spin_unlock_bh(&priv->stats_lock); 1381 1382 rx_pkt_diff = ((unsigned long) (rx_packets - 1383 priv->last_moder_packets[ring])); 1384 packets = rx_pkt_diff; 1385 rate = packets * HZ / period; 1386 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 1387 priv->last_moder_bytes[ring])) / packets : 0; 1388 1389 /* Apply auto-moderation only when packet rate 1390 * exceeds a rate that it matters */ 1391 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1392 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1393 if (rate < priv->pkt_rate_low) 1394 moder_time = priv->rx_usecs_low; 1395 else if (rate > priv->pkt_rate_high) 1396 moder_time = priv->rx_usecs_high; 1397 else 1398 moder_time = (rate - priv->pkt_rate_low) * 1399 (priv->rx_usecs_high - priv->rx_usecs_low) / 1400 (priv->pkt_rate_high - priv->pkt_rate_low) + 1401 priv->rx_usecs_low; 1402 } else { 1403 moder_time = priv->rx_usecs_low; 1404 } 1405 1406 if (moder_time != priv->last_moder_time[ring]) { 1407 priv->last_moder_time[ring] = moder_time; 1408 cq = priv->rx_cq[ring]; 1409 cq->moder_time = moder_time; 1410 cq->moder_cnt = priv->rx_frames; 1411 err = mlx4_en_set_cq_moder(priv, cq); 1412 if (err) 1413 en_err(priv, "Failed modifying moderation for cq:%d\n", 1414 ring); 1415 } 1416 priv->last_moder_packets[ring] = rx_packets; 1417 priv->last_moder_bytes[ring] = rx_bytes; 1418 } 1419 1420 priv->last_moder_jiffies = jiffies; 1421 } 1422 1423 static void mlx4_en_do_get_stats(struct work_struct *work) 1424 { 1425 struct delayed_work *delay = to_delayed_work(work); 1426 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1427 stats_task); 1428 struct mlx4_en_dev *mdev = priv->mdev; 1429 int err; 1430 1431 mutex_lock(&mdev->state_lock); 1432 if (mdev->device_up) { 1433 if (priv->port_up) { 1434 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1435 if (err) 1436 en_dbg(HW, priv, "Could not update stats\n"); 1437 1438 mlx4_en_auto_moderation(priv); 1439 } 1440 1441 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1442 } 1443 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1444 mlx4_en_do_set_mac(priv, priv->current_mac); 1445 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1446 } 1447 mutex_unlock(&mdev->state_lock); 1448 } 1449 1450 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1451 * periodically 1452 */ 1453 static void mlx4_en_service_task(struct work_struct *work) 1454 { 1455 struct delayed_work *delay = to_delayed_work(work); 1456 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1457 service_task); 1458 struct mlx4_en_dev *mdev = priv->mdev; 1459 1460 mutex_lock(&mdev->state_lock); 1461 if (mdev->device_up) { 1462 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1463 mlx4_en_ptp_overflow_check(mdev); 1464 1465 mlx4_en_recover_from_oom(priv); 1466 queue_delayed_work(mdev->workqueue, &priv->service_task, 1467 SERVICE_TASK_DELAY); 1468 } 1469 mutex_unlock(&mdev->state_lock); 1470 } 1471 1472 static void mlx4_en_linkstate(struct work_struct *work) 1473 { 1474 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1475 linkstate_task); 1476 struct mlx4_en_dev *mdev = priv->mdev; 1477 int linkstate = priv->link_state; 1478 1479 mutex_lock(&mdev->state_lock); 1480 /* If observable port state changed set carrier state and 1481 * report to system log */ 1482 if (priv->last_link_state != linkstate) { 1483 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1484 en_info(priv, "Link Down\n"); 1485 netif_carrier_off(priv->dev); 1486 } else { 1487 en_info(priv, "Link Up\n"); 1488 netif_carrier_on(priv->dev); 1489 } 1490 } 1491 priv->last_link_state = linkstate; 1492 mutex_unlock(&mdev->state_lock); 1493 } 1494 1495 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1496 { 1497 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; 1498 int numa_node = priv->mdev->dev->numa_node; 1499 1500 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) 1501 return -ENOMEM; 1502 1503 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), 1504 ring->affinity_mask); 1505 return 0; 1506 } 1507 1508 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) 1509 { 1510 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); 1511 } 1512 1513 int mlx4_en_start_port(struct net_device *dev) 1514 { 1515 struct mlx4_en_priv *priv = netdev_priv(dev); 1516 struct mlx4_en_dev *mdev = priv->mdev; 1517 struct mlx4_en_cq *cq; 1518 struct mlx4_en_tx_ring *tx_ring; 1519 int rx_index = 0; 1520 int tx_index = 0; 1521 int err = 0; 1522 int i; 1523 int j; 1524 u8 mc_list[16] = {0}; 1525 1526 if (priv->port_up) { 1527 en_dbg(DRV, priv, "start port called while port already up\n"); 1528 return 0; 1529 } 1530 1531 INIT_LIST_HEAD(&priv->mc_list); 1532 INIT_LIST_HEAD(&priv->curr_list); 1533 INIT_LIST_HEAD(&priv->ethtool_list); 1534 memset(&priv->ethtool_rules[0], 0, 1535 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1536 1537 /* Calculate Rx buf size */ 1538 dev->mtu = min(dev->mtu, priv->max_mtu); 1539 mlx4_en_calc_rx_buf(dev); 1540 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1541 1542 /* Configure rx cq's and rings */ 1543 err = mlx4_en_activate_rx_rings(priv); 1544 if (err) { 1545 en_err(priv, "Failed to activate RX rings\n"); 1546 return err; 1547 } 1548 for (i = 0; i < priv->rx_ring_num; i++) { 1549 cq = priv->rx_cq[i]; 1550 1551 err = mlx4_en_init_affinity_hint(priv, i); 1552 if (err) { 1553 en_err(priv, "Failed preparing IRQ affinity hint\n"); 1554 goto cq_err; 1555 } 1556 1557 err = mlx4_en_activate_cq(priv, cq, i); 1558 if (err) { 1559 en_err(priv, "Failed activating Rx CQ\n"); 1560 mlx4_en_free_affinity_hint(priv, i); 1561 goto cq_err; 1562 } 1563 1564 for (j = 0; j < cq->size; j++) { 1565 struct mlx4_cqe *cqe = NULL; 1566 1567 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + 1568 priv->cqe_factor; 1569 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1570 } 1571 1572 err = mlx4_en_set_cq_moder(priv, cq); 1573 if (err) { 1574 en_err(priv, "Failed setting cq moderation parameters\n"); 1575 mlx4_en_deactivate_cq(priv, cq); 1576 mlx4_en_free_affinity_hint(priv, i); 1577 goto cq_err; 1578 } 1579 mlx4_en_arm_cq(priv, cq); 1580 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1581 ++rx_index; 1582 } 1583 1584 /* Set qp number */ 1585 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1586 err = mlx4_en_get_qp(priv); 1587 if (err) { 1588 en_err(priv, "Failed getting eth qp\n"); 1589 goto cq_err; 1590 } 1591 mdev->mac_removed[priv->port] = 0; 1592 1593 priv->counter_index = 1594 mlx4_get_default_counter_index(mdev->dev, priv->port); 1595 1596 err = mlx4_en_config_rss_steer(priv); 1597 if (err) { 1598 en_err(priv, "Failed configuring rss steering\n"); 1599 goto mac_err; 1600 } 1601 1602 err = mlx4_en_create_drop_qp(priv); 1603 if (err) 1604 goto rss_err; 1605 1606 /* Configure tx cq's and rings */ 1607 for (i = 0; i < priv->tx_ring_num; i++) { 1608 /* Configure cq */ 1609 cq = priv->tx_cq[i]; 1610 err = mlx4_en_activate_cq(priv, cq, i); 1611 if (err) { 1612 en_err(priv, "Failed allocating Tx CQ\n"); 1613 goto tx_err; 1614 } 1615 err = mlx4_en_set_cq_moder(priv, cq); 1616 if (err) { 1617 en_err(priv, "Failed setting cq moderation parameters\n"); 1618 mlx4_en_deactivate_cq(priv, cq); 1619 goto tx_err; 1620 } 1621 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 1622 cq->buf->wqe_index = cpu_to_be16(0xffff); 1623 1624 /* Configure ring */ 1625 tx_ring = priv->tx_ring[i]; 1626 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1627 i / priv->num_tx_rings_p_up); 1628 if (err) { 1629 en_err(priv, "Failed allocating Tx ring\n"); 1630 mlx4_en_deactivate_cq(priv, cq); 1631 goto tx_err; 1632 } 1633 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1634 1635 /* Arm CQ for TX completions */ 1636 mlx4_en_arm_cq(priv, cq); 1637 1638 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1639 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1640 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 1641 ++tx_index; 1642 } 1643 1644 /* Configure port */ 1645 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1646 priv->rx_skb_size + ETH_FCS_LEN, 1647 priv->prof->tx_pause, 1648 priv->prof->tx_ppp, 1649 priv->prof->rx_pause, 1650 priv->prof->rx_ppp); 1651 if (err) { 1652 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1653 priv->port, err); 1654 goto tx_err; 1655 } 1656 /* Set default qp number */ 1657 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1658 if (err) { 1659 en_err(priv, "Failed setting default qp numbers\n"); 1660 goto tx_err; 1661 } 1662 1663 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 1664 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 1665 if (err) { 1666 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 1667 err); 1668 goto tx_err; 1669 } 1670 } 1671 1672 /* Init port */ 1673 en_dbg(HW, priv, "Initializing port\n"); 1674 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1675 if (err) { 1676 en_err(priv, "Failed Initializing port\n"); 1677 goto tx_err; 1678 } 1679 1680 /* Set Unicast and VXLAN steering rules */ 1681 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && 1682 mlx4_en_set_rss_steer_rules(priv)) 1683 mlx4_warn(mdev, "Failed setting steering rules\n"); 1684 1685 /* Attach rx QP to bradcast address */ 1686 eth_broadcast_addr(&mc_list[10]); 1687 mc_list[5] = priv->port; /* needed for B0 steering support */ 1688 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1689 priv->port, 0, MLX4_PROT_ETH, 1690 &priv->broadcast_id)) 1691 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1692 1693 /* Must redo promiscuous mode setup. */ 1694 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1695 1696 /* Schedule multicast task to populate multicast list */ 1697 queue_work(mdev->workqueue, &priv->rx_mode_task); 1698 1699 #ifdef CONFIG_MLX4_EN_VXLAN 1700 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1701 vxlan_get_rx_port(dev); 1702 #endif 1703 priv->port_up = true; 1704 netif_tx_start_all_queues(dev); 1705 netif_device_attach(dev); 1706 1707 return 0; 1708 1709 tx_err: 1710 while (tx_index--) { 1711 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); 1712 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); 1713 } 1714 mlx4_en_destroy_drop_qp(priv); 1715 rss_err: 1716 mlx4_en_release_rss_steer(priv); 1717 mac_err: 1718 mlx4_en_put_qp(priv); 1719 cq_err: 1720 while (rx_index--) { 1721 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1722 mlx4_en_free_affinity_hint(priv, rx_index); 1723 } 1724 for (i = 0; i < priv->rx_ring_num; i++) 1725 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1726 1727 return err; /* need to close devices */ 1728 } 1729 1730 1731 void mlx4_en_stop_port(struct net_device *dev, int detach) 1732 { 1733 struct mlx4_en_priv *priv = netdev_priv(dev); 1734 struct mlx4_en_dev *mdev = priv->mdev; 1735 struct mlx4_en_mc_list *mclist, *tmp; 1736 struct ethtool_flow_id *flow, *tmp_flow; 1737 int i; 1738 u8 mc_list[16] = {0}; 1739 1740 if (!priv->port_up) { 1741 en_dbg(DRV, priv, "stop port called while port already down\n"); 1742 return; 1743 } 1744 1745 /* close port*/ 1746 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1747 1748 /* Synchronize with tx routine */ 1749 netif_tx_lock_bh(dev); 1750 if (detach) 1751 netif_device_detach(dev); 1752 netif_tx_stop_all_queues(dev); 1753 netif_tx_unlock_bh(dev); 1754 1755 netif_tx_disable(dev); 1756 1757 /* Set port as not active */ 1758 priv->port_up = false; 1759 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 1760 1761 /* Promsicuous mode */ 1762 if (mdev->dev->caps.steering_mode == 1763 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1764 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1765 MLX4_EN_FLAG_MC_PROMISC); 1766 mlx4_flow_steer_promisc_remove(mdev->dev, 1767 priv->port, 1768 MLX4_FS_ALL_DEFAULT); 1769 mlx4_flow_steer_promisc_remove(mdev->dev, 1770 priv->port, 1771 MLX4_FS_MC_DEFAULT); 1772 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1773 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1774 1775 /* Disable promiscouos mode */ 1776 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1777 priv->port); 1778 1779 /* Disable Multicast promisc */ 1780 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1781 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1782 priv->port); 1783 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1784 } 1785 } 1786 1787 /* Detach All multicasts */ 1788 eth_broadcast_addr(&mc_list[10]); 1789 mc_list[5] = priv->port; /* needed for B0 steering support */ 1790 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1791 MLX4_PROT_ETH, priv->broadcast_id); 1792 list_for_each_entry(mclist, &priv->curr_list, list) { 1793 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1794 mc_list[5] = priv->port; 1795 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1796 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1797 if (mclist->tunnel_reg_id) 1798 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); 1799 } 1800 mlx4_en_clear_list(dev); 1801 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1802 list_del(&mclist->list); 1803 kfree(mclist); 1804 } 1805 1806 /* Flush multicast filter */ 1807 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1808 1809 /* Remove flow steering rules for the port*/ 1810 if (mdev->dev->caps.steering_mode == 1811 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1812 ASSERT_RTNL(); 1813 list_for_each_entry_safe(flow, tmp_flow, 1814 &priv->ethtool_list, list) { 1815 mlx4_flow_detach(mdev->dev, flow->id); 1816 list_del(&flow->list); 1817 } 1818 } 1819 1820 mlx4_en_destroy_drop_qp(priv); 1821 1822 /* Free TX Rings */ 1823 for (i = 0; i < priv->tx_ring_num; i++) { 1824 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); 1825 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); 1826 } 1827 msleep(10); 1828 1829 for (i = 0; i < priv->tx_ring_num; i++) 1830 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); 1831 1832 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1833 mlx4_en_delete_rss_steer_rules(priv); 1834 1835 /* Free RSS qps */ 1836 mlx4_en_release_rss_steer(priv); 1837 1838 /* Unregister Mac address for the port */ 1839 mlx4_en_put_qp(priv); 1840 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1841 mdev->mac_removed[priv->port] = 1; 1842 1843 /* Free RX Rings */ 1844 for (i = 0; i < priv->rx_ring_num; i++) { 1845 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1846 1847 napi_synchronize(&cq->napi); 1848 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1849 mlx4_en_deactivate_cq(priv, cq); 1850 1851 mlx4_en_free_affinity_hint(priv, i); 1852 } 1853 } 1854 1855 static void mlx4_en_restart(struct work_struct *work) 1856 { 1857 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1858 watchdog_task); 1859 struct mlx4_en_dev *mdev = priv->mdev; 1860 struct net_device *dev = priv->dev; 1861 1862 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1863 1864 rtnl_lock(); 1865 mutex_lock(&mdev->state_lock); 1866 if (priv->port_up) { 1867 mlx4_en_stop_port(dev, 1); 1868 if (mlx4_en_start_port(dev)) 1869 en_err(priv, "Failed restarting port %d\n", priv->port); 1870 } 1871 mutex_unlock(&mdev->state_lock); 1872 rtnl_unlock(); 1873 } 1874 1875 static void mlx4_en_clear_stats(struct net_device *dev) 1876 { 1877 struct mlx4_en_priv *priv = netdev_priv(dev); 1878 struct mlx4_en_dev *mdev = priv->mdev; 1879 int i; 1880 1881 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1882 en_dbg(HW, priv, "Failed dumping statistics\n"); 1883 1884 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1885 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1886 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1887 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); 1888 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); 1889 memset(&priv->rx_priority_flowstats, 0, 1890 sizeof(priv->rx_priority_flowstats)); 1891 memset(&priv->tx_priority_flowstats, 0, 1892 sizeof(priv->tx_priority_flowstats)); 1893 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); 1894 1895 for (i = 0; i < priv->tx_ring_num; i++) { 1896 priv->tx_ring[i]->bytes = 0; 1897 priv->tx_ring[i]->packets = 0; 1898 priv->tx_ring[i]->tx_csum = 0; 1899 priv->tx_ring[i]->tx_dropped = 0; 1900 priv->tx_ring[i]->queue_stopped = 0; 1901 priv->tx_ring[i]->wake_queue = 0; 1902 priv->tx_ring[i]->tso_packets = 0; 1903 priv->tx_ring[i]->xmit_more = 0; 1904 } 1905 for (i = 0; i < priv->rx_ring_num; i++) { 1906 priv->rx_ring[i]->bytes = 0; 1907 priv->rx_ring[i]->packets = 0; 1908 priv->rx_ring[i]->csum_ok = 0; 1909 priv->rx_ring[i]->csum_none = 0; 1910 priv->rx_ring[i]->csum_complete = 0; 1911 } 1912 } 1913 1914 static int mlx4_en_open(struct net_device *dev) 1915 { 1916 struct mlx4_en_priv *priv = netdev_priv(dev); 1917 struct mlx4_en_dev *mdev = priv->mdev; 1918 int err = 0; 1919 1920 mutex_lock(&mdev->state_lock); 1921 1922 if (!mdev->device_up) { 1923 en_err(priv, "Cannot open - device down/disabled\n"); 1924 err = -EBUSY; 1925 goto out; 1926 } 1927 1928 /* Reset HW statistics and SW counters */ 1929 mlx4_en_clear_stats(dev); 1930 1931 err = mlx4_en_start_port(dev); 1932 if (err) 1933 en_err(priv, "Failed starting port:%d\n", priv->port); 1934 1935 out: 1936 mutex_unlock(&mdev->state_lock); 1937 return err; 1938 } 1939 1940 1941 static int mlx4_en_close(struct net_device *dev) 1942 { 1943 struct mlx4_en_priv *priv = netdev_priv(dev); 1944 struct mlx4_en_dev *mdev = priv->mdev; 1945 1946 en_dbg(IFDOWN, priv, "Close port called\n"); 1947 1948 mutex_lock(&mdev->state_lock); 1949 1950 mlx4_en_stop_port(dev, 0); 1951 netif_carrier_off(dev); 1952 1953 mutex_unlock(&mdev->state_lock); 1954 return 0; 1955 } 1956 1957 static void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1958 { 1959 int i; 1960 1961 #ifdef CONFIG_RFS_ACCEL 1962 priv->dev->rx_cpu_rmap = NULL; 1963 #endif 1964 1965 for (i = 0; i < priv->tx_ring_num; i++) { 1966 if (priv->tx_ring && priv->tx_ring[i]) 1967 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1968 if (priv->tx_cq && priv->tx_cq[i]) 1969 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1970 } 1971 1972 for (i = 0; i < priv->rx_ring_num; i++) { 1973 if (priv->rx_ring[i]) 1974 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1975 priv->prof->rx_ring_size, priv->stride); 1976 if (priv->rx_cq[i]) 1977 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1978 } 1979 1980 } 1981 1982 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 1983 { 1984 struct mlx4_en_port_profile *prof = priv->prof; 1985 int i; 1986 int node; 1987 1988 /* Create tx Rings */ 1989 for (i = 0; i < priv->tx_ring_num; i++) { 1990 node = cpu_to_node(i % num_online_cpus()); 1991 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1992 prof->tx_ring_size, i, TX, node)) 1993 goto err; 1994 1995 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 1996 prof->tx_ring_size, TXBB_SIZE, 1997 node, i)) 1998 goto err; 1999 } 2000 2001 /* Create rx Rings */ 2002 for (i = 0; i < priv->rx_ring_num; i++) { 2003 node = cpu_to_node(i % num_online_cpus()); 2004 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 2005 prof->rx_ring_size, i, RX, node)) 2006 goto err; 2007 2008 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 2009 prof->rx_ring_size, priv->stride, 2010 node)) 2011 goto err; 2012 } 2013 2014 #ifdef CONFIG_RFS_ACCEL 2015 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); 2016 #endif 2017 2018 return 0; 2019 2020 err: 2021 en_err(priv, "Failed to allocate NIC resources\n"); 2022 for (i = 0; i < priv->rx_ring_num; i++) { 2023 if (priv->rx_ring[i]) 2024 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 2025 prof->rx_ring_size, 2026 priv->stride); 2027 if (priv->rx_cq[i]) 2028 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 2029 } 2030 for (i = 0; i < priv->tx_ring_num; i++) { 2031 if (priv->tx_ring[i]) 2032 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 2033 if (priv->tx_cq[i]) 2034 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 2035 } 2036 return -ENOMEM; 2037 } 2038 2039 static void mlx4_en_shutdown(struct net_device *dev) 2040 { 2041 rtnl_lock(); 2042 netif_device_detach(dev); 2043 mlx4_en_close(dev); 2044 rtnl_unlock(); 2045 } 2046 2047 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2048 struct mlx4_en_priv *src, 2049 struct mlx4_en_port_profile *prof) 2050 { 2051 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, 2052 sizeof(dst->hwtstamp_config)); 2053 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; 2054 dst->tx_ring_num = prof->tx_ring_num; 2055 dst->rx_ring_num = prof->rx_ring_num; 2056 dst->flags = prof->flags; 2057 dst->mdev = src->mdev; 2058 dst->port = src->port; 2059 dst->dev = src->dev; 2060 dst->prof = prof; 2061 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2062 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 2063 2064 dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2065 GFP_KERNEL); 2066 if (!dst->tx_ring) 2067 return -ENOMEM; 2068 2069 dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, 2070 GFP_KERNEL); 2071 if (!dst->tx_cq) { 2072 kfree(dst->tx_ring); 2073 return -ENOMEM; 2074 } 2075 return 0; 2076 } 2077 2078 static void mlx4_en_update_priv(struct mlx4_en_priv *dst, 2079 struct mlx4_en_priv *src) 2080 { 2081 memcpy(dst->rx_ring, src->rx_ring, 2082 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); 2083 memcpy(dst->rx_cq, src->rx_cq, 2084 sizeof(struct mlx4_en_cq *) * src->rx_ring_num); 2085 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, 2086 sizeof(dst->hwtstamp_config)); 2087 dst->tx_ring_num = src->tx_ring_num; 2088 dst->rx_ring_num = src->rx_ring_num; 2089 dst->tx_ring = src->tx_ring; 2090 dst->tx_cq = src->tx_cq; 2091 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); 2092 } 2093 2094 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2095 struct mlx4_en_priv *tmp, 2096 struct mlx4_en_port_profile *prof) 2097 { 2098 mlx4_en_copy_priv(tmp, priv, prof); 2099 2100 if (mlx4_en_alloc_resources(tmp)) { 2101 en_warn(priv, 2102 "%s: Resource allocation failed, using previous configuration\n", 2103 __func__); 2104 kfree(tmp->tx_ring); 2105 kfree(tmp->tx_cq); 2106 return -ENOMEM; 2107 } 2108 return 0; 2109 } 2110 2111 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 2112 struct mlx4_en_priv *tmp) 2113 { 2114 mlx4_en_free_resources(priv); 2115 mlx4_en_update_priv(priv, tmp); 2116 } 2117 2118 void mlx4_en_destroy_netdev(struct net_device *dev) 2119 { 2120 struct mlx4_en_priv *priv = netdev_priv(dev); 2121 struct mlx4_en_dev *mdev = priv->mdev; 2122 bool shutdown = mdev->dev->persist->interface_state & 2123 MLX4_INTERFACE_STATE_SHUTDOWN; 2124 2125 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2126 2127 /* Unregister device - this will close the port if it was up */ 2128 if (priv->registered) { 2129 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2130 priv->port)); 2131 if (shutdown) 2132 mlx4_en_shutdown(dev); 2133 else 2134 unregister_netdev(dev); 2135 } 2136 2137 if (priv->allocated) 2138 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 2139 2140 cancel_delayed_work(&priv->stats_task); 2141 cancel_delayed_work(&priv->service_task); 2142 /* flush any pending task for this netdev */ 2143 flush_workqueue(mdev->workqueue); 2144 2145 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2146 mlx4_en_remove_timestamp(mdev); 2147 2148 /* Detach the netdev so tasks would not attempt to access it */ 2149 mutex_lock(&mdev->state_lock); 2150 mdev->pndev[priv->port] = NULL; 2151 mdev->upper[priv->port] = NULL; 2152 mutex_unlock(&mdev->state_lock); 2153 2154 #ifdef CONFIG_RFS_ACCEL 2155 mlx4_en_cleanup_filters(priv); 2156 #endif 2157 2158 mlx4_en_free_resources(priv); 2159 2160 kfree(priv->tx_ring); 2161 kfree(priv->tx_cq); 2162 2163 if (!shutdown) 2164 free_netdev(dev); 2165 } 2166 2167 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2168 { 2169 struct mlx4_en_priv *priv = netdev_priv(dev); 2170 struct mlx4_en_dev *mdev = priv->mdev; 2171 int err = 0; 2172 2173 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2174 dev->mtu, new_mtu); 2175 2176 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 2177 en_err(priv, "Bad MTU size:%d.\n", new_mtu); 2178 return -EPERM; 2179 } 2180 dev->mtu = new_mtu; 2181 2182 if (netif_running(dev)) { 2183 mutex_lock(&mdev->state_lock); 2184 if (!mdev->device_up) { 2185 /* NIC is probably restarting - let watchdog task reset 2186 * the port */ 2187 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2188 } else { 2189 mlx4_en_stop_port(dev, 1); 2190 err = mlx4_en_start_port(dev); 2191 if (err) { 2192 en_err(priv, "Failed restarting port:%d\n", 2193 priv->port); 2194 queue_work(mdev->workqueue, &priv->watchdog_task); 2195 } 2196 } 2197 mutex_unlock(&mdev->state_lock); 2198 } 2199 return 0; 2200 } 2201 2202 static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2203 { 2204 struct mlx4_en_priv *priv = netdev_priv(dev); 2205 struct mlx4_en_dev *mdev = priv->mdev; 2206 struct hwtstamp_config config; 2207 2208 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2209 return -EFAULT; 2210 2211 /* reserved for future extensions */ 2212 if (config.flags) 2213 return -EINVAL; 2214 2215 /* device doesn't support time stamping */ 2216 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2217 return -EINVAL; 2218 2219 /* TX HW timestamp */ 2220 switch (config.tx_type) { 2221 case HWTSTAMP_TX_OFF: 2222 case HWTSTAMP_TX_ON: 2223 break; 2224 default: 2225 return -ERANGE; 2226 } 2227 2228 /* RX HW timestamp */ 2229 switch (config.rx_filter) { 2230 case HWTSTAMP_FILTER_NONE: 2231 break; 2232 case HWTSTAMP_FILTER_ALL: 2233 case HWTSTAMP_FILTER_SOME: 2234 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2235 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2236 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2237 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2238 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2239 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2240 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2241 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2242 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2243 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2244 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2245 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2246 config.rx_filter = HWTSTAMP_FILTER_ALL; 2247 break; 2248 default: 2249 return -ERANGE; 2250 } 2251 2252 if (mlx4_en_reset_config(dev, config, dev->features)) { 2253 config.tx_type = HWTSTAMP_TX_OFF; 2254 config.rx_filter = HWTSTAMP_FILTER_NONE; 2255 } 2256 2257 return copy_to_user(ifr->ifr_data, &config, 2258 sizeof(config)) ? -EFAULT : 0; 2259 } 2260 2261 static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2262 { 2263 struct mlx4_en_priv *priv = netdev_priv(dev); 2264 2265 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, 2266 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0; 2267 } 2268 2269 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2270 { 2271 switch (cmd) { 2272 case SIOCSHWTSTAMP: 2273 return mlx4_en_hwtstamp_set(dev, ifr); 2274 case SIOCGHWTSTAMP: 2275 return mlx4_en_hwtstamp_get(dev, ifr); 2276 default: 2277 return -EOPNOTSUPP; 2278 } 2279 } 2280 2281 static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, 2282 netdev_features_t features) 2283 { 2284 struct mlx4_en_priv *en_priv = netdev_priv(netdev); 2285 struct mlx4_en_dev *mdev = en_priv->mdev; 2286 2287 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel 2288 * enable/disable make sure S-TAG flag is always in same state as 2289 * C-TAG. 2290 */ 2291 if (features & NETIF_F_HW_VLAN_CTAG_RX && 2292 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 2293 features |= NETIF_F_HW_VLAN_STAG_RX; 2294 else 2295 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2296 2297 return features; 2298 } 2299 2300 static int mlx4_en_set_features(struct net_device *netdev, 2301 netdev_features_t features) 2302 { 2303 struct mlx4_en_priv *priv = netdev_priv(netdev); 2304 bool reset = false; 2305 int ret = 0; 2306 2307 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { 2308 en_info(priv, "Turn %s RX-FCS\n", 2309 (features & NETIF_F_RXFCS) ? "ON" : "OFF"); 2310 reset = true; 2311 } 2312 2313 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { 2314 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; 2315 2316 en_info(priv, "Turn %s RX-ALL\n", 2317 ignore_fcs_value ? "ON" : "OFF"); 2318 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, 2319 priv->port, ignore_fcs_value); 2320 if (ret) 2321 return ret; 2322 } 2323 2324 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2325 en_info(priv, "Turn %s RX vlan strip offload\n", 2326 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2327 reset = true; 2328 } 2329 2330 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2331 en_info(priv, "Turn %s TX vlan strip offload\n", 2332 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2333 2334 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) 2335 en_info(priv, "Turn %s TX S-VLAN strip offload\n", 2336 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); 2337 2338 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { 2339 en_info(priv, "Turn %s loopback\n", 2340 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); 2341 mlx4_en_update_loopback_state(netdev, features); 2342 } 2343 2344 if (reset) { 2345 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2346 features); 2347 if (ret) 2348 return ret; 2349 } 2350 2351 return 0; 2352 } 2353 2354 static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2355 { 2356 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2357 struct mlx4_en_dev *mdev = en_priv->mdev; 2358 u64 mac_u64 = mlx4_mac_to_u64(mac); 2359 2360 if (is_multicast_ether_addr(mac)) 2361 return -EINVAL; 2362 2363 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2364 } 2365 2366 static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) 2367 { 2368 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2369 struct mlx4_en_dev *mdev = en_priv->mdev; 2370 2371 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); 2372 } 2373 2374 static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 2375 int max_tx_rate) 2376 { 2377 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2378 struct mlx4_en_dev *mdev = en_priv->mdev; 2379 2380 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, 2381 max_tx_rate); 2382 } 2383 2384 static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2385 { 2386 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2387 struct mlx4_en_dev *mdev = en_priv->mdev; 2388 2389 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); 2390 } 2391 2392 static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) 2393 { 2394 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2395 struct mlx4_en_dev *mdev = en_priv->mdev; 2396 2397 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); 2398 } 2399 2400 static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) 2401 { 2402 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2403 struct mlx4_en_dev *mdev = en_priv->mdev; 2404 2405 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); 2406 } 2407 2408 static int mlx4_en_get_vf_stats(struct net_device *dev, int vf, 2409 struct ifla_vf_stats *vf_stats) 2410 { 2411 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2412 struct mlx4_en_dev *mdev = en_priv->mdev; 2413 2414 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); 2415 } 2416 2417 #define PORT_ID_BYTE_LEN 8 2418 static int mlx4_en_get_phys_port_id(struct net_device *dev, 2419 struct netdev_phys_item_id *ppid) 2420 { 2421 struct mlx4_en_priv *priv = netdev_priv(dev); 2422 struct mlx4_dev *mdev = priv->mdev->dev; 2423 int i; 2424 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; 2425 2426 if (!phys_port_id) 2427 return -EOPNOTSUPP; 2428 2429 ppid->id_len = sizeof(phys_port_id); 2430 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { 2431 ppid->id[i] = phys_port_id & 0xff; 2432 phys_port_id >>= 8; 2433 } 2434 return 0; 2435 } 2436 2437 #ifdef CONFIG_MLX4_EN_VXLAN 2438 static void mlx4_en_add_vxlan_offloads(struct work_struct *work) 2439 { 2440 int ret; 2441 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2442 vxlan_add_task); 2443 2444 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); 2445 if (ret) 2446 goto out; 2447 2448 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2449 VXLAN_STEER_BY_OUTER_MAC, 1); 2450 out: 2451 if (ret) { 2452 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2453 return; 2454 } 2455 2456 /* set offloads */ 2457 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2458 NETIF_F_RXCSUM | 2459 NETIF_F_TSO | NETIF_F_TSO6 | 2460 NETIF_F_GSO_UDP_TUNNEL | 2461 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2462 NETIF_F_GSO_PARTIAL; 2463 } 2464 2465 static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2466 { 2467 int ret; 2468 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2469 vxlan_del_task); 2470 /* unset offloads */ 2471 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2472 NETIF_F_RXCSUM | 2473 NETIF_F_TSO | NETIF_F_TSO6 | 2474 NETIF_F_GSO_UDP_TUNNEL | 2475 NETIF_F_GSO_UDP_TUNNEL_CSUM | 2476 NETIF_F_GSO_PARTIAL); 2477 2478 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2479 VXLAN_STEER_BY_OUTER_MAC, 0); 2480 if (ret) 2481 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2482 2483 priv->vxlan_port = 0; 2484 } 2485 2486 static void mlx4_en_add_vxlan_port(struct net_device *dev, 2487 sa_family_t sa_family, __be16 port) 2488 { 2489 struct mlx4_en_priv *priv = netdev_priv(dev); 2490 __be16 current_port; 2491 2492 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2493 return; 2494 2495 if (sa_family == AF_INET6) 2496 return; 2497 2498 current_port = priv->vxlan_port; 2499 if (current_port && current_port != port) { 2500 en_warn(priv, "vxlan port %d configured, can't add port %d\n", 2501 ntohs(current_port), ntohs(port)); 2502 return; 2503 } 2504 2505 priv->vxlan_port = port; 2506 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); 2507 } 2508 2509 static void mlx4_en_del_vxlan_port(struct net_device *dev, 2510 sa_family_t sa_family, __be16 port) 2511 { 2512 struct mlx4_en_priv *priv = netdev_priv(dev); 2513 __be16 current_port; 2514 2515 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 2516 return; 2517 2518 if (sa_family == AF_INET6) 2519 return; 2520 2521 current_port = priv->vxlan_port; 2522 if (current_port != port) { 2523 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); 2524 return; 2525 } 2526 2527 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2528 } 2529 2530 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2531 struct net_device *dev, 2532 netdev_features_t features) 2533 { 2534 features = vlan_features_check(skb, features); 2535 features = vxlan_features_check(skb, features); 2536 2537 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does 2538 * support inner IPv6 checksums and segmentation so we need to 2539 * strip that feature if this is an IPv6 encapsulated frame. 2540 */ 2541 if (skb->encapsulation && 2542 (skb->ip_summed == CHECKSUM_PARTIAL)) { 2543 struct mlx4_en_priv *priv = netdev_priv(dev); 2544 2545 if (!priv->vxlan_port || 2546 (ip_hdr(skb)->version != 4) || 2547 (udp_hdr(skb)->dest != priv->vxlan_port)) 2548 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2549 } 2550 2551 return features; 2552 } 2553 #endif 2554 2555 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) 2556 { 2557 struct mlx4_en_priv *priv = netdev_priv(dev); 2558 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; 2559 struct mlx4_update_qp_params params; 2560 int err; 2561 2562 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) 2563 return -EOPNOTSUPP; 2564 2565 /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ 2566 if (maxrate >> 12) { 2567 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; 2568 params.rate_val = maxrate / 1000; 2569 } else if (maxrate) { 2570 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; 2571 params.rate_val = maxrate; 2572 } else { /* zero serves to revoke the QP rate-limitation */ 2573 params.rate_unit = 0; 2574 params.rate_val = 0; 2575 } 2576 2577 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, 2578 ¶ms); 2579 return err; 2580 } 2581 2582 static const struct net_device_ops mlx4_netdev_ops = { 2583 .ndo_open = mlx4_en_open, 2584 .ndo_stop = mlx4_en_close, 2585 .ndo_start_xmit = mlx4_en_xmit, 2586 .ndo_select_queue = mlx4_en_select_queue, 2587 .ndo_get_stats64 = mlx4_en_get_stats64, 2588 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2589 .ndo_set_mac_address = mlx4_en_set_mac, 2590 .ndo_validate_addr = eth_validate_addr, 2591 .ndo_change_mtu = mlx4_en_change_mtu, 2592 .ndo_do_ioctl = mlx4_en_ioctl, 2593 .ndo_tx_timeout = mlx4_en_tx_timeout, 2594 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2595 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2596 #ifdef CONFIG_NET_POLL_CONTROLLER 2597 .ndo_poll_controller = mlx4_en_netpoll, 2598 #endif 2599 .ndo_set_features = mlx4_en_set_features, 2600 .ndo_fix_features = mlx4_en_fix_features, 2601 .ndo_setup_tc = __mlx4_en_setup_tc, 2602 #ifdef CONFIG_RFS_ACCEL 2603 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2604 #endif 2605 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2606 #ifdef CONFIG_MLX4_EN_VXLAN 2607 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2608 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2609 .ndo_features_check = mlx4_en_features_check, 2610 #endif 2611 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2612 }; 2613 2614 static const struct net_device_ops mlx4_netdev_ops_master = { 2615 .ndo_open = mlx4_en_open, 2616 .ndo_stop = mlx4_en_close, 2617 .ndo_start_xmit = mlx4_en_xmit, 2618 .ndo_select_queue = mlx4_en_select_queue, 2619 .ndo_get_stats64 = mlx4_en_get_stats64, 2620 .ndo_set_rx_mode = mlx4_en_set_rx_mode, 2621 .ndo_set_mac_address = mlx4_en_set_mac, 2622 .ndo_validate_addr = eth_validate_addr, 2623 .ndo_change_mtu = mlx4_en_change_mtu, 2624 .ndo_tx_timeout = mlx4_en_tx_timeout, 2625 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2626 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2627 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2628 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2629 .ndo_set_vf_rate = mlx4_en_set_vf_rate, 2630 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2631 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2632 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2633 .ndo_get_vf_config = mlx4_en_get_vf_config, 2634 #ifdef CONFIG_NET_POLL_CONTROLLER 2635 .ndo_poll_controller = mlx4_en_netpoll, 2636 #endif 2637 .ndo_set_features = mlx4_en_set_features, 2638 .ndo_fix_features = mlx4_en_fix_features, 2639 .ndo_setup_tc = __mlx4_en_setup_tc, 2640 #ifdef CONFIG_RFS_ACCEL 2641 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2642 #endif 2643 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2644 #ifdef CONFIG_MLX4_EN_VXLAN 2645 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2646 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2647 .ndo_features_check = mlx4_en_features_check, 2648 #endif 2649 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, 2650 }; 2651 2652 struct mlx4_en_bond { 2653 struct work_struct work; 2654 struct mlx4_en_priv *priv; 2655 int is_bonded; 2656 struct mlx4_port_map port_map; 2657 }; 2658 2659 static void mlx4_en_bond_work(struct work_struct *work) 2660 { 2661 struct mlx4_en_bond *bond = container_of(work, 2662 struct mlx4_en_bond, 2663 work); 2664 int err = 0; 2665 struct mlx4_dev *dev = bond->priv->mdev->dev; 2666 2667 if (bond->is_bonded) { 2668 if (!mlx4_is_bonded(dev)) { 2669 err = mlx4_bond(dev); 2670 if (err) 2671 en_err(bond->priv, "Fail to bond device\n"); 2672 } 2673 if (!err) { 2674 err = mlx4_port_map_set(dev, &bond->port_map); 2675 if (err) 2676 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", 2677 bond->port_map.port1, 2678 bond->port_map.port2, 2679 err); 2680 } 2681 } else if (mlx4_is_bonded(dev)) { 2682 err = mlx4_unbond(dev); 2683 if (err) 2684 en_err(bond->priv, "Fail to unbond device\n"); 2685 } 2686 dev_put(bond->priv->dev); 2687 kfree(bond); 2688 } 2689 2690 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, 2691 u8 v2p_p1, u8 v2p_p2) 2692 { 2693 struct mlx4_en_bond *bond = NULL; 2694 2695 bond = kzalloc(sizeof(*bond), GFP_ATOMIC); 2696 if (!bond) 2697 return -ENOMEM; 2698 2699 INIT_WORK(&bond->work, mlx4_en_bond_work); 2700 bond->priv = priv; 2701 bond->is_bonded = is_bonded; 2702 bond->port_map.port1 = v2p_p1; 2703 bond->port_map.port2 = v2p_p2; 2704 dev_hold(priv->dev); 2705 queue_work(priv->mdev->workqueue, &bond->work); 2706 return 0; 2707 } 2708 2709 int mlx4_en_netdev_event(struct notifier_block *this, 2710 unsigned long event, void *ptr) 2711 { 2712 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2713 u8 port = 0; 2714 struct mlx4_en_dev *mdev; 2715 struct mlx4_dev *dev; 2716 int i, num_eth_ports = 0; 2717 bool do_bond = true; 2718 struct mlx4_en_priv *priv; 2719 u8 v2p_port1 = 0; 2720 u8 v2p_port2 = 0; 2721 2722 if (!net_eq(dev_net(ndev), &init_net)) 2723 return NOTIFY_DONE; 2724 2725 mdev = container_of(this, struct mlx4_en_dev, nb); 2726 dev = mdev->dev; 2727 2728 /* Go into this mode only when two network devices set on two ports 2729 * of the same mlx4 device are slaves of the same bonding master 2730 */ 2731 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 2732 ++num_eth_ports; 2733 if (!port && (mdev->pndev[i] == ndev)) 2734 port = i; 2735 mdev->upper[i] = mdev->pndev[i] ? 2736 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; 2737 /* condition not met: network device is a slave */ 2738 if (!mdev->upper[i]) 2739 do_bond = false; 2740 if (num_eth_ports < 2) 2741 continue; 2742 /* condition not met: same master */ 2743 if (mdev->upper[i] != mdev->upper[i-1]) 2744 do_bond = false; 2745 } 2746 /* condition not met: 2 salves */ 2747 do_bond = (num_eth_ports == 2) ? do_bond : false; 2748 2749 /* handle only events that come with enough info */ 2750 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) 2751 return NOTIFY_DONE; 2752 2753 priv = netdev_priv(ndev); 2754 if (do_bond) { 2755 struct netdev_notifier_bonding_info *notifier_info = ptr; 2756 struct netdev_bonding_info *bonding_info = 2757 ¬ifier_info->bonding_info; 2758 2759 /* required mode 1, 2 or 4 */ 2760 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && 2761 (bonding_info->master.bond_mode != BOND_MODE_XOR) && 2762 (bonding_info->master.bond_mode != BOND_MODE_8023AD)) 2763 do_bond = false; 2764 2765 /* require exactly 2 slaves */ 2766 if (bonding_info->master.num_slaves != 2) 2767 do_bond = false; 2768 2769 /* calc v2p */ 2770 if (do_bond) { 2771 if (bonding_info->master.bond_mode == 2772 BOND_MODE_ACTIVEBACKUP) { 2773 /* in active-backup mode virtual ports are 2774 * mapped to the physical port of the active 2775 * slave */ 2776 if (bonding_info->slave.state == 2777 BOND_STATE_BACKUP) { 2778 if (port == 1) { 2779 v2p_port1 = 2; 2780 v2p_port2 = 2; 2781 } else { 2782 v2p_port1 = 1; 2783 v2p_port2 = 1; 2784 } 2785 } else { /* BOND_STATE_ACTIVE */ 2786 if (port == 1) { 2787 v2p_port1 = 1; 2788 v2p_port2 = 1; 2789 } else { 2790 v2p_port1 = 2; 2791 v2p_port2 = 2; 2792 } 2793 } 2794 } else { /* Active-Active */ 2795 /* in active-active mode a virtual port is 2796 * mapped to the native physical port if and only 2797 * if the physical port is up */ 2798 __s8 link = bonding_info->slave.link; 2799 2800 if (port == 1) 2801 v2p_port2 = 2; 2802 else 2803 v2p_port1 = 1; 2804 if ((link == BOND_LINK_UP) || 2805 (link == BOND_LINK_FAIL)) { 2806 if (port == 1) 2807 v2p_port1 = 1; 2808 else 2809 v2p_port2 = 2; 2810 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ 2811 if (port == 1) 2812 v2p_port1 = 2; 2813 else 2814 v2p_port2 = 1; 2815 } 2816 } 2817 } 2818 } 2819 2820 mlx4_en_queue_bond_work(priv, do_bond, 2821 v2p_port1, v2p_port2); 2822 2823 return NOTIFY_DONE; 2824 } 2825 2826 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, 2827 struct mlx4_en_stats_bitmap *stats_bitmap, 2828 u8 rx_ppp, u8 rx_pause, 2829 u8 tx_ppp, u8 tx_pause) 2830 { 2831 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS; 2832 2833 if (!mlx4_is_slave(dev) && 2834 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { 2835 mutex_lock(&stats_bitmap->mutex); 2836 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); 2837 2838 if (rx_ppp) 2839 bitmap_set(stats_bitmap->bitmap, last_i, 2840 NUM_FLOW_PRIORITY_STATS_RX); 2841 last_i += NUM_FLOW_PRIORITY_STATS_RX; 2842 2843 if (rx_pause && !(rx_ppp)) 2844 bitmap_set(stats_bitmap->bitmap, last_i, 2845 NUM_FLOW_STATS_RX); 2846 last_i += NUM_FLOW_STATS_RX; 2847 2848 if (tx_ppp) 2849 bitmap_set(stats_bitmap->bitmap, last_i, 2850 NUM_FLOW_PRIORITY_STATS_TX); 2851 last_i += NUM_FLOW_PRIORITY_STATS_TX; 2852 2853 if (tx_pause && !(tx_ppp)) 2854 bitmap_set(stats_bitmap->bitmap, last_i, 2855 NUM_FLOW_STATS_TX); 2856 last_i += NUM_FLOW_STATS_TX; 2857 2858 mutex_unlock(&stats_bitmap->mutex); 2859 } 2860 } 2861 2862 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, 2863 struct mlx4_en_stats_bitmap *stats_bitmap, 2864 u8 rx_ppp, u8 rx_pause, 2865 u8 tx_ppp, u8 tx_pause) 2866 { 2867 int last_i = 0; 2868 2869 mutex_init(&stats_bitmap->mutex); 2870 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); 2871 2872 if (mlx4_is_slave(dev)) { 2873 bitmap_set(stats_bitmap->bitmap, last_i + 2874 MLX4_FIND_NETDEV_STAT(rx_packets), 1); 2875 bitmap_set(stats_bitmap->bitmap, last_i + 2876 MLX4_FIND_NETDEV_STAT(tx_packets), 1); 2877 bitmap_set(stats_bitmap->bitmap, last_i + 2878 MLX4_FIND_NETDEV_STAT(rx_bytes), 1); 2879 bitmap_set(stats_bitmap->bitmap, last_i + 2880 MLX4_FIND_NETDEV_STAT(tx_bytes), 1); 2881 bitmap_set(stats_bitmap->bitmap, last_i + 2882 MLX4_FIND_NETDEV_STAT(rx_dropped), 1); 2883 bitmap_set(stats_bitmap->bitmap, last_i + 2884 MLX4_FIND_NETDEV_STAT(tx_dropped), 1); 2885 } else { 2886 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); 2887 } 2888 last_i += NUM_MAIN_STATS; 2889 2890 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); 2891 last_i += NUM_PORT_STATS; 2892 2893 if (mlx4_is_master(dev)) 2894 bitmap_set(stats_bitmap->bitmap, last_i, 2895 NUM_PF_STATS); 2896 last_i += NUM_PF_STATS; 2897 2898 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, 2899 rx_ppp, rx_pause, 2900 tx_ppp, tx_pause); 2901 last_i += NUM_FLOW_STATS; 2902 2903 if (!mlx4_is_slave(dev)) 2904 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); 2905 } 2906 2907 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2908 struct mlx4_en_port_profile *prof) 2909 { 2910 struct net_device *dev; 2911 struct mlx4_en_priv *priv; 2912 int i; 2913 int err; 2914 2915 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 2916 MAX_TX_RINGS, MAX_RX_RINGS); 2917 if (dev == NULL) 2918 return -ENOMEM; 2919 2920 netif_set_real_num_tx_queues(dev, prof->tx_ring_num); 2921 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 2922 2923 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); 2924 dev->dev_port = port - 1; 2925 2926 /* 2927 * Initialize driver private data 2928 */ 2929 2930 priv = netdev_priv(dev); 2931 memset(priv, 0, sizeof(struct mlx4_en_priv)); 2932 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); 2933 spin_lock_init(&priv->stats_lock); 2934 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2935 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2936 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2937 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2938 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2939 #ifdef CONFIG_MLX4_EN_VXLAN 2940 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); 2941 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); 2942 #endif 2943 #ifdef CONFIG_RFS_ACCEL 2944 INIT_LIST_HEAD(&priv->filters); 2945 spin_lock_init(&priv->filters_lock); 2946 #endif 2947 2948 priv->dev = dev; 2949 priv->mdev = mdev; 2950 priv->ddev = &mdev->pdev->dev; 2951 priv->prof = prof; 2952 priv->port = port; 2953 priv->port_up = false; 2954 priv->flags = prof->flags; 2955 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; 2956 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 2957 MLX4_WQE_CTRL_SOLICITED); 2958 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2959 priv->tx_ring_num = prof->tx_ring_num; 2960 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; 2961 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); 2962 2963 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2964 GFP_KERNEL); 2965 if (!priv->tx_ring) { 2966 err = -ENOMEM; 2967 goto out; 2968 } 2969 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, 2970 GFP_KERNEL); 2971 if (!priv->tx_cq) { 2972 err = -ENOMEM; 2973 goto out; 2974 } 2975 priv->rx_ring_num = prof->rx_ring_num; 2976 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 2977 priv->cqe_size = mdev->dev->caps.cqe_size; 2978 priv->mac_index = -1; 2979 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2980 #ifdef CONFIG_MLX4_EN_DCB 2981 if (!mlx4_is_slave(priv->mdev->dev)) { 2982 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2983 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2984 } else { 2985 en_info(priv, "enabling only PFC DCB ops\n"); 2986 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 2987 } 2988 } 2989 #endif 2990 2991 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 2992 INIT_HLIST_HEAD(&priv->mac_hash[i]); 2993 2994 /* Query for default mac and max mtu */ 2995 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 2996 2997 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & 2998 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) 2999 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; 3000 3001 /* Set default MAC */ 3002 dev->addr_len = ETH_ALEN; 3003 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); 3004 if (!is_valid_ether_addr(dev->dev_addr)) { 3005 en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", 3006 priv->port, dev->dev_addr); 3007 err = -EINVAL; 3008 goto out; 3009 } else if (mlx4_is_slave(priv->mdev->dev) && 3010 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { 3011 /* Random MAC was assigned in mlx4_slave_cap 3012 * in mlx4_core module 3013 */ 3014 dev->addr_assign_type |= NET_ADDR_RANDOM; 3015 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 3016 } 3017 3018 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); 3019 3020 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 3021 DS_SIZE * MLX4_EN_MAX_RX_FRAGS); 3022 err = mlx4_en_alloc_resources(priv); 3023 if (err) 3024 goto out; 3025 3026 /* Initialize time stamping config */ 3027 priv->hwtstamp_config.flags = 0; 3028 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 3029 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 3030 3031 /* Allocate page for receive rings */ 3032 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 3033 MLX4_EN_PAGE_SIZE); 3034 if (err) { 3035 en_err(priv, "Failed to allocate page for rx qps\n"); 3036 goto out; 3037 } 3038 priv->allocated = 1; 3039 3040 /* 3041 * Initialize netdev entry points 3042 */ 3043 if (mlx4_is_master(priv->mdev->dev)) 3044 dev->netdev_ops = &mlx4_netdev_ops_master; 3045 else 3046 dev->netdev_ops = &mlx4_netdev_ops; 3047 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 3048 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 3049 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 3050 3051 dev->ethtool_ops = &mlx4_en_ethtool_ops; 3052 3053 /* 3054 * Set driver features 3055 */ 3056 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3057 if (mdev->LSO_support) 3058 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3059 3060 dev->vlan_features = dev->hw_features; 3061 3062 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 3063 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 3064 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 3065 NETIF_F_HW_VLAN_CTAG_FILTER; 3066 dev->hw_features |= NETIF_F_LOOPBACK | 3067 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 3068 3069 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3070 dev->features |= NETIF_F_HW_VLAN_STAG_RX | 3071 NETIF_F_HW_VLAN_STAG_FILTER; 3072 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; 3073 } 3074 3075 if (mlx4_is_slave(mdev->dev)) { 3076 int phv; 3077 3078 err = get_phv_bit(mdev->dev, port, &phv); 3079 if (!err && phv) { 3080 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3081 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; 3082 } 3083 } else { 3084 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3085 !(mdev->dev->caps.flags2 & 3086 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) 3087 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 3088 } 3089 3090 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 3091 dev->hw_features |= NETIF_F_RXFCS; 3092 3093 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) 3094 dev->hw_features |= NETIF_F_RXALL; 3095 3096 if (mdev->dev->caps.steering_mode == 3097 MLX4_STEERING_MODE_DEVICE_MANAGED && 3098 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 3099 dev->hw_features |= NETIF_F_NTUPLE; 3100 3101 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 3102 dev->priv_flags |= IFF_UNICAST_FLT; 3103 3104 /* Setting a default hash function value */ 3105 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 3106 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3107 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { 3108 priv->rss_hash_fn = ETH_RSS_HASH_XOR; 3109 } else { 3110 en_warn(priv, 3111 "No RSS hash capabilities exposed, using Toeplitz\n"); 3112 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 3113 } 3114 3115 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3116 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | 3117 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3118 NETIF_F_GSO_PARTIAL; 3119 dev->features |= NETIF_F_GSO_UDP_TUNNEL | 3120 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3121 NETIF_F_GSO_PARTIAL; 3122 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; 3123 } 3124 3125 mdev->pndev[port] = dev; 3126 mdev->upper[port] = NULL; 3127 3128 netif_carrier_off(dev); 3129 mlx4_en_set_default_moderation(priv); 3130 3131 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 3132 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 3133 3134 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 3135 3136 /* Configure port */ 3137 mlx4_en_calc_rx_buf(dev); 3138 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 3139 priv->rx_skb_size + ETH_FCS_LEN, 3140 prof->tx_pause, prof->tx_ppp, 3141 prof->rx_pause, prof->rx_ppp); 3142 if (err) { 3143 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 3144 priv->port, err); 3145 goto out; 3146 } 3147 3148 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { 3149 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); 3150 if (err) { 3151 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", 3152 err); 3153 goto out; 3154 } 3155 } 3156 3157 /* Init port */ 3158 en_warn(priv, "Initializing port\n"); 3159 err = mlx4_INIT_PORT(mdev->dev, priv->port); 3160 if (err) { 3161 en_err(priv, "Failed Initializing port\n"); 3162 goto out; 3163 } 3164 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 3165 3166 /* Initialize time stamp mechanism */ 3167 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 3168 mlx4_en_init_timestamp(mdev); 3169 3170 queue_delayed_work(mdev->workqueue, &priv->service_task, 3171 SERVICE_TASK_DELAY); 3172 3173 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, 3174 mdev->profile.prof[priv->port].rx_ppp, 3175 mdev->profile.prof[priv->port].rx_pause, 3176 mdev->profile.prof[priv->port].tx_ppp, 3177 mdev->profile.prof[priv->port].tx_pause); 3178 3179 err = register_netdev(dev); 3180 if (err) { 3181 en_err(priv, "Netdev registration failed for port %d\n", port); 3182 goto out; 3183 } 3184 3185 priv->registered = 1; 3186 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), 3187 dev); 3188 3189 return 0; 3190 3191 out: 3192 mlx4_en_destroy_netdev(dev); 3193 return err; 3194 } 3195 3196 int mlx4_en_reset_config(struct net_device *dev, 3197 struct hwtstamp_config ts_config, 3198 netdev_features_t features) 3199 { 3200 struct mlx4_en_priv *priv = netdev_priv(dev); 3201 struct mlx4_en_dev *mdev = priv->mdev; 3202 struct mlx4_en_port_profile new_prof; 3203 struct mlx4_en_priv *tmp; 3204 int port_up = 0; 3205 int err = 0; 3206 3207 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3208 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3209 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3210 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) 3211 return 0; /* Nothing to change */ 3212 3213 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3214 (features & NETIF_F_HW_VLAN_CTAG_RX) && 3215 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { 3216 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); 3217 return -EINVAL; 3218 } 3219 3220 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3221 if (!tmp) 3222 return -ENOMEM; 3223 3224 mutex_lock(&mdev->state_lock); 3225 3226 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3227 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3228 3229 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 3230 if (err) 3231 goto out; 3232 3233 if (priv->port_up) { 3234 port_up = 1; 3235 mlx4_en_stop_port(dev, 1); 3236 } 3237 3238 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", 3239 ts_config.rx_filter, 3240 !!(features & NETIF_F_HW_VLAN_CTAG_RX)); 3241 3242 mlx4_en_safe_replace_resources(priv, tmp); 3243 3244 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 3245 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3246 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3247 else 3248 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3249 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { 3250 /* RX time-stamping is OFF, update the RX vlan offload 3251 * to the latest wanted state 3252 */ 3253 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) 3254 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3255 else 3256 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3257 } 3258 3259 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { 3260 if (features & NETIF_F_RXFCS) 3261 dev->features |= NETIF_F_RXFCS; 3262 else 3263 dev->features &= ~NETIF_F_RXFCS; 3264 } 3265 3266 /* RX vlan offload and RX time-stamping can't co-exist ! 3267 * Regardless of the caller's choice, 3268 * Turn Off RX vlan offload in case of time-stamping is ON 3269 */ 3270 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { 3271 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 3272 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); 3273 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3274 } 3275 3276 if (port_up) { 3277 err = mlx4_en_start_port(dev); 3278 if (err) 3279 en_err(priv, "Failed starting port\n"); 3280 } 3281 3282 out: 3283 mutex_unlock(&mdev->state_lock); 3284 kfree(tmp); 3285 if (!err) 3286 netdev_features_change(dev); 3287 return err; 3288 } 3289