/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int q, offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs; up == 0 simply removes
	 * the TC configuration, so guard the division.  e.g. 16 Tx
	 * rings with up == 8 gives each UP two queues, at offsets
	 * 0, 2, 4, ...
	 */
	q = up ? priv->tx_ring_num / up : 0;
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	return 0;
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

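/* Hash the TCP/IPv4 4-tuple into a filter_hash bucket: the two ports
 * are combined (dst_port shifted so the ports do not fully overlap),
 * XORed with src_ip ^ dst_ip, and hash_long() folds the result down to
 * MLX4_EN_FILTER_HASH_SHIFT bits of bucket index.
 */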
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

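/* .ndo_rx_flow_steer callback, invoked by the RFS core (CONFIG_RFS_ACCEL)
 * when it decides a flow would be better delivered on rxq_index.  Only
 * TCP over non-fragmented IPv4 is steered here; everything else returns
 * -EPROTONOSUPPORT.  On success the driver-local filter id is returned,
 * which the core later hands back through rps_may_expire_flow().
 */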
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

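/* Reclaim filters the RFS core no longer cares about.  At most
 * MLX4_EN_FILTER_EXPIRY_QUOTA entries are examined per call; the list
 * head is then rotated past the last entry inspected, so the next pass
 * resumes where this one stopped instead of rescanning the same
 * filters.
 */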
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

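/* Pack a byte-array MAC into the low 48 bits of a u64, most significant
 * byte first, e.g. 00:11:22:33:44:55 becomes 0x0000001122334455.
 */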
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			en_err(priv, "failed to allocate multicast list\n");
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

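/* Diff two cached multicast lists: entries present in dst but missing
 * from src are marked MCLIST_REM, entries present in both are marked
 * MCLIST_NONE, and entries only in src are copied into dst with
 * MCLIST_ADD.  The caller then attaches or detaches steering entries
 * according to these flags.
 */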
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * marking them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_UPLINK);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_unicast_promisc_add(mdev->dev,
							       priv->base_qpn,
							       priv->port);
				if (err)
					en_err(priv, "Failed enabling unicast promiscuous mode\n");

				/* Add the default qp number as multicast
				 * promisc
				 */
				if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
					err = mlx4_multicast_promisc_add(mdev->dev,
									 priv->base_qpn,
									 priv->port);
					if (err)
						en_err(priv, "Failed enabling multicast promiscuous mode\n");
					priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				}
				break;

			case MLX4_STEERING_MODE_A0:
				err = mlx4_SET_PORT_qpn_calc(mdev->dev,
							     priv->port,
							     priv->base_qpn,
							     1);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				break;
			}

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_remove(mdev->dev,
							     priv->port,
							     MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_remove(mdev->dev,
							  priv->base_qpn,
							  priv->port);
			if (err)
				en_err(priv, "Failed disabling unicast promiscuous mode\n");
			/* Disable Multicast promisc */
			if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				if (err)
					en_err(priv, "Failed disabling multicast promiscuous mode\n");
				priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn, 0);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			break;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

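/* .ndo_tx_timeout handler: the stack detected a stalled Tx queue.
 * Recovery is deferred to the watchdog task (mlx4_en_restart below),
 * which restarts the port.
 */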
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

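/* Adaptive Rx coalescing: once per sample interval, derive each ring's
 * packet rate and average packet size.  Small-packet or low-rate rings
 * get the low (latency-friendly) moderation time; between the two rate
 * thresholds the moderation time is interpolated linearly:
 *
 *   moder_time = rx_usecs_low +
 *                (rate - pkt_rate_low) *
 *                (rx_usecs_high - rx_usecs_low) /
 *                (pkt_rate_high - pkt_rate_low)
 */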
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}


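/* Bring the port up.  Resources are brought online in dependency order:
 * Rx rings and CQs first, then the port's Ethernet QP and RSS steering,
 * the drop QP, Tx CQs and rings, and finally port configuration
 * (SET_PORT, INIT_PORT) and the broadcast steering entry.  The error
 * labels below unwind in the reverse order.
 */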
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->mdev->profile.num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	}

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}


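/* Tear the port down: stop the Tx queues, detach the broadcast and
 * cached multicast steering entries, free the Tx rings, RSS QPs and Rx
 * rings, then CLOSE_PORT.  This is the mirror image of
 * mlx4_en_start_port().
 */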
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

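/* Watchdog worker, scheduled from mlx4_en_tx_timeout().  Restarts the
 * port under the state lock; each Tx queue's byte-queue-limit state is
 * reset first so the restarted queues begin with clean accounting.
 */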
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	int i;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		for (i = 0; i < priv->tx_ring_num; i++)
			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}

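/* Allocate software resources for all rings described by the port
 * profile: a contiguous, 256-aligned QP range for the Tx rings, a CQ
 * and ring pair per Tx/Rx queue, and (with RFS) the IRQ CPU reverse
 * map plus the filter list and its lock.
 */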
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;

	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};

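/* Per-port probe: allocate the multiqueue netdev sized by the port
 * profile, validate the burned-in MAC, allocate ring and CQ resources,
 * wire up net_device_ops and features, register the netdev, then
 * configure and init the port and kick off the periodic stats task.
 */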
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 prof->tx_ring_num, prof->rx_ring_num);
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
				priv->tx_ring_num, GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}