/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
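	/* L2 spec: match on destination MAC; the address and mask are
	 * filled in below once the port MAC has been converted to
	 * big-endian form.
	 */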
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			en_err(priv, "failed to allocate multicast list\n");
			mlx4_en_clear_list(dev);
			return;
		}
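		/* copy the 6-byte multicast address into the cached entry */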
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_UPLINK);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_unicast_promisc_add(mdev->dev,
							       priv->base_qpn,
							       priv->port);
				if (err)
					en_err(priv, "Failed enabling unicast promiscuous mode\n");

				/* Add the default qp number as multicast
				 * promisc
				 */
				if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
					err = mlx4_multicast_promisc_add(mdev->dev,
									 priv->base_qpn,
									 priv->port);
					if (err)
						en_err(priv, "Failed enabling multicast promiscuous mode\n");
					priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				}
				break;

			case MLX4_STEERING_MODE_A0:
				err = mlx4_SET_PORT_qpn_calc(mdev->dev,
							     priv->port,
							     priv->base_qpn,
							     1);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				break;
			}

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_remove(mdev->dev,
							     priv->port,
							     MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_remove(mdev->dev,
							  priv->base_qpn,
							  priv->port);
			if (err)
				en_err(priv, "Failed disabling unicast promiscuous mode\n");
			/* Disable Multicast promisc */
			if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				if (err)
					en_err(priv, "Failed disabling multicast promiscuous mode\n");
				priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn, 0);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			break;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");
			}
		}
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate where it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}


int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	}

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}


void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	int i;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		for (i = 0; i < priv->tx_ring_num; i++)
			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;

	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	/* tx_cq is indexed by tx ring, so size it by MAX_TX_RINGS */
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
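	/* Start the periodic stats task; it also drives Rx auto-moderation */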
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}