/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

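/*
 * RFS filter lifecycle (summary of the flow implemented below):
 *
 *  1. The stack calls .ndo_rx_flow_steer (mlx4_en_filter_rfs) with a
 *     flow's IPv4 5-tuple and the desired RX queue index.
 *  2. mlx4_en_filter_rfs() looks the tuple up via filter_hash_bucket();
 *     if no matching mlx4_en_filter exists, one is allocated with
 *     mlx4_en_filter_alloc() and its work item is queued.
 *  3. mlx4_en_filter_work() builds an ETH + IPv4 + TCP/UDP steering rule
 *     and attaches it with mlx4_flow_attach(), recording reg_id; the
 *     'activated' flag keeps expiry from racing with the attach.
 *  4. mlx4_en_filter_rfs_expire() walks the filter list and, for entries
 *     that rps_may_expire_flow() reports as stale, detaches the rule and
 *     frees the filter through mlx4_en_filter_free().
 */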
122 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 123 124 static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 125 { 126 switch (ip_proto) { 127 case IPPROTO_UDP: 128 return MLX4_NET_TRANS_RULE_ID_UDP; 129 case IPPROTO_TCP: 130 return MLX4_NET_TRANS_RULE_ID_TCP; 131 default: 132 return -EPROTONOSUPPORT; 133 } 134 }; 135 136 static void mlx4_en_filter_work(struct work_struct *work) 137 { 138 struct mlx4_en_filter *filter = container_of(work, 139 struct mlx4_en_filter, 140 work); 141 struct mlx4_en_priv *priv = filter->priv; 142 struct mlx4_spec_list spec_tcp_udp = { 143 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 144 { 145 .tcp_udp = { 146 .dst_port = filter->dst_port, 147 .dst_port_msk = (__force __be16)-1, 148 .src_port = filter->src_port, 149 .src_port_msk = (__force __be16)-1, 150 }, 151 }, 152 }; 153 struct mlx4_spec_list spec_ip = { 154 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 155 { 156 .ipv4 = { 157 .dst_ip = filter->dst_ip, 158 .dst_ip_msk = (__force __be32)-1, 159 .src_ip = filter->src_ip, 160 .src_ip_msk = (__force __be32)-1, 161 }, 162 }, 163 }; 164 struct mlx4_spec_list spec_eth = { 165 .id = MLX4_NET_TRANS_RULE_ID_ETH, 166 }; 167 struct mlx4_net_trans_rule rule = { 168 .list = LIST_HEAD_INIT(rule.list), 169 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 170 .exclusive = 1, 171 .allow_loopback = 1, 172 .promisc_mode = MLX4_FS_REGULAR, 173 .port = priv->port, 174 .priority = MLX4_DOMAIN_RFS, 175 }; 176 int rc; 177 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 178 179 if (spec_tcp_udp.id < 0) { 180 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 181 filter->ip_proto); 182 goto ignore; 183 } 184 list_add_tail(&spec_eth.list, &rule.list); 185 list_add_tail(&spec_ip.list, &rule.list); 186 list_add_tail(&spec_tcp_udp.list, &rule.list); 187 188 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 189 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 190 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 191 192 filter->activated = 0; 193 194 if (filter->reg_id) { 195 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 196 if (rc && rc != -ENOENT) 197 en_err(priv, "Error detaching flow. rc = %d\n", rc); 198 } 199 200 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 201 if (rc) 202 en_err(priv, "Error attaching flow. 
err = %d\n", rc); 203 204 ignore: 205 mlx4_en_filter_rfs_expire(priv); 206 207 filter->activated = 1; 208 } 209 210 static inline struct hlist_head * 211 filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 212 __be16 src_port, __be16 dst_port) 213 { 214 unsigned long l; 215 int bucket_idx; 216 217 l = (__force unsigned long)src_port | 218 ((__force unsigned long)dst_port << 2); 219 l ^= (__force unsigned long)(src_ip ^ dst_ip); 220 221 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 222 223 return &priv->filter_hash[bucket_idx]; 224 } 225 226 static struct mlx4_en_filter * 227 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 228 __be32 dst_ip, u8 ip_proto, __be16 src_port, 229 __be16 dst_port, u32 flow_id) 230 { 231 struct mlx4_en_filter *filter = NULL; 232 233 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 234 if (!filter) 235 return NULL; 236 237 filter->priv = priv; 238 filter->rxq_index = rxq_index; 239 INIT_WORK(&filter->work, mlx4_en_filter_work); 240 241 filter->src_ip = src_ip; 242 filter->dst_ip = dst_ip; 243 filter->ip_proto = ip_proto; 244 filter->src_port = src_port; 245 filter->dst_port = dst_port; 246 247 filter->flow_id = flow_id; 248 249 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 250 251 list_add_tail(&filter->next, &priv->filters); 252 hlist_add_head(&filter->filter_chain, 253 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 254 dst_port)); 255 256 return filter; 257 } 258 259 static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 260 { 261 struct mlx4_en_priv *priv = filter->priv; 262 int rc; 263 264 list_del(&filter->next); 265 266 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 267 if (rc && rc != -ENOENT) 268 en_err(priv, "Error detaching flow. 
rc = %d\n", rc); 269 270 kfree(filter); 271 } 272 273 static inline struct mlx4_en_filter * 274 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 275 u8 ip_proto, __be16 src_port, __be16 dst_port) 276 { 277 struct mlx4_en_filter *filter; 278 struct mlx4_en_filter *ret = NULL; 279 280 hlist_for_each_entry(filter, 281 filter_hash_bucket(priv, src_ip, dst_ip, 282 src_port, dst_port), 283 filter_chain) { 284 if (filter->src_ip == src_ip && 285 filter->dst_ip == dst_ip && 286 filter->ip_proto == ip_proto && 287 filter->src_port == src_port && 288 filter->dst_port == dst_port) { 289 ret = filter; 290 break; 291 } 292 } 293 294 return ret; 295 } 296 297 static int 298 mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 299 u16 rxq_index, u32 flow_id) 300 { 301 struct mlx4_en_priv *priv = netdev_priv(net_dev); 302 struct mlx4_en_filter *filter; 303 const struct iphdr *ip; 304 const __be16 *ports; 305 u8 ip_proto; 306 __be32 src_ip; 307 __be32 dst_ip; 308 __be16 src_port; 309 __be16 dst_port; 310 int nhoff = skb_network_offset(skb); 311 int ret = 0; 312 313 if (skb->protocol != htons(ETH_P_IP)) 314 return -EPROTONOSUPPORT; 315 316 ip = (const struct iphdr *)(skb->data + nhoff); 317 if (ip_is_fragment(ip)) 318 return -EPROTONOSUPPORT; 319 320 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 321 return -EPROTONOSUPPORT; 322 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 323 324 ip_proto = ip->protocol; 325 src_ip = ip->saddr; 326 dst_ip = ip->daddr; 327 src_port = ports[0]; 328 dst_port = ports[1]; 329 330 spin_lock_bh(&priv->filters_lock); 331 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 332 src_port, dst_port); 333 if (filter) { 334 if (filter->rxq_index == rxq_index) 335 goto out; 336 337 filter->rxq_index = rxq_index; 338 } else { 339 filter = mlx4_en_filter_alloc(priv, rxq_index, 340 src_ip, dst_ip, ip_proto, 341 src_port, dst_port, flow_id); 342 if (!filter) { 343 ret = -ENOMEM; 344 goto err; 345 } 346 } 347 348 queue_work(priv->mdev->workqueue, &filter->work); 349 350 out: 351 ret = filter->id; 352 err: 353 spin_unlock_bh(&priv->filters_lock); 354 355 return ret; 356 } 357 358 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) 359 { 360 struct mlx4_en_filter *filter, *tmp; 361 LIST_HEAD(del_list); 362 363 spin_lock_bh(&priv->filters_lock); 364 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 365 list_move(&filter->next, &del_list); 366 hlist_del(&filter->filter_chain); 367 } 368 spin_unlock_bh(&priv->filters_lock); 369 370 list_for_each_entry_safe(filter, tmp, &del_list, next) { 371 cancel_work_sync(&filter->work); 372 mlx4_en_filter_free(filter); 373 } 374 } 375 376 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 377 { 378 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 379 LIST_HEAD(del_list); 380 int i = 0; 381 382 spin_lock_bh(&priv->filters_lock); 383 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 384 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 385 break; 386 387 if (filter->activated && 388 !work_pending(&filter->work) && 389 rps_may_expire_flow(priv->dev, 390 filter->rxq_index, filter->flow_id, 391 filter->id)) { 392 list_move(&filter->next, &del_list); 393 hlist_del(&filter->filter_chain); 394 } else 395 last_filter = filter; 396 397 i++; 398 } 399 400 if (last_filter && (&last_filter->next != priv->filters.next)) 401 list_move(&priv->filters, &last_filter->next); 402 403 spin_unlock_bh(&priv->filters_lock); 404 405 
list_for_each_entry_safe(filter, tmp, &del_list, next) 406 mlx4_en_filter_free(filter); 407 } 408 #endif 409 410 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, 411 __be16 proto, u16 vid) 412 { 413 struct mlx4_en_priv *priv = netdev_priv(dev); 414 struct mlx4_en_dev *mdev = priv->mdev; 415 int err; 416 int idx; 417 418 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 419 420 set_bit(vid, priv->active_vlans); 421 422 /* Add VID to port VLAN filter */ 423 mutex_lock(&mdev->state_lock); 424 if (mdev->device_up && priv->port_up) { 425 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 426 if (err) 427 en_err(priv, "Failed configuring VLAN filter\n"); 428 } 429 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) 430 en_dbg(HW, priv, "failed adding vlan %d\n", vid); 431 mutex_unlock(&mdev->state_lock); 432 433 return 0; 434 } 435 436 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 437 __be16 proto, u16 vid) 438 { 439 struct mlx4_en_priv *priv = netdev_priv(dev); 440 struct mlx4_en_dev *mdev = priv->mdev; 441 int err; 442 443 en_dbg(HW, priv, "Killing VID:%d\n", vid); 444 445 clear_bit(vid, priv->active_vlans); 446 447 /* Remove VID from port VLAN filter */ 448 mutex_lock(&mdev->state_lock); 449 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 450 451 if (mdev->device_up && priv->port_up) { 452 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 453 if (err) 454 en_err(priv, "Failed configuring VLAN filter\n"); 455 } 456 mutex_unlock(&mdev->state_lock); 457 458 return 0; 459 } 460 461 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 462 { 463 int i; 464 for (i = ETH_ALEN - 1; i >= 0; --i) { 465 dst_mac[i] = src_mac & 0xff; 466 src_mac >>= 8; 467 } 468 memset(&dst_mac[ETH_ALEN], 0, 2); 469 } 470 471 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 472 unsigned char *mac, int *qpn, u64 *reg_id) 473 { 474 struct mlx4_en_dev *mdev = priv->mdev; 475 struct mlx4_dev *dev = mdev->dev; 476 int err; 477 478 switch (dev->caps.steering_mode) { 479 case MLX4_STEERING_MODE_B0: { 480 struct mlx4_qp qp; 481 u8 gid[16] = {0}; 482 483 qp.qpn = *qpn; 484 memcpy(&gid[10], mac, ETH_ALEN); 485 gid[5] = priv->port; 486 487 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 488 break; 489 } 490 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 491 struct mlx4_spec_list spec_eth = { {NULL} }; 492 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 493 494 struct mlx4_net_trans_rule rule = { 495 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 496 .exclusive = 0, 497 .allow_loopback = 1, 498 .promisc_mode = MLX4_FS_REGULAR, 499 .priority = MLX4_DOMAIN_NIC, 500 }; 501 502 rule.port = priv->port; 503 rule.qpn = *qpn; 504 INIT_LIST_HEAD(&rule.list); 505 506 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 507 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 508 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 509 list_add_tail(&spec_eth.list, &rule.list); 510 511 err = mlx4_flow_attach(dev, &rule, reg_id); 512 break; 513 } 514 default: 515 return -EINVAL; 516 } 517 if (err) 518 en_warn(priv, "Failed Attaching Unicast\n"); 519 520 return err; 521 } 522 523 static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 524 unsigned char *mac, int qpn, u64 reg_id) 525 { 526 struct mlx4_en_dev *mdev = priv->mdev; 527 struct mlx4_dev *dev = mdev->dev; 528 529 switch (dev->caps.steering_mode) { 530 case MLX4_STEERING_MODE_B0: { 531 struct mlx4_qp qp; 532 u8 gid[16] = {0}; 533 534 qp.qpn = qpn; 535 memcpy(&gid[10], mac, ETH_ALEN); 536 gid[5] = priv->port; 537 538 mlx4_unicast_detach(dev, &qp, gid, 
MLX4_PROT_ETH); 539 break; 540 } 541 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 542 mlx4_flow_detach(dev, reg_id); 543 break; 544 } 545 default: 546 en_err(priv, "Invalid steering mode.\n"); 547 } 548 } 549 550 static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 551 { 552 struct mlx4_en_dev *mdev = priv->mdev; 553 struct mlx4_dev *dev = mdev->dev; 554 struct mlx4_mac_entry *entry; 555 int index = 0; 556 int err = 0; 557 u64 reg_id; 558 int *qpn = &priv->base_qpn; 559 u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 560 561 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 562 priv->dev->dev_addr); 563 index = mlx4_register_mac(dev, priv->port, mac); 564 if (index < 0) { 565 err = index; 566 en_err(priv, "Failed adding MAC: %pM\n", 567 priv->dev->dev_addr); 568 return err; 569 } 570 571 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 572 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 573 *qpn = base_qpn + index; 574 return 0; 575 } 576 577 err = mlx4_qp_reserve_range(dev, 1, 1, qpn); 578 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 579 if (err) { 580 en_err(priv, "Failed to reserve qp for mac registration\n"); 581 goto qp_err; 582 } 583 584 err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); 585 if (err) 586 goto steer_err; 587 588 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 589 if (!entry) { 590 err = -ENOMEM; 591 goto alloc_err; 592 } 593 memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); 594 entry->reg_id = reg_id; 595 596 hlist_add_head_rcu(&entry->hlist, 597 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 598 599 return 0; 600 601 alloc_err: 602 mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); 603 604 steer_err: 605 mlx4_qp_release_range(dev, *qpn, 1); 606 607 qp_err: 608 mlx4_unregister_mac(dev, priv->port, mac); 609 return err; 610 } 611 612 static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 613 { 614 struct mlx4_en_dev *mdev = priv->mdev; 615 struct mlx4_dev *dev = mdev->dev; 616 int qpn = priv->base_qpn; 617 u64 mac; 618 619 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 620 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 621 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 622 priv->dev->dev_addr); 623 mlx4_unregister_mac(dev, priv->port, mac); 624 } else { 625 struct mlx4_mac_entry *entry; 626 struct hlist_node *tmp; 627 struct hlist_head *bucket; 628 unsigned int i; 629 630 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 631 bucket = &priv->mac_hash[i]; 632 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 633 mac = mlx4_en_mac_to_u64(entry->mac); 634 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 635 entry->mac); 636 mlx4_en_uc_steer_release(priv, entry->mac, 637 qpn, entry->reg_id); 638 639 mlx4_unregister_mac(dev, priv->port, mac); 640 hlist_del_rcu(&entry->hlist); 641 kfree_rcu(entry, rcu); 642 } 643 } 644 645 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 646 priv->port, qpn); 647 mlx4_qp_release_range(dev, qpn, 1); 648 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 649 } 650 } 651 652 static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, 653 unsigned char *new_mac, unsigned char *prev_mac) 654 { 655 struct mlx4_en_dev *mdev = priv->mdev; 656 struct mlx4_dev *dev = mdev->dev; 657 int err = 0; 658 u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); 659 660 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 661 struct hlist_head *bucket; 662 unsigned int mac_hash; 663 struct mlx4_mac_entry *entry; 664 struct hlist_node *tmp; 665 u64 prev_mac_u64 = 
mlx4_en_mac_to_u64(prev_mac); 666 667 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 668 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 669 if (ether_addr_equal_64bits(entry->mac, prev_mac)) { 670 mlx4_en_uc_steer_release(priv, entry->mac, 671 qpn, entry->reg_id); 672 mlx4_unregister_mac(dev, priv->port, 673 prev_mac_u64); 674 hlist_del_rcu(&entry->hlist); 675 synchronize_rcu(); 676 memcpy(entry->mac, new_mac, ETH_ALEN); 677 entry->reg_id = 0; 678 mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; 679 hlist_add_head_rcu(&entry->hlist, 680 &priv->mac_hash[mac_hash]); 681 mlx4_register_mac(dev, priv->port, new_mac_u64); 682 err = mlx4_en_uc_steer_add(priv, new_mac, 683 &qpn, 684 &entry->reg_id); 685 return err; 686 } 687 } 688 return -EINVAL; 689 } 690 691 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 692 } 693 694 u64 mlx4_en_mac_to_u64(u8 *addr) 695 { 696 u64 mac = 0; 697 int i; 698 699 for (i = 0; i < ETH_ALEN; i++) { 700 mac <<= 8; 701 mac |= addr[i]; 702 } 703 return mac; 704 } 705 706 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) 707 { 708 int err = 0; 709 710 if (priv->port_up) { 711 /* Remove old MAC and insert the new one */ 712 err = mlx4_en_replace_mac(priv, priv->base_qpn, 713 priv->dev->dev_addr, priv->prev_mac); 714 if (err) 715 en_err(priv, "Failed changing HW MAC address\n"); 716 memcpy(priv->prev_mac, priv->dev->dev_addr, 717 sizeof(priv->prev_mac)); 718 } else 719 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 720 721 return err; 722 } 723 724 static int mlx4_en_set_mac(struct net_device *dev, void *addr) 725 { 726 struct mlx4_en_priv *priv = netdev_priv(dev); 727 struct mlx4_en_dev *mdev = priv->mdev; 728 struct sockaddr *saddr = addr; 729 int err; 730 731 if (!is_valid_ether_addr(saddr->sa_data)) 732 return -EADDRNOTAVAIL; 733 734 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); 735 736 mutex_lock(&mdev->state_lock); 737 err = mlx4_en_do_set_mac(priv); 738 mutex_unlock(&mdev->state_lock); 739 740 return err; 741 } 742 743 static void mlx4_en_clear_list(struct net_device *dev) 744 { 745 struct mlx4_en_priv *priv = netdev_priv(dev); 746 struct mlx4_en_mc_list *tmp, *mc_to_del; 747 748 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 749 list_del(&mc_to_del->list); 750 kfree(mc_to_del); 751 } 752 } 753 754 static void mlx4_en_cache_mclist(struct net_device *dev) 755 { 756 struct mlx4_en_priv *priv = netdev_priv(dev); 757 struct netdev_hw_addr *ha; 758 struct mlx4_en_mc_list *tmp; 759 760 mlx4_en_clear_list(dev); 761 netdev_for_each_mc_addr(ha, dev) { 762 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 763 if (!tmp) { 764 mlx4_en_clear_list(dev); 765 return; 766 } 767 memcpy(tmp->addr, ha->addr, ETH_ALEN); 768 list_add_tail(&tmp->list, &priv->mc_list); 769 } 770 } 771 772 static void update_mclist_flags(struct mlx4_en_priv *priv, 773 struct list_head *dst, 774 struct list_head *src) 775 { 776 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 777 bool found; 778 779 /* Find all the entries that should be removed from dst, 780 * These are the entries that are not found in src 781 */ 782 list_for_each_entry(dst_tmp, dst, list) { 783 found = false; 784 list_for_each_entry(src_tmp, src, list) { 785 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 786 found = true; 787 break; 788 } 789 } 790 if (!found) 791 dst_tmp->action = MCLIST_REM; 792 } 793 794 /* Add entries that exist in src but not in dst 795 * mark them as need to add 796 */ 797 list_for_each_entry(src_tmp, src, list) { 798 
found = false; 799 list_for_each_entry(dst_tmp, dst, list) { 800 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 801 dst_tmp->action = MCLIST_NONE; 802 found = true; 803 break; 804 } 805 } 806 if (!found) { 807 new_mc = kmemdup(src_tmp, 808 sizeof(struct mlx4_en_mc_list), 809 GFP_KERNEL); 810 if (!new_mc) 811 return; 812 813 new_mc->action = MCLIST_ADD; 814 list_add_tail(&new_mc->list, dst); 815 } 816 } 817 } 818 819 static void mlx4_en_set_rx_mode(struct net_device *dev) 820 { 821 struct mlx4_en_priv *priv = netdev_priv(dev); 822 823 if (!priv->port_up) 824 return; 825 826 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 827 } 828 829 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 830 struct mlx4_en_dev *mdev) 831 { 832 int err = 0; 833 834 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 835 if (netif_msg_rx_status(priv)) 836 en_warn(priv, "Entering promiscuous mode\n"); 837 priv->flags |= MLX4_EN_FLAG_PROMISC; 838 839 /* Enable promiscouos mode */ 840 switch (mdev->dev->caps.steering_mode) { 841 case MLX4_STEERING_MODE_DEVICE_MANAGED: 842 err = mlx4_flow_steer_promisc_add(mdev->dev, 843 priv->port, 844 priv->base_qpn, 845 MLX4_FS_ALL_DEFAULT); 846 if (err) 847 en_err(priv, "Failed enabling promiscuous mode\n"); 848 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 849 break; 850 851 case MLX4_STEERING_MODE_B0: 852 err = mlx4_unicast_promisc_add(mdev->dev, 853 priv->base_qpn, 854 priv->port); 855 if (err) 856 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 857 858 /* Add the default qp number as multicast 859 * promisc 860 */ 861 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 862 err = mlx4_multicast_promisc_add(mdev->dev, 863 priv->base_qpn, 864 priv->port); 865 if (err) 866 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 867 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 868 } 869 break; 870 871 case MLX4_STEERING_MODE_A0: 872 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 873 priv->port, 874 priv->base_qpn, 875 1); 876 if (err) 877 en_err(priv, "Failed enabling promiscuous mode\n"); 878 break; 879 } 880 881 /* Disable port multicast filter (unconditionally) */ 882 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 883 0, MLX4_MCAST_DISABLE); 884 if (err) 885 en_err(priv, "Failed disabling multicast filter\n"); 886 887 /* Disable port VLAN filter */ 888 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 889 if (err) 890 en_err(priv, "Failed disabling VLAN filter\n"); 891 } 892 } 893 894 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 895 struct mlx4_en_dev *mdev) 896 { 897 int err = 0; 898 899 if (netif_msg_rx_status(priv)) 900 en_warn(priv, "Leaving promiscuous mode\n"); 901 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 902 903 /* Disable promiscouos mode */ 904 switch (mdev->dev->caps.steering_mode) { 905 case MLX4_STEERING_MODE_DEVICE_MANAGED: 906 err = mlx4_flow_steer_promisc_remove(mdev->dev, 907 priv->port, 908 MLX4_FS_ALL_DEFAULT); 909 if (err) 910 en_err(priv, "Failed disabling promiscuous mode\n"); 911 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 912 break; 913 914 case MLX4_STEERING_MODE_B0: 915 err = mlx4_unicast_promisc_remove(mdev->dev, 916 priv->base_qpn, 917 priv->port); 918 if (err) 919 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 920 /* Disable Multicast promisc */ 921 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 922 err = mlx4_multicast_promisc_remove(mdev->dev, 923 priv->base_qpn, 924 priv->port); 925 if (err) 926 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 927 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 
928 } 929 break; 930 931 case MLX4_STEERING_MODE_A0: 932 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 933 priv->port, 934 priv->base_qpn, 0); 935 if (err) 936 en_err(priv, "Failed disabling promiscuous mode\n"); 937 break; 938 } 939 940 /* Enable port VLAN filter */ 941 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 942 if (err) 943 en_err(priv, "Failed enabling VLAN filter\n"); 944 } 945 946 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 947 struct net_device *dev, 948 struct mlx4_en_dev *mdev) 949 { 950 struct mlx4_en_mc_list *mclist, *tmp; 951 u64 mcast_addr = 0; 952 u8 mc_list[16] = {0}; 953 int err = 0; 954 955 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 956 if (dev->flags & IFF_ALLMULTI) { 957 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 958 0, MLX4_MCAST_DISABLE); 959 if (err) 960 en_err(priv, "Failed disabling multicast filter\n"); 961 962 /* Add the default qp number as multicast promisc */ 963 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 964 switch (mdev->dev->caps.steering_mode) { 965 case MLX4_STEERING_MODE_DEVICE_MANAGED: 966 err = mlx4_flow_steer_promisc_add(mdev->dev, 967 priv->port, 968 priv->base_qpn, 969 MLX4_FS_MC_DEFAULT); 970 break; 971 972 case MLX4_STEERING_MODE_B0: 973 err = mlx4_multicast_promisc_add(mdev->dev, 974 priv->base_qpn, 975 priv->port); 976 break; 977 978 case MLX4_STEERING_MODE_A0: 979 break; 980 } 981 if (err) 982 en_err(priv, "Failed entering multicast promisc mode\n"); 983 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 984 } 985 } else { 986 /* Disable Multicast promisc */ 987 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 988 switch (mdev->dev->caps.steering_mode) { 989 case MLX4_STEERING_MODE_DEVICE_MANAGED: 990 err = mlx4_flow_steer_promisc_remove(mdev->dev, 991 priv->port, 992 MLX4_FS_MC_DEFAULT); 993 break; 994 995 case MLX4_STEERING_MODE_B0: 996 err = mlx4_multicast_promisc_remove(mdev->dev, 997 priv->base_qpn, 998 priv->port); 999 break; 1000 1001 case MLX4_STEERING_MODE_A0: 1002 break; 1003 } 1004 if (err) 1005 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 1006 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1007 } 1008 1009 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1010 0, MLX4_MCAST_DISABLE); 1011 if (err) 1012 en_err(priv, "Failed disabling multicast filter\n"); 1013 1014 /* Flush mcast filter and init it with broadcast address */ 1015 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 1016 1, MLX4_MCAST_CONFIG); 1017 1018 /* Update multicast list - we cache all addresses so they won't 1019 * change while HW is updated holding the command semaphor */ 1020 netif_addr_lock_bh(dev); 1021 mlx4_en_cache_mclist(dev); 1022 netif_addr_unlock_bh(dev); 1023 list_for_each_entry(mclist, &priv->mc_list, list) { 1024 mcast_addr = mlx4_en_mac_to_u64(mclist->addr); 1025 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 1026 mcast_addr, 0, MLX4_MCAST_CONFIG); 1027 } 1028 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1029 0, MLX4_MCAST_ENABLE); 1030 if (err) 1031 en_err(priv, "Failed enabling multicast filter\n"); 1032 1033 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 1034 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1035 if (mclist->action == MCLIST_REM) { 1036 /* detach this address and delete from list */ 1037 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1038 mc_list[5] = priv->port; 1039 err = mlx4_multicast_detach(mdev->dev, 1040 &priv->rss_map.indir_qp, 1041 mc_list, 1042 MLX4_PROT_ETH, 1043 mclist->reg_id); 1044 if (err) 1045 en_err(priv, "Fail to detach multicast 
address\n"); 1046 1047 /* remove from list */ 1048 list_del(&mclist->list); 1049 kfree(mclist); 1050 } else if (mclist->action == MCLIST_ADD) { 1051 /* attach the address */ 1052 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1053 /* needed for B0 steering support */ 1054 mc_list[5] = priv->port; 1055 err = mlx4_multicast_attach(mdev->dev, 1056 &priv->rss_map.indir_qp, 1057 mc_list, 1058 priv->port, 0, 1059 MLX4_PROT_ETH, 1060 &mclist->reg_id); 1061 if (err) 1062 en_err(priv, "Fail to attach multicast address\n"); 1063 1064 } 1065 } 1066 } 1067 } 1068 1069 static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, 1070 struct net_device *dev, 1071 struct mlx4_en_dev *mdev) 1072 { 1073 struct netdev_hw_addr *ha; 1074 struct mlx4_mac_entry *entry; 1075 struct hlist_node *tmp; 1076 bool found; 1077 u64 mac; 1078 int err = 0; 1079 struct hlist_head *bucket; 1080 unsigned int i; 1081 int removed = 0; 1082 u32 prev_flags; 1083 1084 /* Note that we do not need to protect our mac_hash traversal with rcu, 1085 * since all modification code is protected by mdev->state_lock 1086 */ 1087 1088 /* find what to remove */ 1089 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 1090 bucket = &priv->mac_hash[i]; 1091 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 1092 found = false; 1093 netdev_for_each_uc_addr(ha, dev) { 1094 if (ether_addr_equal_64bits(entry->mac, 1095 ha->addr)) { 1096 found = true; 1097 break; 1098 } 1099 } 1100 1101 /* MAC address of the port is not in uc list */ 1102 if (ether_addr_equal_64bits(entry->mac, dev->dev_addr)) 1103 found = true; 1104 1105 if (!found) { 1106 mac = mlx4_en_mac_to_u64(entry->mac); 1107 mlx4_en_uc_steer_release(priv, entry->mac, 1108 priv->base_qpn, 1109 entry->reg_id); 1110 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1111 1112 hlist_del_rcu(&entry->hlist); 1113 kfree_rcu(entry, rcu); 1114 en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", 1115 entry->mac, priv->port); 1116 ++removed; 1117 } 1118 } 1119 } 1120 1121 /* if we didn't remove anything, there is no use in trying to add 1122 * again once we are in a forced promisc mode state 1123 */ 1124 if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) 1125 return; 1126 1127 prev_flags = priv->flags; 1128 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 1129 1130 /* find what to add */ 1131 netdev_for_each_uc_addr(ha, dev) { 1132 found = false; 1133 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; 1134 hlist_for_each_entry(entry, bucket, hlist) { 1135 if (ether_addr_equal_64bits(entry->mac, ha->addr)) { 1136 found = true; 1137 break; 1138 } 1139 } 1140 1141 if (!found) { 1142 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1143 if (!entry) { 1144 en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", 1145 ha->addr, priv->port); 1146 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1147 break; 1148 } 1149 mac = mlx4_en_mac_to_u64(ha->addr); 1150 memcpy(entry->mac, ha->addr, ETH_ALEN); 1151 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1152 if (err < 0) { 1153 en_err(priv, "Failed registering MAC %pM on port %d: %d\n", 1154 ha->addr, priv->port, err); 1155 kfree(entry); 1156 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1157 break; 1158 } 1159 err = mlx4_en_uc_steer_add(priv, ha->addr, 1160 &priv->base_qpn, 1161 &entry->reg_id); 1162 if (err) { 1163 en_err(priv, "Failed adding MAC %pM on port %d: %d\n", 1164 ha->addr, priv->port, err); 1165 mlx4_unregister_mac(mdev->dev, priv->port, mac); 1166 kfree(entry); 1167 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1168 break; 1169 } else { 1170 
unsigned int mac_hash; 1171 en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", 1172 ha->addr, priv->port); 1173 mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; 1174 bucket = &priv->mac_hash[mac_hash]; 1175 hlist_add_head_rcu(&entry->hlist, bucket); 1176 } 1177 } 1178 } 1179 1180 if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1181 en_warn(priv, "Forcing promiscuous mode on port:%d\n", 1182 priv->port); 1183 } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { 1184 en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", 1185 priv->port); 1186 } 1187 } 1188 1189 static void mlx4_en_do_set_rx_mode(struct work_struct *work) 1190 { 1191 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1192 rx_mode_task); 1193 struct mlx4_en_dev *mdev = priv->mdev; 1194 struct net_device *dev = priv->dev; 1195 1196 mutex_lock(&mdev->state_lock); 1197 if (!mdev->device_up) { 1198 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 1199 goto out; 1200 } 1201 if (!priv->port_up) { 1202 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 1203 goto out; 1204 } 1205 1206 if (!netif_carrier_ok(dev)) { 1207 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1208 if (priv->port_state.link_state) { 1209 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1210 netif_carrier_on(dev); 1211 en_dbg(LINK, priv, "Link Up\n"); 1212 } 1213 } 1214 } 1215 1216 if (dev->priv_flags & IFF_UNICAST_FLT) 1217 mlx4_en_do_uc_filter(priv, dev, mdev); 1218 1219 /* Promsicuous mode: disable all filters */ 1220 if ((dev->flags & IFF_PROMISC) || 1221 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 1222 mlx4_en_set_promisc_mode(priv, mdev); 1223 goto out; 1224 } 1225 1226 /* Not in promiscuous mode */ 1227 if (priv->flags & MLX4_EN_FLAG_PROMISC) 1228 mlx4_en_clear_promisc_mode(priv, mdev); 1229 1230 mlx4_en_do_multicast(priv, dev, mdev); 1231 out: 1232 mutex_unlock(&mdev->state_lock); 1233 } 1234 1235 #ifdef CONFIG_NET_POLL_CONTROLLER 1236 static void mlx4_en_netpoll(struct net_device *dev) 1237 { 1238 struct mlx4_en_priv *priv = netdev_priv(dev); 1239 struct mlx4_en_cq *cq; 1240 unsigned long flags; 1241 int i; 1242 1243 for (i = 0; i < priv->rx_ring_num; i++) { 1244 cq = priv->rx_cq[i]; 1245 spin_lock_irqsave(&cq->lock, flags); 1246 napi_synchronize(&cq->napi); 1247 mlx4_en_process_rx_cq(dev, cq, 0); 1248 spin_unlock_irqrestore(&cq->lock, flags); 1249 } 1250 } 1251 #endif 1252 1253 static void mlx4_en_tx_timeout(struct net_device *dev) 1254 { 1255 struct mlx4_en_priv *priv = netdev_priv(dev); 1256 struct mlx4_en_dev *mdev = priv->mdev; 1257 int i; 1258 1259 if (netif_msg_timer(priv)) 1260 en_warn(priv, "Tx timeout called on port:%d\n", priv->port); 1261 1262 for (i = 0; i < priv->tx_ring_num; i++) { 1263 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) 1264 continue; 1265 en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", 1266 i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, 1267 priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); 1268 } 1269 1270 priv->port_stats.tx_timeout++; 1271 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1272 queue_work(mdev->workqueue, &priv->watchdog_task); 1273 } 1274 1275 1276 static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) 1277 { 1278 struct mlx4_en_priv *priv = netdev_priv(dev); 1279 1280 spin_lock_bh(&priv->stats_lock); 1281 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); 1282 spin_unlock_bh(&priv->stats_lock); 1283 1284 return &priv->ret_stats; 1285 } 1286 1287 static void mlx4_en_set_default_moderation(struct 
mlx4_en_priv *priv) 1288 { 1289 struct mlx4_en_cq *cq; 1290 int i; 1291 1292 /* If we haven't received a specific coalescing setting 1293 * (module param), we set the moderation parameters as follows: 1294 * - moder_cnt is set to the number of mtu sized packets to 1295 * satisfy our coalescing target. 1296 * - moder_time is set to a fixed value. 1297 */ 1298 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 1299 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1300 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1301 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1302 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", 1303 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 1304 1305 /* Setup cq moderation params */ 1306 for (i = 0; i < priv->rx_ring_num; i++) { 1307 cq = priv->rx_cq[i]; 1308 cq->moder_cnt = priv->rx_frames; 1309 cq->moder_time = priv->rx_usecs; 1310 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1311 priv->last_moder_packets[i] = 0; 1312 priv->last_moder_bytes[i] = 0; 1313 } 1314 1315 for (i = 0; i < priv->tx_ring_num; i++) { 1316 cq = priv->tx_cq[i]; 1317 cq->moder_cnt = priv->tx_frames; 1318 cq->moder_time = priv->tx_usecs; 1319 } 1320 1321 /* Reset auto-moderation params */ 1322 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1323 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1324 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1325 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1326 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1327 priv->adaptive_rx_coal = 1; 1328 priv->last_moder_jiffies = 0; 1329 priv->last_moder_tx_packets = 0; 1330 } 1331 1332 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1333 { 1334 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1335 struct mlx4_en_cq *cq; 1336 unsigned long packets; 1337 unsigned long rate; 1338 unsigned long avg_pkt_size; 1339 unsigned long rx_packets; 1340 unsigned long rx_bytes; 1341 unsigned long rx_pkt_diff; 1342 int moder_time; 1343 int ring, err; 1344 1345 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1346 return; 1347 1348 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1349 spin_lock_bh(&priv->stats_lock); 1350 rx_packets = priv->rx_ring[ring]->packets; 1351 rx_bytes = priv->rx_ring[ring]->bytes; 1352 spin_unlock_bh(&priv->stats_lock); 1353 1354 rx_pkt_diff = ((unsigned long) (rx_packets - 1355 priv->last_moder_packets[ring])); 1356 packets = rx_pkt_diff; 1357 rate = packets * HZ / period; 1358 avg_pkt_size = packets ? 
((unsigned long) (rx_bytes - 1359 priv->last_moder_bytes[ring])) / packets : 0; 1360 1361 /* Apply auto-moderation only when packet rate 1362 * exceeds a rate that it matters */ 1363 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1364 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1365 if (rate < priv->pkt_rate_low) 1366 moder_time = priv->rx_usecs_low; 1367 else if (rate > priv->pkt_rate_high) 1368 moder_time = priv->rx_usecs_high; 1369 else 1370 moder_time = (rate - priv->pkt_rate_low) * 1371 (priv->rx_usecs_high - priv->rx_usecs_low) / 1372 (priv->pkt_rate_high - priv->pkt_rate_low) + 1373 priv->rx_usecs_low; 1374 } else { 1375 moder_time = priv->rx_usecs_low; 1376 } 1377 1378 if (moder_time != priv->last_moder_time[ring]) { 1379 priv->last_moder_time[ring] = moder_time; 1380 cq = priv->rx_cq[ring]; 1381 cq->moder_time = moder_time; 1382 cq->moder_cnt = priv->rx_frames; 1383 err = mlx4_en_set_cq_moder(priv, cq); 1384 if (err) 1385 en_err(priv, "Failed modifying moderation for cq:%d\n", 1386 ring); 1387 } 1388 priv->last_moder_packets[ring] = rx_packets; 1389 priv->last_moder_bytes[ring] = rx_bytes; 1390 } 1391 1392 priv->last_moder_jiffies = jiffies; 1393 } 1394 1395 static void mlx4_en_do_get_stats(struct work_struct *work) 1396 { 1397 struct delayed_work *delay = to_delayed_work(work); 1398 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1399 stats_task); 1400 struct mlx4_en_dev *mdev = priv->mdev; 1401 int err; 1402 1403 mutex_lock(&mdev->state_lock); 1404 if (mdev->device_up) { 1405 if (priv->port_up) { 1406 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1407 if (err) 1408 en_dbg(HW, priv, "Could not update stats\n"); 1409 1410 mlx4_en_auto_moderation(priv); 1411 } 1412 1413 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1414 } 1415 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1416 mlx4_en_do_set_mac(priv); 1417 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1418 } 1419 mutex_unlock(&mdev->state_lock); 1420 } 1421 1422 /* mlx4_en_service_task - Run service task for tasks that needed to be done 1423 * periodically 1424 */ 1425 static void mlx4_en_service_task(struct work_struct *work) 1426 { 1427 struct delayed_work *delay = to_delayed_work(work); 1428 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1429 service_task); 1430 struct mlx4_en_dev *mdev = priv->mdev; 1431 1432 mutex_lock(&mdev->state_lock); 1433 if (mdev->device_up) { 1434 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 1435 mlx4_en_ptp_overflow_check(mdev); 1436 1437 queue_delayed_work(mdev->workqueue, &priv->service_task, 1438 SERVICE_TASK_DELAY); 1439 } 1440 mutex_unlock(&mdev->state_lock); 1441 } 1442 1443 static void mlx4_en_linkstate(struct work_struct *work) 1444 { 1445 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1446 linkstate_task); 1447 struct mlx4_en_dev *mdev = priv->mdev; 1448 int linkstate = priv->link_state; 1449 1450 mutex_lock(&mdev->state_lock); 1451 /* If observable port state changed set carrier state and 1452 * report to system log */ 1453 if (priv->last_link_state != linkstate) { 1454 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1455 en_info(priv, "Link Down\n"); 1456 netif_carrier_off(priv->dev); 1457 } else { 1458 en_info(priv, "Link Up\n"); 1459 netif_carrier_on(priv->dev); 1460 } 1461 } 1462 priv->last_link_state = linkstate; 1463 mutex_unlock(&mdev->state_lock); 1464 } 1465 1466 1467 int mlx4_en_start_port(struct net_device *dev) 1468 { 1469 struct mlx4_en_priv *priv = 
netdev_priv(dev); 1470 struct mlx4_en_dev *mdev = priv->mdev; 1471 struct mlx4_en_cq *cq; 1472 struct mlx4_en_tx_ring *tx_ring; 1473 int rx_index = 0; 1474 int tx_index = 0; 1475 int err = 0; 1476 int i; 1477 int j; 1478 u8 mc_list[16] = {0}; 1479 1480 if (priv->port_up) { 1481 en_dbg(DRV, priv, "start port called while port already up\n"); 1482 return 0; 1483 } 1484 1485 INIT_LIST_HEAD(&priv->mc_list); 1486 INIT_LIST_HEAD(&priv->curr_list); 1487 INIT_LIST_HEAD(&priv->ethtool_list); 1488 memset(&priv->ethtool_rules[0], 0, 1489 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1490 1491 /* Calculate Rx buf size */ 1492 dev->mtu = min(dev->mtu, priv->max_mtu); 1493 mlx4_en_calc_rx_buf(dev); 1494 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1495 1496 /* Configure rx cq's and rings */ 1497 err = mlx4_en_activate_rx_rings(priv); 1498 if (err) { 1499 en_err(priv, "Failed to activate RX rings\n"); 1500 return err; 1501 } 1502 for (i = 0; i < priv->rx_ring_num; i++) { 1503 cq = priv->rx_cq[i]; 1504 1505 mlx4_en_cq_init_lock(cq); 1506 1507 err = mlx4_en_activate_cq(priv, cq, i); 1508 if (err) { 1509 en_err(priv, "Failed activating Rx CQ\n"); 1510 goto cq_err; 1511 } 1512 for (j = 0; j < cq->size; j++) 1513 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1514 err = mlx4_en_set_cq_moder(priv, cq); 1515 if (err) { 1516 en_err(priv, "Failed setting cq moderation parameters"); 1517 mlx4_en_deactivate_cq(priv, cq); 1518 goto cq_err; 1519 } 1520 mlx4_en_arm_cq(priv, cq); 1521 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1522 ++rx_index; 1523 } 1524 1525 /* Set qp number */ 1526 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1527 err = mlx4_en_get_qp(priv); 1528 if (err) { 1529 en_err(priv, "Failed getting eth qp\n"); 1530 goto cq_err; 1531 } 1532 mdev->mac_removed[priv->port] = 0; 1533 1534 err = mlx4_en_config_rss_steer(priv); 1535 if (err) { 1536 en_err(priv, "Failed configuring rss steering\n"); 1537 goto mac_err; 1538 } 1539 1540 err = mlx4_en_create_drop_qp(priv); 1541 if (err) 1542 goto rss_err; 1543 1544 /* Configure tx cq's and rings */ 1545 for (i = 0; i < priv->tx_ring_num; i++) { 1546 /* Configure cq */ 1547 cq = priv->tx_cq[i]; 1548 err = mlx4_en_activate_cq(priv, cq, i); 1549 if (err) { 1550 en_err(priv, "Failed allocating Tx CQ\n"); 1551 goto tx_err; 1552 } 1553 err = mlx4_en_set_cq_moder(priv, cq); 1554 if (err) { 1555 en_err(priv, "Failed setting cq moderation parameters"); 1556 mlx4_en_deactivate_cq(priv, cq); 1557 goto tx_err; 1558 } 1559 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 1560 cq->buf->wqe_index = cpu_to_be16(0xffff); 1561 1562 /* Configure ring */ 1563 tx_ring = priv->tx_ring[i]; 1564 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1565 i / priv->num_tx_rings_p_up); 1566 if (err) { 1567 en_err(priv, "Failed allocating Tx ring\n"); 1568 mlx4_en_deactivate_cq(priv, cq); 1569 goto tx_err; 1570 } 1571 tx_ring->tx_queue = netdev_get_tx_queue(dev, i); 1572 1573 /* Arm CQ for TX completions */ 1574 mlx4_en_arm_cq(priv, cq); 1575 1576 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1577 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1578 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 1579 ++tx_index; 1580 } 1581 1582 /* Configure port */ 1583 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1584 priv->rx_skb_size + ETH_FCS_LEN, 1585 priv->prof->tx_pause, 1586 priv->prof->tx_ppp, 1587 priv->prof->rx_pause, 1588 priv->prof->rx_ppp); 1589 if (err) { 1590 en_err(priv, "Failed setting port general configurations 
for port %d, with error %d\n", 1591 priv->port, err); 1592 goto tx_err; 1593 } 1594 /* Set default qp number */ 1595 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1596 if (err) { 1597 en_err(priv, "Failed setting default qp numbers\n"); 1598 goto tx_err; 1599 } 1600 1601 /* Init port */ 1602 en_dbg(HW, priv, "Initializing port\n"); 1603 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1604 if (err) { 1605 en_err(priv, "Failed Initializing port\n"); 1606 goto tx_err; 1607 } 1608 1609 /* Attach rx QP to bradcast address */ 1610 memset(&mc_list[10], 0xff, ETH_ALEN); 1611 mc_list[5] = priv->port; /* needed for B0 steering support */ 1612 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1613 priv->port, 0, MLX4_PROT_ETH, 1614 &priv->broadcast_id)) 1615 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1616 1617 /* Must redo promiscuous mode setup. */ 1618 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1619 1620 /* Schedule multicast task to populate multicast list */ 1621 queue_work(mdev->workqueue, &priv->rx_mode_task); 1622 1623 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); 1624 1625 priv->port_up = true; 1626 netif_tx_start_all_queues(dev); 1627 netif_device_attach(dev); 1628 1629 return 0; 1630 1631 tx_err: 1632 while (tx_index--) { 1633 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); 1634 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); 1635 } 1636 mlx4_en_destroy_drop_qp(priv); 1637 rss_err: 1638 mlx4_en_release_rss_steer(priv); 1639 mac_err: 1640 mlx4_en_put_qp(priv); 1641 cq_err: 1642 while (rx_index--) 1643 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1644 for (i = 0; i < priv->rx_ring_num; i++) 1645 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1646 1647 return err; /* need to close devices */ 1648 } 1649 1650 1651 void mlx4_en_stop_port(struct net_device *dev, int detach) 1652 { 1653 struct mlx4_en_priv *priv = netdev_priv(dev); 1654 struct mlx4_en_dev *mdev = priv->mdev; 1655 struct mlx4_en_mc_list *mclist, *tmp; 1656 struct ethtool_flow_id *flow, *tmp_flow; 1657 int i; 1658 u8 mc_list[16] = {0}; 1659 1660 if (!priv->port_up) { 1661 en_dbg(DRV, priv, "stop port called while port already down\n"); 1662 return; 1663 } 1664 1665 /* close port*/ 1666 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1667 1668 /* Synchronize with tx routine */ 1669 netif_tx_lock_bh(dev); 1670 if (detach) 1671 netif_device_detach(dev); 1672 netif_tx_stop_all_queues(dev); 1673 netif_tx_unlock_bh(dev); 1674 1675 netif_tx_disable(dev); 1676 1677 /* Set port as not active */ 1678 priv->port_up = false; 1679 1680 /* Promsicuous mode */ 1681 if (mdev->dev->caps.steering_mode == 1682 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1683 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1684 MLX4_EN_FLAG_MC_PROMISC); 1685 mlx4_flow_steer_promisc_remove(mdev->dev, 1686 priv->port, 1687 MLX4_FS_ALL_DEFAULT); 1688 mlx4_flow_steer_promisc_remove(mdev->dev, 1689 priv->port, 1690 MLX4_FS_MC_DEFAULT); 1691 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1692 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1693 1694 /* Disable promiscouos mode */ 1695 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1696 priv->port); 1697 1698 /* Disable Multicast promisc */ 1699 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1700 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1701 priv->port); 1702 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1703 } 1704 } 1705 1706 /* Detach All multicasts */ 1707 memset(&mc_list[10], 0xff, ETH_ALEN); 1708 mc_list[5] = priv->port; /* 
needed for B0 steering support */ 1709 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1710 MLX4_PROT_ETH, priv->broadcast_id); 1711 list_for_each_entry(mclist, &priv->curr_list, list) { 1712 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1713 mc_list[5] = priv->port; 1714 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1715 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1716 } 1717 mlx4_en_clear_list(dev); 1718 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1719 list_del(&mclist->list); 1720 kfree(mclist); 1721 } 1722 1723 /* Flush multicast filter */ 1724 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1725 1726 /* Remove flow steering rules for the port*/ 1727 if (mdev->dev->caps.steering_mode == 1728 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1729 ASSERT_RTNL(); 1730 list_for_each_entry_safe(flow, tmp_flow, 1731 &priv->ethtool_list, list) { 1732 mlx4_flow_detach(mdev->dev, flow->id); 1733 list_del(&flow->list); 1734 } 1735 } 1736 1737 mlx4_en_destroy_drop_qp(priv); 1738 1739 /* Free TX Rings */ 1740 for (i = 0; i < priv->tx_ring_num; i++) { 1741 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); 1742 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); 1743 } 1744 msleep(10); 1745 1746 for (i = 0; i < priv->tx_ring_num; i++) 1747 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); 1748 1749 /* Free RSS qps */ 1750 mlx4_en_release_rss_steer(priv); 1751 1752 /* Unregister Mac address for the port */ 1753 mlx4_en_put_qp(priv); 1754 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) 1755 mdev->mac_removed[priv->port] = 1; 1756 1757 /* Free RX Rings */ 1758 for (i = 0; i < priv->rx_ring_num; i++) { 1759 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1760 1761 local_bh_disable(); 1762 while (!mlx4_en_cq_lock_napi(cq)) { 1763 pr_info("CQ %d locked\n", i); 1764 mdelay(1); 1765 } 1766 local_bh_enable(); 1767 1768 while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) 1769 msleep(1); 1770 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1771 mlx4_en_deactivate_cq(priv, cq); 1772 } 1773 } 1774 1775 static void mlx4_en_restart(struct work_struct *work) 1776 { 1777 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1778 watchdog_task); 1779 struct mlx4_en_dev *mdev = priv->mdev; 1780 struct net_device *dev = priv->dev; 1781 1782 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1783 1784 mutex_lock(&mdev->state_lock); 1785 if (priv->port_up) { 1786 mlx4_en_stop_port(dev, 1); 1787 if (mlx4_en_start_port(dev)) 1788 en_err(priv, "Failed restarting port %d\n", priv->port); 1789 } 1790 mutex_unlock(&mdev->state_lock); 1791 } 1792 1793 static void mlx4_en_clear_stats(struct net_device *dev) 1794 { 1795 struct mlx4_en_priv *priv = netdev_priv(dev); 1796 struct mlx4_en_dev *mdev = priv->mdev; 1797 int i; 1798 1799 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1800 en_dbg(HW, priv, "Failed dumping statistics\n"); 1801 1802 memset(&priv->stats, 0, sizeof(priv->stats)); 1803 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1804 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1805 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1806 1807 for (i = 0; i < priv->tx_ring_num; i++) { 1808 priv->tx_ring[i]->bytes = 0; 1809 priv->tx_ring[i]->packets = 0; 1810 priv->tx_ring[i]->tx_csum = 0; 1811 } 1812 for (i = 0; i < priv->rx_ring_num; i++) { 1813 priv->rx_ring[i]->bytes = 0; 1814 priv->rx_ring[i]->packets = 0; 1815 priv->rx_ring[i]->csum_ok = 0; 1816 priv->rx_ring[i]->csum_none = 0; 1817 } 1818 } 1819 1820 static int 
mlx4_en_open(struct net_device *dev) 1821 { 1822 struct mlx4_en_priv *priv = netdev_priv(dev); 1823 struct mlx4_en_dev *mdev = priv->mdev; 1824 int err = 0; 1825 1826 mutex_lock(&mdev->state_lock); 1827 1828 if (!mdev->device_up) { 1829 en_err(priv, "Cannot open - device down/disabled\n"); 1830 err = -EBUSY; 1831 goto out; 1832 } 1833 1834 /* Reset HW statistics and SW counters */ 1835 mlx4_en_clear_stats(dev); 1836 1837 err = mlx4_en_start_port(dev); 1838 if (err) 1839 en_err(priv, "Failed starting port:%d\n", priv->port); 1840 1841 out: 1842 mutex_unlock(&mdev->state_lock); 1843 return err; 1844 } 1845 1846 1847 static int mlx4_en_close(struct net_device *dev) 1848 { 1849 struct mlx4_en_priv *priv = netdev_priv(dev); 1850 struct mlx4_en_dev *mdev = priv->mdev; 1851 1852 en_dbg(IFDOWN, priv, "Close port called\n"); 1853 1854 mutex_lock(&mdev->state_lock); 1855 1856 mlx4_en_stop_port(dev, 0); 1857 netif_carrier_off(dev); 1858 1859 mutex_unlock(&mdev->state_lock); 1860 return 0; 1861 } 1862 1863 void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1864 { 1865 int i; 1866 1867 #ifdef CONFIG_RFS_ACCEL 1868 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); 1869 priv->dev->rx_cpu_rmap = NULL; 1870 #endif 1871 1872 for (i = 0; i < priv->tx_ring_num; i++) { 1873 if (priv->tx_ring && priv->tx_ring[i]) 1874 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1875 if (priv->tx_cq && priv->tx_cq[i]) 1876 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1877 } 1878 1879 for (i = 0; i < priv->rx_ring_num; i++) { 1880 if (priv->rx_ring[i]) 1881 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1882 priv->prof->rx_ring_size, priv->stride); 1883 if (priv->rx_cq[i]) 1884 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1885 } 1886 1887 if (priv->base_tx_qpn) { 1888 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num); 1889 priv->base_tx_qpn = 0; 1890 } 1891 } 1892 1893 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 1894 { 1895 struct mlx4_en_port_profile *prof = priv->prof; 1896 int i; 1897 int err; 1898 int node; 1899 1900 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn); 1901 if (err) { 1902 en_err(priv, "failed reserving range for TX rings\n"); 1903 return err; 1904 } 1905 1906 /* Create tx Rings */ 1907 for (i = 0; i < priv->tx_ring_num; i++) { 1908 node = cpu_to_node(i % num_online_cpus()); 1909 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1910 prof->tx_ring_size, i, TX, node)) 1911 goto err; 1912 1913 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i, 1914 prof->tx_ring_size, TXBB_SIZE, node)) 1915 goto err; 1916 } 1917 1918 /* Create rx Rings */ 1919 for (i = 0; i < priv->rx_ring_num; i++) { 1920 node = cpu_to_node(i % num_online_cpus()); 1921 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 1922 prof->rx_ring_size, i, RX, node)) 1923 goto err; 1924 1925 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 1926 prof->rx_ring_size, priv->stride, 1927 node)) 1928 goto err; 1929 } 1930 1931 #ifdef CONFIG_RFS_ACCEL 1932 if (priv->mdev->dev->caps.comp_pool) { 1933 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); 1934 if (!priv->dev->rx_cpu_rmap) 1935 goto err; 1936 } 1937 #endif 1938 1939 return 0; 1940 1941 err: 1942 en_err(priv, "Failed to allocate NIC resources\n"); 1943 for (i = 0; i < priv->rx_ring_num; i++) { 1944 if (priv->rx_ring[i]) 1945 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1946 prof->rx_ring_size, 1947 priv->stride); 1948 if (priv->rx_cq[i]) 1949 mlx4_en_destroy_cq(priv, 
&priv->rx_cq[i]); 1950 } 1951 for (i = 0; i < priv->tx_ring_num; i++) { 1952 if (priv->tx_ring[i]) 1953 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1954 if (priv->tx_cq[i]) 1955 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1956 } 1957 return -ENOMEM; 1958 } 1959 1960 1961 void mlx4_en_destroy_netdev(struct net_device *dev) 1962 { 1963 struct mlx4_en_priv *priv = netdev_priv(dev); 1964 struct mlx4_en_dev *mdev = priv->mdev; 1965 1966 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 1967 1968 /* Unregister device - this will close the port if it was up */ 1969 if (priv->registered) 1970 unregister_netdev(dev); 1971 1972 if (priv->allocated) 1973 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 1974 1975 cancel_delayed_work(&priv->stats_task); 1976 cancel_delayed_work(&priv->service_task); 1977 /* flush any pending task for this netdev */ 1978 flush_workqueue(mdev->workqueue); 1979 1980 /* Detach the netdev so tasks would not attempt to access it */ 1981 mutex_lock(&mdev->state_lock); 1982 mdev->pndev[priv->port] = NULL; 1983 mutex_unlock(&mdev->state_lock); 1984 1985 mlx4_en_free_resources(priv); 1986 1987 kfree(priv->tx_ring); 1988 kfree(priv->tx_cq); 1989 1990 free_netdev(dev); 1991 } 1992 1993 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 1994 { 1995 struct mlx4_en_priv *priv = netdev_priv(dev); 1996 struct mlx4_en_dev *mdev = priv->mdev; 1997 int err = 0; 1998 1999 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", 2000 dev->mtu, new_mtu); 2001 2002 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 2003 en_err(priv, "Bad MTU size:%d.\n", new_mtu); 2004 return -EPERM; 2005 } 2006 dev->mtu = new_mtu; 2007 2008 if (netif_running(dev)) { 2009 mutex_lock(&mdev->state_lock); 2010 if (!mdev->device_up) { 2011 /* NIC is probably restarting - let watchdog task reset 2012 * the port */ 2013 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 2014 } else { 2015 mlx4_en_stop_port(dev, 1); 2016 err = mlx4_en_start_port(dev); 2017 if (err) { 2018 en_err(priv, "Failed restarting port:%d\n", 2019 priv->port); 2020 queue_work(mdev->workqueue, &priv->watchdog_task); 2021 } 2022 } 2023 mutex_unlock(&mdev->state_lock); 2024 } 2025 return 0; 2026 } 2027 2028 static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) 2029 { 2030 struct mlx4_en_priv *priv = netdev_priv(dev); 2031 struct mlx4_en_dev *mdev = priv->mdev; 2032 struct hwtstamp_config config; 2033 2034 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2035 return -EFAULT; 2036 2037 /* reserved for future extensions */ 2038 if (config.flags) 2039 return -EINVAL; 2040 2041 /* device doesn't support time stamping */ 2042 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) 2043 return -EINVAL; 2044 2045 /* TX HW timestamp */ 2046 switch (config.tx_type) { 2047 case HWTSTAMP_TX_OFF: 2048 case HWTSTAMP_TX_ON: 2049 break; 2050 default: 2051 return -ERANGE; 2052 } 2053 2054 /* RX HW timestamp */ 2055 switch (config.rx_filter) { 2056 case HWTSTAMP_FILTER_NONE: 2057 break; 2058 case HWTSTAMP_FILTER_ALL: 2059 case HWTSTAMP_FILTER_SOME: 2060 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2061 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2062 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2063 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2064 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2065 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2066 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2067 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2068 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2069 case 
static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_ioctl(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}

static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_en_mac_to_u64(mac);

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf,
				 struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf,
				     int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
};
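
/*
 * Same entry points as mlx4_netdev_ops plus the SR-IOV VF management
 * callbacks (VF MAC, VLAN, spoof checking, link state and config query).
 * mlx4_en_init_netdev() selects this table when the function is the
 * SR-IOV master (mlx4_is_master()).
 */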
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
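	/*
	 * ConnectX devices can use either 32- or 64-byte CQEs; cqe_factor
	 * is 1 for 64-byte CQEs so the completion-processing code can scale
	 * its index into the CQ buffer accordingly.
	 */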
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n",
				dev->dev_addr);
			mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;
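
	/* Unicast MAC filtering is only advertised outside the legacy A0
	 * steering mode, which cannot steer by arbitrary unicast addresses.
	 */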
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}