/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
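
/*
 * The loop above folds the address MSB-first, so for example
 * addr = {0x00, 0x25, 0x8b, 0x12, 0x34, 0x56} yields 0x00258b123456.
 * For illustration, a hypothetical inverse helper (not part of this
 * driver) would unpack in the opposite direction:
 *
 *	static void mlx4_en_u64_to_mac(u8 *addr, u64 mac)
 *	{
 *		int i;
 *
 *		for (i = ETH_ALEN - 1; i >= 0; i--) {
 *			addr[i] = mac & 0xff;
 *			mac >>= 8;
 *		}
 *	}
 */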

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	kfree(priv->mc_addrs);
	priv->mc_addrs = NULL;
	priv->mc_addrs_cnt = 0;
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	mlx4_en_clear_list(dev);
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}
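
/*
 * .ndo_set_rx_mode runs in atomic context (the core calls it with the
 * device's address-list spinlock held), while reprogramming the filters
 * requires taking state_lock and issuing firmware commands that may
 * sleep.  Hence the handler above only queues mcast_task, and the heavy
 * lifting happens in workqueue context below.
 */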

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			if (!(mdev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
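
		/*
		 * mc_list is laid out as the 16-byte GID expected by
		 * mlx4_multicast_attach/detach(): byte 5 carries the port
		 * number and bytes 10..15 carry the Ethernet MAC, as the
		 * memcpy/assignment pairs below show.
		 */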

		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
				mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}
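
/*
 * Stats are updated asynchronously by the stats task and the data path;
 * the snapshot below is taken under stats_lock into a private copy so
 * the caller never sees a struct that is being modified concurrently.
 */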

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
		cq->moder_time = MLX4_EN_TX_COAL_TIME;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
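
/*
 * Adaptive moderation below interpolates the CQ moderation time
 * linearly between the configured endpoints:
 *
 *	moder_time = usecs_low + (rate - rate_low) *
 *		     (usecs_high - usecs_low) / (rate_high - rate_low)
 *
 * e.g. a ring running about halfway between pkt_rate_low and
 * pkt_rate_high ends up with a moder_time roughly halfway between
 * rx_usecs_low and rx_usecs_high; rates outside the window are clamped
 * to the respective endpoint.
 */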

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
						priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
					 priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a level at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
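
/*
 * The stats task is the driver's periodic heartbeat: it pulls the HW
 * counters, drives the adaptive moderation above and, as long as the
 * device stays up, re-arms itself every STATS_DELAY.
 */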

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
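
/*
 * Port bring-up, in order: activate the RX rings and their CQs, obtain
 * the port's Ethernet QP, set up RSS steering, activate the TX CQs and
 * rings, push the port/QP configuration to firmware (SET_PORT,
 * INIT_PORT), attach the RSS indirection QP to the broadcast address,
 * and finally kick the multicast task to restore the filter state.
 * The error labels unwind in reverse order.
 */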

int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}
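
/*
 * Tear-down mirrors the bring-up above: quiesce the TX path first so no
 * new work is queued, detach every multicast address, release the TX
 * and RX resources, return the port QP, and finally close the port in
 * firmware.
 */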

void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach all multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);
	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
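
/*
 * Resource allocation reserves one contiguous QP number range for all
 * TX rings (ring i gets base_tx_qpn + i); free_resources() checks the
 * per-ring pointers first and only tears down what was actually
 * created, so it is safe to call on a partially initialized priv.
 */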

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
	return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);
	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
};
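
/*
 * Per-port probe: allocate the multiqueue netdev, initialize the
 * private state and work items, allocate rings and CQs, publish the
 * entry points and features, register with the network stack, and push
 * an initial minimal port configuration so the port is usable before
 * the first ifup.
 */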

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 prof->tx_ring_num, prof->rx_ring_num);
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    MLX4_EN_MIN_MTU,
				    0, 0, 0, 0);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}