/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_en.h"

MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_en_version[] =
	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
	DRV_VERSION "\n";

/* Declare a read-only (mode 0444) unsigned int module parameter together
 * with its sysfs description.
 */
#define MLX4_EN_PARM_INT(X, def_val, desc) \
	static unsigned int X = def_val;\
	module_param(X , uint, 0444); \
	MODULE_PARM_DESC(X, desc);


/*
 * Device scope module parameters
 */

/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
		 "Enable RSS for incoming UDP traffic or disabled (0)");

/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
			   " Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
			   " Per priority bit mask");

MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
		 "Threshold for using inline data (range: 17-104, default: 104)");

/* Upper bounds for the pfctx/pfcrx per-priority bit masks: one bit per
 * priority, eight priorities -> 0xff.
 */
#define MAX_PFC_TX     0xff
#define MAX_PFC_RX     0xff

/*
 * en_print - driver logging helper used by the en_err/en_warn/... macros.
 * @level:  printk level string (e.g. KERN_WARNING).
 * @priv:   per-netdev driver state, used only to build the message prefix.
 * @format: printf-style format, forwarded via %pV / struct va_format.
 *
 * Prefixes the message with the driver name and either the netdev name
 * (once the netdev is registered) or the PCI device name plus port number
 * (before registration, when priv->dev->name is not yet meaningful).
 */
void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...)
{
	va_list args;
	struct va_format vaf;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;
	if (priv->registered)
		printk("%s%s: %s: %pV",
		       level, DRV_NAME, priv->dev->name, &vaf);
	else
		printk("%s%s: %s: Port %d: %pV",
		       level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
		       priv->port, &vaf);
	va_end(args);
}

/*
 * mlx4_en_update_loopback_state - recompute loopback-related driver state.
 * @dev:      the netdev whose state changed.
 * @features: the (new) netdev feature set to evaluate.
 *
 * Updates the Tx WQE control flags and the RX-filter/HW-loopback driver
 * flags according to NETIF_F_LOOPBACK, multi-function (SRIOV) mode and the
 * loopback selftest, then - if the device supports
 * MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB and the RSS QPs exist -
 * pushes the new multicast-loopback setting to every Rx ring QP.
 * Takes mdev->state_lock (a mutex), so callers must be able to sleep.
 */
void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* Force HW loopback on Tx WQEs only while the loopback feature is on */
	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
			MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);

	/* Drop the packet if SRIOV is not enabled
	 * and not performing the selftest or flb disabled
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) &&
	    !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;

	/* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
	 * is requested
	 */
	if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
		priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;

	mutex_lock(&priv->mdev->state_lock);
	if ((priv->mdev->dev->caps.flags2 &
	     MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
	    priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
		int i;
		int err = 0;
		int loopback = !!(features & NETIF_F_LOOPBACK);

		for (i = 0; i < priv->rx_ring_num; i++) {
			int ret;

			ret = mlx4_en_change_mcast_lb(priv,
						      &priv->rss_map.qps[i],
						      loopback);
			/* remember the first failure but still try all rings */
			if (!err)
				err = ret;
		}
		if (err)
			mlx4_warn(priv->mdev, "failed to change mcast loopback\n");
	}
	mutex_unlock(&priv->mdev->state_lock);
}

/*
 * mlx4_en_get_profile - build mdev->profile from the module parameters.
 *
 * Disables UDP RSS when the device lacks MLX4_DEV_CAP_FLAG_UDP_RSS, picks
 * the Tx-rings-per-UP count from the low-memory profile or the online CPU
 * count, and fills the per-port profile entries.  Note that prof[] is
 * indexed 1..MLX4_MAX_PORTS (ports are 1-based), so entry 0 is unused.
 */
static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
	struct mlx4_en_profile *params = &mdev->profile;
	int i;

	params->udp_rss = udp_rss;
	params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
		MLX4_EN_MIN_TX_RING_P_UP :
		min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);

	if (params->udp_rss && !(mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
		params->udp_rss = 0;
	}
	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
		params->prof[i].rx_pause = 1;
		params->prof[i].rx_ppp = pfcrx;
		params->prof[i].tx_pause = 1;
		params->prof[i].tx_ppp = pfctx;
		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
		params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
		params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
		params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
			params->prof[i].num_up;
		params->prof[i].rss_rings = 0;
		params->prof[i].inline_thold = inline_thold;
	}
}

/*
 * mlx4_en_get_netdev - mlx4 core .get_dev callback.
 *
 * Returns the netdev registered for @port, or NULL when the port failed
 * to initialize (see mlx4_en_activate()).
 */
static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
{
	struct mlx4_en_dev *endev = ctx;

	return endev->pndev[port];
}

/*
 * mlx4_en_event - mlx4 core .event callback (async device events).
 *
 * Port up/down events are deferred to the per-port linkstate_task on the
 * driver workqueue; catastrophic errors are logged (the core restarts the
 * device); slave init/shutdown are ignored; anything else is logged as
 * unhandled when it targets a valid, initialized port.
 */
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
			  enum mlx4_dev_event event, unsigned long port)
{
	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
	struct mlx4_en_priv *priv;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
	case MLX4_DEV_EVENT_PORT_DOWN:
		if (!mdev->pndev[port])
			return;
		priv = netdev_priv(mdev->pndev[port]);
		/* To prevent races, we poll the link state in a separate
		  task rather than changing it here */
		priv->link_state = event;
		queue_work(mdev->workqueue, &priv->linkstate_task);
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		mlx4_err(mdev, "Internal error detected, restarting device\n");
		break;

	case MLX4_DEV_EVENT_SLAVE_INIT:
	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		break;
	default:
		if (port < 1 || port > dev->caps.num_ports ||
		    !mdev->pndev[port])
			return;
		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
			  (int) port);
	}
}

/*
 * mlx4_en_remove - mlx4 core .remove callback.
 *
 * Marks the device down, destroys every per-port netdev, drains and
 * destroys the driver workqueue, then frees the per-device resources in
 * the reverse order of their allocation in mlx4_en_add().
 */
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
	struct mlx4_en_dev *mdev = endev_ptr;
	int i;

	mutex_lock(&mdev->state_lock);
	mdev->device_up = false;
	mutex_unlock(&mdev->state_lock);

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		if (mdev->pndev[i])
			mlx4_en_destroy_netdev(mdev->pndev[i]);

	flush_workqueue(mdev->workqueue);
	destroy_workqueue(mdev->workqueue);
	(void) mlx4_mr_free(dev, &mdev->mr);
	iounmap(mdev->uar_map);
	mlx4_uar_free(dev, &mdev->priv_uar);
	mlx4_pd_free(dev, mdev->priv_pdn);
	if (mdev->nb.notifier_call)
		unregister_netdevice_notifier(&mdev->nb);
	kfree(mdev);
}

/*
 * mlx4_en_activate - mlx4 core .activate callback.
 *
 * Creates one netdev per Ethernet port (a failed port is recorded as a
 * NULL pndev entry and skipped elsewhere) and registers the netdevice
 * notifier; on notifier failure, notifier_call is cleared so that
 * mlx4_en_remove() knows not to unregister it.
 */
static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
{
	int i;
	struct mlx4_en_dev *mdev = ctx;

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}

	/* register notifier */
	mdev->nb.notifier_call = mlx4_en_netdev_event;
	if (register_netdevice_notifier(&mdev->nb)) {
		mdev->nb.notifier_call = NULL;
		mlx4_err(mdev, "Failed to create notifier\n");
	}
}

/*
 * mlx4_en_add - mlx4 core .add callback.
 *
 * Allocates and initializes the Ethernet driver's per-device context:
 * PD, UAR (plus its mapping), DMA MR, device profile and the private
 * workqueue.  Returns the context pointer handed back to the other
 * callbacks, or NULL on failure; partial state is undone through the
 * goto-unwind chain at the bottom.
 */
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		goto err_free_res;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &dev->persist->pdev->dev;
	mdev->pdev = dev->persist->pdev;
	mdev->device_up = false;

	/* NOTE(review): caps bit 15 is treated as the LSO capability here;
	 * confirm against the mlx4 device capability flag definitions.
	 */
	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	mlx4_en_get_profile(mdev);

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Set default number of RX rings*/
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 * by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue)
		goto err_mr;

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	mutex_init(&mdev->state_lock);
	mdev->device_up = true;

	return mdev;

err_mr:
	(void) mlx4_mr_free(dev, &mdev->mr);
err_map:
	if (mdev->uar_map)
		iounmap(mdev->uar_map);
err_uar:
	mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
	kfree(mdev);
err_free_res:
	return NULL;
}

/* Hooks through which the mlx4 core driver drives this Ethernet driver. */
static struct mlx4_interface mlx4_en_interface = {
	.add		= mlx4_en_add,
	.remove		= mlx4_en_remove,
	.event		= mlx4_en_event,
	.get_dev	= mlx4_en_get_netdev,
	.protocol	= MLX4_PROT_ETH,
	.activate	= mlx4_en_activate,
};

/*
 * mlx4_en_verify_params - sanity-check module parameters at load time.
 *
 * Out-of-range values are reset to their defaults with a warning rather
 * than failing module load.
 */
static void mlx4_en_verify_params(void)
{
	if (pfctx > MAX_PFC_TX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfctx, MAX_PFC_TX);
		pfctx = 0;
	}

	if (pfcrx > MAX_PFC_RX) {
		pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
			pfcrx, MAX_PFC_RX);
		pfcrx = 0;
	}

	if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
		pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
			inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
		inline_thold = MAX_INLINE;
	}
}

/* Module entry point: validate parameters, build the ptys2ethtool map and
 * register with the mlx4 core.
 */
static int __init mlx4_en_init(void)
{
	mlx4_en_verify_params();
	mlx4_en_init_ptys2ethtool_map();

	return mlx4_register_interface(&mlx4_en_interface);
}

/* Module exit point: unregister from the mlx4 core (which invokes
 * mlx4_en_remove() for each bound device).
 */
static void __exit mlx4_en_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_en_interface);
}

module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);