/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <net/devlink.h>

#include "mlx4.h"

/* Per (device, interface) pairing: one entry lives on the device's ctx_list
 * for every registered interface whose ->add() hook succeeded for that device.
 */
struct mlx4_device_context {
	struct list_head list;
	struct list_head bond_list;
	struct mlx4_interface *intf;
	void *context;
};

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

/* Bind one interface to one device: call the interface's ->add() hook and,
 * if it returns a context, track it on the device's ctx_list.  Called with
 * intf_mutex held.
 */
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	dev_ctx->context = intf->add(&priv->dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
		if (intf->activate)
			intf->activate(&priv->dev, dev_ctx->context);
	} else
		kfree(dev_ctx);
}

static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list) {
		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
			mlx4_dbg(&priv->dev,
				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
			intf->flags &= ~MLX4_INTFF_BONDING;
		}
		mlx4_add_device(intf, priv);
	}

	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);
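
/* Detach @intf from every registered device, calling its ->remove() hook for
 * each context created by ->add(), then drop it from the global interface list.
 */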
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	mutex_lock(&intf_mutex);

	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_remove_device(intf, priv);

	list_del(&intf->list);

	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);

int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
	unsigned long flags;
	int ret;
	LIST_HEAD(bond_list);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Fail to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Fail to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	/* Move every bonding-aware context onto a private list under the
	 * lock, then restart those interfaces (->remove() + ->add()) outside
	 * the lock so they pick up the new bonding state.
	 */
	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
			list_add_tail(&dev_ctx->bond_list, &bond_list);
			list_del(&dev_ctx->list);
		}
	}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
		dev_ctx->intf->remove(dev, dev_ctx->context);
		dev_ctx->context = dev_ctx->intf->add(dev);

		spin_lock_irqsave(&priv->ctx_lock, flags);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
	return 0;
}
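
/* Fan an asynchronous device event out to every interface bound to @dev that
 * registered an ->event() callback; runs under ctx_lock with IRQs disabled.
 */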
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, type, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	mlx4_start_catas_poll(dev);

	return 0;
}

void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
		return;

	mlx4_stop_catas_poll(dev);
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
	    mlx4_is_slave(dev)) {
		/* In mlx4_remove_one on a VF */
		u32 slave_read =
			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));

		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
				 __func__);
			mlx4_enter_error_state(dev->persist);
		}
	}
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);
	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	mutex_unlock(&intf_mutex);
}

void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);

struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];

	return &info->devlink_port;
}
EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);
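
/*
 * Illustrative sketch, compiled out with #if 0 and not part of the original
 * file: a minimal consumer of the registration machinery above, loosely
 * modelled on how mlx4_en and mlx4_ib hook in.  All mlx4_demo_* names and the
 * per-device state are hypothetical; a real out-of-file module would also
 * need <linux/module.h> for module_init()/module_exit().
 */
#if 0
struct mlx4_demo_state {
	struct mlx4_dev *dev;
};

/* ->add() runs from mlx4_add_device(); return a per-device context or NULL. */
static void *mlx4_demo_add(struct mlx4_dev *dev)
{
	struct mlx4_demo_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->dev = dev;
	return state;
}

/* ->remove() runs from mlx4_remove_device() with the context from ->add(). */
static void mlx4_demo_remove(struct mlx4_dev *dev, void *context)
{
	kfree(context);
}

/* ->event() is invoked from mlx4_dispatch_event() under ctx_lock. */
static void mlx4_demo_event(struct mlx4_dev *dev, void *context,
			    enum mlx4_dev_event event, unsigned long param)
{
	if (event == MLX4_DEV_EVENT_CATASTROPHIC_ERROR)
		mlx4_dbg(dev, "demo interface: catastrophic error\n");
}

static struct mlx4_interface mlx4_demo_interface = {
	.add		= mlx4_demo_add,
	.remove		= mlx4_demo_remove,
	.event		= mlx4_demo_event,
	.protocol	= MLX4_PROT_ETH,
};

static int __init mlx4_demo_init(void)
{
	/* Calls ->add() for every device already on dev_list. */
	return mlx4_register_interface(&mlx4_demo_interface);
}

static void __exit mlx4_demo_cleanup(void)
{
	/* Calls ->remove() for every device, then drops the interface. */
	mlx4_unregister_interface(&mlx4_demo_interface);
}

module_init(mlx4_demo_init);
module_exit(mlx4_demo_cleanup);
#endif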