/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>

#include "mlx4.h"

struct mlx4_device_context {
	struct list_head	list;
	struct list_head	bond_list;
	struct mlx4_interface  *intf;
	void		       *context;
};

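/*
 * Global lists of registered interfaces and devices, both protected by
 * intf_mutex.  Each device additionally keeps its own ctx_list of bound
 * interfaces, protected by the device's ctx_lock.
 */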
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

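/*
 * Bind one interface to one device: call the interface's ->add() hook and,
 * if it returns a context, track the pairing on the device's ctx_list.
 * Called with intf_mutex held.
 */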
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(&priv->dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

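/*
 * Undo mlx4_add_device() for one interface/device pair: unlink the context
 * entry under ctx_lock, then invoke the interface's ->remove() hook.
 * Called with intf_mutex held.
 */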
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

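/*
 * Register a protocol driver (e.g. mlx4_en or mlx4_ib) and bind it to every
 * device already present in dev_list.
 *
 * Illustrative sketch of a caller (hypothetical names; the full callback set
 * is declared in <linux/mlx4/driver.h>):
 *
 *	static void *my_add(struct mlx4_dev *dev)
 *	{
 *		return kzalloc(sizeof(struct my_ctx), GFP_KERNEL);
 *	}
 *
 *	static void my_remove(struct mlx4_dev *dev, void *context)
 *	{
 *		kfree(context);
 *	}
 *
 *	static struct mlx4_interface my_intf = {
 *		.add	  = my_add,
 *		.remove	  = my_remove,
 *		.protocol = MLX4_PROT_ETH,
 *	};
 *
 *	err = mlx4_register_interface(&my_intf);
 */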
int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);

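/*
 * Detach an interface from every registered device and remove it from
 * intf_list.
 */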
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	mutex_lock(&intf_mutex);

	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_remove_device(intf, priv);

	list_del(&intf->list);

	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);

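/*
 * Switch the device in or out of bonded (port-remap) mode.  Interfaces that
 * set MLX4_INTFF_BONDING are torn down and re-added so they pick up the new
 * mode; all other interfaces are left untouched.
 */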
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
	unsigned long flags;
	int ret;
	LIST_HEAD(bond_list);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Failed to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Failed to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	/* Pull bonding-aware interfaces off the live ctx_list so they can be
	 * restarted without holding ctx_lock across their callbacks.
	 */
	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
			list_add_tail(&dev_ctx->bond_list, &bond_list);
			list_del(&dev_ctx->list);
		}
	}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
		/* Restart the interface so it re-adds itself under the new mode. */
		dev_ctx->intf->remove(dev, dev_ctx->context);
		dev_ctx->context = dev_ctx->intf->add(dev);

		spin_lock_irqsave(&priv->ctx_lock, flags);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

		mlx4_dbg(dev, "Interface for protocol %d restarted when bonded mode is %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
	return 0;
}

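/*
 * Fan a device event out to every bound interface that registered an
 * ->event() callback.  Runs under ctx_lock, so callbacks must not sleep.
 */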
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, type, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

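/*
 * Make a newly probed device visible: mark it UP, add it to dev_list, bind
 * every registered interface to it and start catastrophic-error polling.
 */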
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	mlx4_start_catas_poll(dev);

	return 0;
}

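/*
 * Tear-down counterpart of mlx4_register_device(): stop catastrophic-error
 * polling, detach every interface and drop the device from dev_list.
 */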
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mlx4_stop_catas_poll(dev);
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);
	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	mutex_unlock(&intf_mutex);
}

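/*
 * Return the protocol-specific device (e.g. the netdev for MLX4_PROT_ETH)
 * that the matching interface exposes for this port via its ->get_dev()
 * callback, or NULL if no such interface is bound.
 */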
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);