/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)

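/*
 * With SR-IOV, each guest (slave) runs its own CM stack and picks its
 * own communication IDs, so IDs chosen by different slaves can collide
 * on the wire.  The PF therefore translates each slave-local ID
 * (sl_cm_id) to a unique paravirtualized ID (pv_cm_id) on the way out,
 * and back again on the way in.  An id_map_entry is one such
 * translation record, indexed both by an rb-tree (keyed by
 * sl_cm_id/slave_id) and by an idr (keyed by pv_cm_id).
 */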
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;		/* slave-local comm ID */
	u32 pv_cm_id;		/* paravirtualized (wire) comm ID */
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

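/*
 * The CM messages rewritten here carry the local communication ID
 * immediately after the MAD header, followed (in most message types)
 * by the remote communication ID; cm_generic_msg overlays exactly
 * those two fields.  cm_req_msg maps only the one extra field needed
 * from a REQ: the primary path GID, at byte offset 0x60 of the MAD.
 */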
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	msg->local_comm_id = cpu_to_be32(cm_id);
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->local_comm_id);
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	msg->remote_comm_id = cpu_to_be32(cm_id);
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->remote_comm_id);
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

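/*
 * Delayed-work handler armed by schedule_delayed() when a DREQ is
 * forwarded: once the cleanup timeout expires, drop the translation
 * entry from both the rb-tree and the idr, then free it.
 */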
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct id_map_entry *ent, *found_ent;

	spin_lock(&sriov->id_map_lock);
	ent = idr_find(&sriov->pv_id_table, pv_cm_id);
	if (!ent)
		goto out;
	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
	spin_unlock(&sriov->id_map_lock);
}

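/*
 * Insert a new entry into the rb-tree, which is ordered by
 * (sl_cm_id, slave_id).  An existing entry with the same key is
 * replaced rather than duplicated.
 */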
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

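/*
 * Create a translation entry for a slave-initiated connection:
 * allocate a unique pv_cm_id from the idr (starting the search at
 * next_id so IDs are not immediately reused) and hook the entry into
 * both lookup structures and the per-device cm_list.
 */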
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	static int next_id;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent) {
		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	idr_preload(GFP_KERNEL);
	spin_lock(&sriov->id_map_lock);

	ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
	if (ret >= 0) {
		next_id = max(ret + 1, 0);
		ent->pv_cm_id = (u32)ret;
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
	}

	spin_unlock(&sriov->id_map_lock);
	idr_preload_end();

	if (ret >= 0)
		return ent;

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}

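/*
 * Look up an entry by either key: by (slave_id, sl_cm_id) when
 * *pv_cm_id is -1, filling in *pv_cm_id on success, or directly by
 * *pv_cm_id otherwise.
 */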
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = idr_find(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

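/*
 * Arm the cleanup timeout for an entry whose connection is being torn
 * down (DREQ seen).  Skipped while the device is going down; the entry
 * is then reaped by mlx4_ib_cm_paravirt_clean() instead.
 */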
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Make sure not to arm the timeout while the device is going down */
	if (!sriov->is_going_down) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

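/*
 * Paravirtualize a CM MAD sent by a slave before it goes out on the
 * wire: allocate (REQ/REP) or look up the translation entry and
 * replace the slave-local comm ID with the pv_cm_id.  A DREQ arms the
 * entry's cleanup timeout; a DREP removes the entry immediately.
 */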
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	sl_cm_id = get_local_comm_id(mad);

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} id_map_alloc failed\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
		return 0;
	} else {
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, pv_cm_id);

	return 0;
}

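/*
 * Demultiplex a CM MAD arriving from the wire: for a REQ, resolve the
 * destination slave from the primary path GID; otherwise map the
 * pv_cm_id found in the remote-comm-ID field back to the owning slave
 * and restore its sl_cm_id.  REJ/DREP drop the entry; DREQ arms the
 * cleanup timeout.
 */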
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
		union ib_gid gid;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     gid.global.interface_id);
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
		id_map_find_del(ibdev, (int) pv_cm_id);
	}

	return 0;
}

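/*
 * Per-device init of the CM paravirtualization state: the lock, the
 * entry list, the (sl_cm_id, slave_id) rb-tree and the pv_cm_id idr.
 */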
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
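/*
 * Three phases: cancel any armed cleanup timeouts (flushing the system
 * workqueue if a cancel raced with an already-running handler), unhook
 * the matching entries from the rb-tree and idr under the lock, then
 * free them outside the lock.
 */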
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
		}

		/* move this slave's remaining cm_list entries to the free list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}