/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

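/*
 * drivers/infiniband/hw/mlx4/cm.c
 *
 * CM (communication management) MAD paravirtualization for SR-IOV.
 * Each slave chooses its own CM communication IDs, which may collide
 * across slaves, so every (slave_id, slave cm_id) pair is mapped to a
 * unique paravirtualized cm_id that goes on the wire; the slave-local
 * ID and the owning slave are restored on the return path.
 */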
#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)

struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

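/*
 * Accessors that overlay the CM message layout on a raw MAD in order to
 * read or rewrite the local/remote communication IDs and, for a REQ,
 * extract the primary path SGID.
 */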
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->local_comm_id = cpu_to_be32(cm_id);
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->local_comm_id);
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->remote_comm_id = cpu_to_be32(cm_id);
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->remote_comm_id);
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

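/*
 * Delayed-work handler: once CM_CLEANUP_CACHE_TIMEOUT has elapsed after a
 * DREQ was seen, drop the cached mapping from both the sl_id_map rb-tree
 * and the pv_id_table idr, then free the entry.
 */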
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct id_map_entry *ent, *found_ent;

	spin_lock(&sriov->id_map_lock);
	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
	if (!ent)
		goto out;
	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
	spin_unlock(&sriov->id_map_lock);
}

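/*
 * Insert a new mapping into the sl_id_map rb-tree, keyed by
 * (sl_cm_id, slave_id).  An existing entry with the same key is replaced
 * in place.
 */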
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

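/*
 * Allocate a mapping for a slave's REQ/REP: reserve a unique pv_cm_id in
 * the pv_id_table idr, then insert the entry into the sl_id_map tree and
 * the per-device cm_list.
 */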
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret, id;
	static int next_id;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
	if (!ent) {
		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	do {
		spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
		ret = idr_get_new_above(&sriov->pv_id_table, ent,
					next_id, &id);
		if (!ret) {
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
			ent->pv_cm_id = (u32)id;
			sl_id_map_add(ibdev, ent);
		}

		spin_unlock(&sriov->id_map_lock);
	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
	/* idr_get_new_above() can also return -ENOSPC; don't insert in that case. */
	if (!ret) {
		spin_lock(&sriov->id_map_lock);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}
	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

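/*
 * Arm the delayed cleanup for a mapping when a DREQ is forwarded.  Nothing
 * is scheduled if the device is already going down (is_going_down).
 */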
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock_irqsave(&sriov->going_down_lock, flags);
	spin_lock(&sriov->id_map_lock);
	/* make sure no further delete is scheduled once teardown has started */
	if (!sriov->is_going_down) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock(&sriov->id_map_lock);
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
}

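/*
 * Multiplex path (slave -> wire): replace the slave-local comm ID in the
 * outgoing CM MAD with the paravirtualized one.  REQ/REP allocate a new
 * mapping, REJ is passed through untouched, DREQ arms the delayed cleanup
 * and DREP removes the mapping immediately.
 */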
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	sl_cm_id = get_local_comm_id(mad);

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
			mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				__func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
		return 0;
	} else {
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, pv_cm_id);

	return 0;
}

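/*
 * Demux path (wire -> slave): for a REQ, resolve the destination slave from
 * the primary path SGID; for all other messages, look up the mapping by the
 * paravirtualized remote comm ID, restore the slave-local ID in the MAD and
 * report the owning slave to the caller.
 */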
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
							     struct ib_mad *mad)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
		union ib_gid gid;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
					gid.global.interface_id);
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
			mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
		id_map_find_del(ibdev, (int) pv_cm_id);
	}

	return 0;
}

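/*
 * Set up the per-device CM paravirtualization state: the id_map lock, the
 * cm_list, the sl_id_map rb-tree and the pv_cm_id idr (with an initial
 * idr_pre_get() to preload idr memory).
 */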
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 1;
	struct id_map_entry *map, *tmp_map;
	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush &= !!cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (!need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}