/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

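/*
 * An id_map_entry caches the translation between a slave-local CM ID
 * (slave_id, sl_cm_id) and the paravirtualized CM ID (pv_cm_id) placed
 * on the wire on behalf of that slave.  Entries are indexed both by
 * (slave_id, sl_cm_id) in the sl_id_map rb-tree and by pv_cm_id in the
 * pv_id_table xarray, and are freed by the delayed work in "timeout"
 * once the connection is torn down.
 */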
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

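/*
 * A rej_tmout_entry remembers which slave a REQ was forwarded to, keyed
 * by the remote (passive side) pv_cm_id.  If the passive side later
 * sends a REJ with reason "timeout", no id_map_entry exists yet, and
 * this entry is what routes that REJ to the right slave.
 */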
struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct radix_tree_root *rej_tmout_root;
	/* Points to the mutex protecting this radix-tree */
	struct mutex *lock;
};

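/*
 * Minimal views of the CM MAD payloads: only the fields this file needs
 * to rewrite (the communication IDs, the REJ reason, and the REQ's
 * primary path SGID) are spelled out; everything else is left opaque.
 */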
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

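/*
 * SIDR messages carry a single request_id instead of the local/remote
 * comm_id pair, so the accessors below dispatch on the MAD's attr_id
 * before touching the payload.
 */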
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

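/* Extract the primary path SGID from a CM REQ; used to identify the slave. */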
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Must be called with sriov->id_map_lock held */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

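/*
 * Delayed-work handler: drop a cached CM ID mapping from both the
 * pv_id_table xarray and the sl_id_map rb-tree, then free it.
 * Scheduled by schedule_delayed() once a DREQ/REJ indicates the
 * connection is being torn down.
 */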
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

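/*
 * Insert a new entry into the sl_id_map rb-tree, keyed on
 * (sl_cm_id, slave_id).  An existing entry with the same key is
 * replaced in place.  Caller must hold sriov->id_map_lock.
 */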
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

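/*
 * Allocate a new mapping for (slave_id, sl_cm_id): a fresh pv_cm_id is
 * drawn cyclically from the pv_id_table xarray, and the entry is linked
 * into both lookup structures and the per-device cm_list.
 */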
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

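/*
 * Look up a mapping either by pv_cm_id (when *pv_cm_id != -1) or by
 * (slave_id, sl_cm_id); in the latter case *pv_cm_id is filled in on
 * success.
 */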
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

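/*
 * Arm the cleanup timer for a mapping: unless the device is already
 * going down, the entry will be reaped by id_map_ent_timeout() after
 * CM_CLEANUP_CACHE_TIMEOUT.
 */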
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* make sure that there is no schedule inside the scheduled work. */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

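/*
 * Multiplex a CM MAD travelling from a slave towards the wire: rewrite
 * the slave-local comm ID to the paravirtualized one.
 * Connection-establishing MADs (REQ, REP, MRA, SIDR_REQ, and a REJ
 * carrying the "timeout" reason) may allocate a new mapping; all other
 * MAD types must find an existing one.
 */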
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				__func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

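/*
 * Delayed-work handler for rej_tmout entries: remove the entry from the
 * radix-tree (under its mutex) and free it.
 */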
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	mutex_lock(item->lock);
	deleted = radix_tree_delete_item(item->rej_tmout_root, item->rem_pv_cm_id, NULL);
	mutex_unlock(item->lock);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

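/*
 * Record which slave a REQ was forwarded to, keyed by the remote
 * pv_cm_id.  If an entry already exists (a retried REQ), only its
 * expiry is pushed out; otherwise a new entry is inserted and its
 * cleanup work scheduled.
 */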
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	int sts;

	mutex_lock(&sriov->rej_tmout_lock);
	item = radix_tree_lookup(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id);
	mutex_unlock(&sriov->rej_tmout_lock);
	if (item) {
		if (IS_ERR(item))
			return PTR_ERR(item);
		/* If a retry, adjust delayed work */
		mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		return 0;
	}

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->rej_tmout_root = &sriov->rej_tmout_root;
	item->lock = &sriov->rej_tmout_lock;

	mutex_lock(&sriov->rej_tmout_lock);
	sts = radix_tree_insert(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id, item);
	mutex_unlock(&sriov->rej_tmout_lock);
	if (sts)
		goto err_insert;

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_insert:
	kfree(item);
	return sts;
}

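/*
 * Map a remote pv_cm_id back to the slave the corresponding REQ was
 * forwarded to.  Returns the slave number, or a negative errno if no
 * entry is found.
 */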
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;

	mutex_lock(&sriov->rej_tmout_lock);
	item = radix_tree_lookup(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id);
	mutex_unlock(&sriov->rej_tmout_lock);

	if (!item || IS_ERR(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, (int)PTR_ERR(item));
		return !item ? -ENOENT : PTR_ERR(item);
	}

	return item->slave;
}

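/*
 * Demultiplex a CM MAD arriving from the wire: resolve which slave it
 * belongs to and rewrite the paravirtualized comm ID back to that
 * slave's local one.  REQ/SIDR_REQ are routed by GID; a REJ with reason
 * "timeout" that matches no mapping falls back to the rej_tmout table.
 */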
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

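/* Initialize the per-device CM paravirtualization state. */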
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	mutex_init(&dev->sriov.rej_tmout_lock);
	INIT_RADIX_TREE(&dev->sriov.rej_tmout_root, GFP_KERNEL);
}

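/*
 * Expire all rej_tmout entries belonging to a slave (or to all slaves
 * when slave < 0) by forcing their delayed work to run immediately,
 * then wait for the work items to finish.
 */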
static void rej_tmout_tree_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct radix_tree_iter iter;
	bool flush_needed = false;
	__rcu void **slot;
	int cnt = 0;

	mutex_lock(&sriov->rej_tmout_lock);
	radix_tree_for_each_slot(slot, &sriov->rej_tmout_root, &iter, 0) {
		struct rej_tmout_entry *item = *slot;

		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	mutex_unlock(&sriov->rej_tmout_lock);

	if (flush_needed) {
		flush_scheduled_work();
		pr_debug("Deleted %d entries in radix_tree for slave %d during cleanup\n",
			 cnt, slave);
	}
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_tree_cleanup(sriov, slave);
}