/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)

struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

static struct workqueue_struct *cm_wq;

static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Lock should be taken before calling */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

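/*
 * Schedule (or push back) removal of an id_map_entry once connection
 * teardown has started; the entry is kept for CM_CLEANUP_CACHE_TIMEOUT
 * before id_map_ent_timeout() finally drops it from both maps.
 */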
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* make sure that there is no schedule inside the scheduled work. */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

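/*
 * Multiplex path (slave towards the wire): replace the slave-local comm ID
 * in the outgoing CM MAD with the paravirtualized comm ID from pv_id_table,
 * allocating a new id_map_entry for REQ/REP/MRA/SIDR_REQ (and for a REJ
 * with reason "timeout") when none exists yet. A DREQ schedules delayed
 * removal of the mapping.
 */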
#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

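/*
 * REJ-timeout tracking: a REJ with reason "timeout" may arrive before any
 * id_map_entry exists for the connection, so for every REQ/SIDR_REQ passed
 * down to a slave the remote (paravirtualized) comm ID is remembered in
 * xa_rej_tmout together with the destination slave. Entries age out after
 * CM_CLEANUP_CACHE_TIMEOUT.
 */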
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug(
			"Non-null old entry (%p) or error (%d) when inserting\n",
			old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

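/*
 * Demux path (wire towards a slave): for REQ/SIDR_REQ the destination slave
 * is resolved from the primary path SGID; for other CM messages it is looked
 * up by the paravirtualized comm ID, and the original slave-local comm ID is
 * restored before the MAD is forwarded. DREQ and REJ schedule delayed
 * removal of the mapping.
 */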
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(cm_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_workqueue(cm_wq);
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;
	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_workqueue(cm_wq); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}

int mlx4_ib_cm_init(void)
{
	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
	if (!cm_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_cm_destroy(void)
{
	destroy_workqueue(cm_wq);
}