/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};


static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
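	/*
	 * On success, xa_alloc_cyclic() has stored ent in pv_id_table and
	 * written the newly allocated proxy id to ent->pv_cm_id; publish
	 * the entry in the sl_id_map tree and on cm_list under the lock.
	 */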
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Make sure that there is no schedule inside the scheduled work. */
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

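	/*
	 * Publish the entry only if the slot for rem_pv_cm_id is still empty;
	 * a concurrent inserter that raced us while the xarray lock was
	 * dropped wins, and our copy is freed below.
	 */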
	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug(
			"Non-null old entry (%p) or error (%d) when inserting\n",
			old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
			/* Even if this fails, we pass on the REQ to the slave */
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_scheduled_work();
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;
	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}