/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)

struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;		/* slave's local comm ID */
	u32 pv_cm_id;		/* paravirtual comm ID, unique per device */
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

/* Layout common to all CM MADs: the comm IDs directly follow the MAD header. */
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

/* Only the primary path SGID (at byte offset 0x60) is of interest here. */
struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};


static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	msg->local_comm_id = cpu_to_be32(cm_id);
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->local_comm_id);
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	msg->remote_comm_id = cpu_to_be32(cm_id);
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->remote_comm_id);
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* Caller must hold sriov->id_map_lock. */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	/* The tree is keyed on (sl_cm_id, slave_id), sl_cm_id first. */
	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

/*
 * Delayed-work handler armed by schedule_delayed(): drop the mapping from
 * both databases (if it is still the current one) and free the entry.
 */
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

/* Remove the mapping for pv_cm_id from both databases, if it is present. */
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct id_map_entry *ent, *found_ent;

	spin_lock(&sriov->id_map_lock);
	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
	if (!ent)
		goto out;
	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
	spin_unlock(&sriov->id_map_lock);
}

/* Insert @new into the (sl_cm_id, slave_id) tree; caller holds id_map_lock. */
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

/*
 * Allocate a new paravirtual comm ID for (slave_id, sl_cm_id) and enter the
 * mapping into all three databases.  Returns the new entry or an ERR_PTR.
 */
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	idr_preload(GFP_KERNEL);
	spin_lock(&sriov->id_map_lock);

	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		ent->pv_cm_id = (u32)ret;
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
	}

	spin_unlock(&sriov->id_map_lock);
	idr_preload_end();

	if (ret >= 0)
		return ent;

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}
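/*
 * Sketch of the lifetime of an id_map_entry, as driven by the two MAD
 * handlers below (a summary of this file's logic, not additional API):
 *
 *   REQ/REP from a slave:  id_map_alloc() assigns a pv_cm_id and adds the
 *                          entry to sl_id_map, pv_id_table and cm_list.
 *   DREQ (either side):    schedule_delayed() arms a CM_CLEANUP_CACHE_TIMEOUT
 *                          timer; id_map_ent_timeout() then frees the entry.
 *   DREP/REJ from wire:    id_map_find_del() removes the entry immediately.
 */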
/*
 * Look up an entry either by pv_cm_id (when *pv_cm_id != -1) or by the
 * (slave_id, sl_cm_id) pair, filling in *pv_cm_id in the latter case.
 */
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Don't arm the cleanup timer if the device is already going down. */
	if (!sriov->is_going_down) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

/*
 * Multiplex a CM MAD sent by a slave: replace the slave's local comm ID
 * with a paravirtual comm ID that is unique across all slaves, allocating
 * a new mapping for REQ/REP and tearing the mapping down on DREQ/DREP.
 */
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	sl_cm_id = get_local_comm_id(mad);

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} id_map_alloc failed\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
		return 0;
	} else {
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, pv_cm_id);

	return 0;
}

/*
 * Demultiplex a CM MAD arriving from the wire: resolve which slave it is
 * destined for and restore that slave's original comm ID.  For a REQ the
 * slave is found by the primary path SGID; otherwise it is looked up by
 * the paravirtual comm ID carried in the MAD.
 */
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
		union ib_gid gid;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, (int) pv_cm_id);

	return 0;
}
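/*
 * Locking notes (derived from the code above):
 *   - sriov->id_map_lock protects sl_id_map, pv_id_table and cm_list as a
 *     single consistent database; every reader and writer takes it.
 *   - sriov->going_down_lock guards is_going_down, so that no new cleanup
 *     work is scheduled once the device has started tearing down.
 */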
/* Called once per IB device to set up the paravirt CM ID databases. */
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
}

/* slave = -1 ==> clean up entries of all slaves */
/* TBD -- call paravirt clean for a single slave. Needed for the slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from the databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);

			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from the databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}
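/*
 * A minimal sketch of how these entry points are expected to be wired up by
 * the device lifecycle code.  The exact call sites are an assumption here
 * (they live outside this file, in the mlx4_ib SR-IOV setup/teardown path):
 *
 *	mlx4_ib_cm_paravirt_init(ibdev);	// while bringing SR-IOV up
 *	...
 *	mlx4_ib_cm_paravirt_clean(ibdev, -1);	// -1: flush entries of all slaves
 */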