--- cm.c (c39f2d9db0fd81ea20bb5cce9b3f082ca63753e2)
+++ cm.c (ea660ad7c1c476fd6e5e3b17780d47159db71dea)
 /*
  * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
  * OpenIB.org BSD license below:

--- 172 unchanged lines hidden ---

                 rb_erase(&found_ent->node, sl_id_map);

 out:
         list_del(&ent->list);
         spin_unlock(&sriov->id_map_lock);
         kfree(ent);
 }

-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
-{
-        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
-        struct rb_root *sl_id_map = &sriov->sl_id_map;
-        struct id_map_entry *ent, *found_ent;
-
-        spin_lock(&sriov->id_map_lock);
-        ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
-        if (!ent)
-                goto out;
-        found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
-        if (found_ent && found_ent == ent)
-                rb_erase(&found_ent->node, sl_id_map);
-out:
-        spin_unlock(&sriov->id_map_lock);
-}
-
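
For reference, the function removed above erased one cached entry from both of its lookup structures (the xarray keyed by pv_cm_id and the rb-tree keyed by slave_id/sl_cm_id) while holding a single lock. The sketch below models that dual-index erase pattern in plain userspace C; the fixed-size tables, names, and pthread mutex are simplified stand-ins, not the driver's data structures.

/*
 * Simplified userspace model: one entry is reachable through two
 * indexes and must be unhooked from both under the same lock, as the
 * removed id_map_find_del() did with its xarray and rb-tree.
 */
#include <pthread.h>
#include <stdio.h>

#define IDS 16

struct id_map_entry { int pv_cm_id, slave_id, sl_cm_id; };

static struct id_map_entry *pv_table[IDS];       /* stand-in for the xarray  */
static struct id_map_entry *sl_table[IDS][IDS];  /* stand-in for the rb-tree */
static pthread_mutex_t id_map_lock = PTHREAD_MUTEX_INITIALIZER;

static void id_map_find_del(int pv_cm_id)
{
        struct id_map_entry *ent;

        pthread_mutex_lock(&id_map_lock);
        ent = pv_table[pv_cm_id];
        if (ent) {
                /* drop the entry from both indexes under the same lock */
                pv_table[pv_cm_id] = NULL;
                sl_table[ent->slave_id][ent->sl_cm_id] = NULL;
        }
        pthread_mutex_unlock(&id_map_lock);
}

int main(void)
{
        static struct id_map_entry e = { .pv_cm_id = 3, .slave_id = 1, .sl_cm_id = 7 };

        pv_table[3] = &e;
        sl_table[1][7] = &e;
        id_map_find_del(3);
        printf("both indexes cleared: %s\n",
               !pv_table[3] && !sl_table[1][7] ? "yes" : "no");
        return 0;
}
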
 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 {
         struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
         struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
         struct id_map_entry *ent;
         int slave_id = new->slave_id;
         int sl_cm_id = new->sl_cm_id;

--- 75 unchanged lines hidden ---

 static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 {
         struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
         unsigned long flags;

         spin_lock(&sriov->id_map_lock);
         spin_lock_irqsave(&sriov->going_down_lock, flags);
         /*make sure that there is no schedule inside the scheduled work.*/
-        if (!sriov->is_going_down) {
+        if (!sriov->is_going_down && !id->scheduled_delete) {
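
The one-line change above arms the delayed cleanup work at most once per entry by checking scheduled_delete before scheduling. Below is a minimal userspace sketch of that re-arm guard; a detached thread and a one-second sleep stand in for schedule_delayed_work() and CM_CLEANUP_CACHE_TIMEOUT, and all names are illustrative only.

/*
 * Userspace sketch of the re-arm guard: deferred cleanup is scheduled
 * at most once per entry, tracked by scheduled_delete under the entry
 * lock.  A detached thread models the delayed work item.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct id_entry {
        pthread_mutex_t lock;
        int scheduled_delete;
};

static void *cleanup_work(void *arg)
{
        struct id_entry *id = arg;

        sleep(1);                      /* stands in for the cleanup timeout */
        printf("deferred cleanup ran once\n");
        pthread_mutex_lock(&id->lock);
        id->scheduled_delete = 0;
        pthread_mutex_unlock(&id->lock);
        return NULL;
}

static void schedule_delayed(struct id_entry *id)
{
        pthread_t t;

        pthread_mutex_lock(&id->lock);
        /* only arm the work if it is not already pending */
        if (!id->scheduled_delete) {
                id->scheduled_delete = 1;
                if (pthread_create(&t, NULL, cleanup_work, id) == 0)
                        pthread_detach(t);
                else
                        id->scheduled_delete = 0;
        }
        pthread_mutex_unlock(&id->lock);
}

int main(void)
{
        static struct id_entry id = { PTHREAD_MUTEX_INITIALIZER, 0 };

        schedule_delayed(&id);         /* e.g. on a DREQ */
        schedule_delayed(&id);         /* duplicate request is a no-op */
        sleep(2);                      /* let the single cleanup run */
        return 0;
}
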
                 id->scheduled_delete = 1;
                 schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
         }
         spin_unlock_irqrestore(&sriov->going_down_lock, flags);
         spin_unlock(&sriov->id_map_lock);
 }

 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,

--- 30 unchanged lines hidden ---

                 return -EINVAL;
         }

 cont:
         set_local_comm_id(mad, id->pv_cm_id);

         if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
                 schedule_delayed(ibdev, id);
-        else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
-                id_map_find_del(ibdev, pv_cm_id);
-
         return 0;
 }

 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                              struct ib_mad *mad)
 {
         u32 pv_cm_id;
         struct id_map_entry *id;

--- 22 unchanged lines hidden ---

                 pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
                 return -ENOENT;
         }

         if (slave)
                 *slave = id->slave_id;
         set_remote_comm_id(mad, id->sl_cm_id);

-        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
+            mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
                 schedule_delayed(ibdev, id);
-        else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
-                 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
-                id_map_find_del(ibdev, (int) pv_cm_id);
-        }
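
With this hunk, the demux path arms the delayed cleanup for REJ as well as DREQ, and, together with the multiplex-side hunk above, the mapping is no longer deleted immediately on DREP or REJ. A small sketch of that dispatch policy follows; the enum values are illustrative placeholders, not the CM wire encodings.

/*
 * Sketch of the resulting policy on the demux side: DREQ and REJ arm
 * the delayed cleanup, DREP no longer triggers an immediate delete.
 */
#include <stdio.h>

enum cm_attr { CM_DREQ_ATTR_ID = 1, CM_DREP_ATTR_ID, CM_REJ_ATTR_ID };

static int wants_delayed_cleanup(enum cm_attr attr_id)
{
        return attr_id == CM_DREQ_ATTR_ID || attr_id == CM_REJ_ATTR_ID;
}

int main(void)
{
        printf("DREQ -> %d, REJ -> %d, DREP -> %d\n",
               wants_delayed_cleanup(CM_DREQ_ATTR_ID),
               wants_delayed_cleanup(CM_REJ_ATTR_ID),
               wants_delayed_cleanup(CM_DREP_ATTR_ID));
        return 0;
}
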

         return 0;
 }

 void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 {
         spin_lock_init(&dev->sriov.id_map_lock);
         INIT_LIST_HEAD(&dev->sriov.cm_list);

--- 72 unchanged lines hidden ---