--- cm.c (249acb5f47df27109d74a0d904a5da56c8bac28f)
+++ cm.c (f1430536e008cd3b70794e12c414c20d54aabec2)
 /*
  * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
  * OpenIB.org BSD license below:

--- 154 unchanged lines hidden ---

 	}
 	return NULL;
 }

 static void id_map_ent_timeout(struct work_struct *work)
 {
 	struct delayed_work *delay = to_delayed_work(work);
 	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
-	struct id_map_entry *db_ent, *found_ent;
+	struct id_map_entry *found_ent;
 	struct mlx4_ib_dev *dev = ent->dev;
 	struct mlx4_ib_sriov *sriov = &dev->sriov;
 	struct rb_root *sl_id_map = &sriov->sl_id_map;
-	int pv_id = (int) ent->pv_cm_id;

 	spin_lock(&sriov->id_map_lock);
-	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
-	if (!db_ent)
+	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
 		goto out;
 	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_id);

 out:
 	list_del(&ent->list);
 	spin_unlock(&sriov->id_map_lock);
 	kfree(ent);
 }

 static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
 {
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
 	struct rb_root *sl_id_map = &sriov->sl_id_map;
 	struct id_map_entry *ent, *found_ent;

 	spin_lock(&sriov->id_map_lock);
-	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+	ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
 	if (!ent)
 		goto out;
 	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_cm_id);
 out:
 	spin_unlock(&sriov->id_map_lock);
 }
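In both functions above, the old code paired idr_find() with a later idr_remove() under the external spinlock; xa_erase() collapses that into one call that removes the entry at the given index and returns whatever pointer was stored there (NULL if the slot was empty). id_map_ent_timeout() uses the return value purely as an existence check, while id_map_find_del() keeps the removed pointer for the rb-tree cleanup. A minimal standalone sketch of both patterns, assuming a hypothetical demo_table and demo_entry that stand in for pv_id_table and id_map_entry:

	#include <linux/slab.h>
	#include <linux/xarray.h>

	struct demo_entry { int payload; };	/* stand-in for id_map_entry */

	static DEFINE_XARRAY_ALLOC(demo_table);	/* hypothetical table */

	/* Pattern used by id_map_ent_timeout(): the return value is only an
	 * existence check; xa_erase() returns NULL if the slot was empty. */
	static bool demo_erase_if_present(u32 id)
	{
		return xa_erase(&demo_table, id) != NULL;
	}

	/* Pattern used by id_map_find_del(): keep the removed pointer for
	 * further cleanup, replacing the idr_find() + idr_remove() pair. */
	static void demo_erase_and_free(u32 id)
	{
		struct demo_entry *ent = xa_erase(&demo_table, id);

		kfree(ent);		/* kfree(NULL) is a no-op */
	}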

 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 {
 	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
 	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;

--- 37 unchanged lines hidden ---

 	return ERR_PTR(-ENOMEM);

 	ent->sl_cm_id = sl_cm_id;
 	ent->slave_id = slave_id;
 	ent->scheduled_delete = 0;
 	ent->dev = to_mdev(ibdev);
 	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

-	idr_preload(GFP_KERNEL);
-	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
-	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
 	if (ret >= 0) {
-		ent->pv_cm_id = (u32)ret;
+		spin_lock(&sriov->id_map_lock);
 		sl_id_map_add(ibdev, ent);
 		list_add_tail(&ent->list, &sriov->cm_list);
+		spin_unlock(&sriov->id_map_lock);
+		return ent;
 	}

-	spin_unlock(&sriov->id_map_lock);
-	idr_preload_end();
-
-	if (ret >= 0)
-		return ent;
-
 	/*error flow*/
 	kfree(ent);
-	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
 	return ERR_PTR(-ENOMEM);
 }
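The allocation side changes shape more visibly. idr_alloc_cyclic() returned the new id (or a negative errno) and, because it ran under the spinlock, needed the idr_preload()/GFP_NOWAIT dance; xa_alloc_cyclic() instead writes the id through a u32 pointer, keeps its cyclic cursor in a caller-supplied cookie (sriov->pv_id_next here), and can be passed GFP_KERNEL since it now runs before id_map_lock is taken. It returns 0 on success, 1 if the id space wrapped, and a negative errno on failure, which is why the "ret >= 0" test survives the conversion. A minimal sketch, with the same hypothetical demo names:

	#include <linux/xarray.h>

	struct demo_entry { int payload; };	/* stand-in for id_map_entry */

	static DEFINE_XARRAY_ALLOC(demo_table);	/* XA_FLAGS_ALLOC is implied */
	static u32 demo_next;			/* cyclic cursor, like pv_id_next */

	static int demo_store(struct demo_entry *ent, u32 *out_id)
	{
		/* 0: stored; 1: stored but the cursor wrapped past the limit;
		 * -EBUSY/-ENOMEM on failure, so "ret >= 0" means success. */
		int ret = xa_alloc_cyclic(&demo_table, out_id, ent, xa_limit_32b,
					  &demo_next, GFP_KERNEL);

		return ret < 0 ? ret : 0;
	}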

 static struct id_map_entry *
 id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
 {
 	struct id_map_entry *ent;
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

 	spin_lock(&sriov->id_map_lock);
 	if (*pv_cm_id == -1) {
 		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
 		if (ent)
 			*pv_cm_id = (int) ent->pv_cm_id;
 	} else
-		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
 	spin_unlock(&sriov->id_map_lock);

 	return ent;
 }
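For the pure lookup, idr_find() maps one-to-one onto xa_load(): both return the stored pointer or NULL, but xa_load() already returns void *, so the old cast disappears. xa_load() is also safe under rcu_read_lock() alone, though this conversion deliberately keeps the existing id_map_lock around it. A sketch, again with a hypothetical table:

	#include <linux/xarray.h>

	struct demo_entry { int payload; };

	static DEFINE_XARRAY(demo_table);

	static struct demo_entry *demo_lookup(u32 id)
	{
		return xa_load(&demo_table, id);  /* NULL if the slot is empty */
	}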

 static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 {
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

--- 100 unchanged lines hidden ---

 	return 0;
 }

 void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 {
 	spin_lock_init(&dev->sriov.id_map_lock);
 	INIT_LIST_HEAD(&dev->sriov.cm_list);
 	dev->sriov.sl_id_map = RB_ROOT;
-	idr_init(&dev->sriov.pv_id_table);
+	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
 }
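The init hunk is what makes xa_alloc_cyclic() legal: an XArray used with the allocating API must be initialised with XA_FLAGS_ALLOC, either at runtime with xa_init_flags(), as here (pv_id_table is embedded in struct mlx4_ib_sriov), or, for an array with static storage, with the one-line definition macro. Both forms sketched below, with a hypothetical name:

	#include <linux/xarray.h>

	/* Static form; DEFINE_XARRAY_ALLOC() sets XA_FLAGS_ALLOC for us. */
	static DEFINE_XARRAY_ALLOC(demo_table);

	/* Runtime form, for an XArray embedded in another structure. */
	static void demo_init(struct xarray *xa)
	{
		xa_init_flags(xa, XA_FLAGS_ALLOC);
	}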

 /* slave = -1 ==> all slaves */
 /* TBD -- call paravirt clean for single slave. Need for slave RESET event */
 void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 {
 	struct mlx4_ib_sriov *sriov = &dev->sriov;
 	struct rb_root *sl_id_map = &sriov->sl_id_map;

--- 20 unchanged lines hidden ---

 	spin_lock(&sriov->id_map_lock);
 	if (slave < 0) {
 		while (rb_first(sl_id_map)) {
 			struct id_map_entry *ent =
 				rb_entry(rb_first(sl_id_map),
 					 struct id_map_entry, node);

 			rb_erase(&ent->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
 		}
 		list_splice_init(&dev->sriov.cm_list, &lh);
 	} else {
 		/* first, move nodes belonging to slave to db remove list */
 		nd = rb_first(sl_id_map);
 		while (nd) {
 			struct id_map_entry *ent =
 				rb_entry(nd, struct id_map_entry, node);
 			nd = rb_next(nd);
 			if (ent->slave_id == slave)
 				list_move_tail(&ent->list, &lh);
 		}
 		/* remove those nodes from databases */
 		list_for_each_entry_safe(map, tmp_map, &lh, list) {
 			rb_erase(&map->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
 		}

 		/* add remaining nodes from cm_list */
 		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
 			if (slave == map->slave_id)
 				list_move_tail(&map->list, &lh);
 		}
 	}

 	spin_unlock(&sriov->id_map_lock);

 	/* free any map entries left behind due to cancel_delayed_work above */
 	list_for_each_entry_safe(map, tmp_map, &lh, list) {
 		list_del(&map->list);
 		kfree(map);
 	}
 }
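Taken together, the hunks follow one mapping, and the locking shifts with it: xa_erase() and xa_alloc_cyclic() take the XArray's own internal spinlock, so id_map_lock now only serialises the sl_id_map rb-tree and cm_list, which is why the allocation could move out from under it entirely. A summary of the correspondence used throughout this diff, in comment form:

	/*
	 * IDR call                               XArray replacement
	 * -------------------------------------  ----------------------------------
	 * idr_init(&idr)                         xa_init_flags(&xa, XA_FLAGS_ALLOC)
	 * idr_preload()/idr_preload_end()        (unneeded; GFP_KERNEL passed in)
	 * idr_alloc_cyclic(&idr, p, 0, 0, gfp)   xa_alloc_cyclic(&xa, &id, p,
	 *                                            xa_limit_32b, &next, gfp)
	 * idr_find(&idr, id)                     xa_load(&xa, id)
	 * idr_find() + idr_remove()              xa_erase(&xa, id), returns old entry
	 */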