// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

#define RXE_POOL_ALIGN		(16)

static const struct rxe_type_info {
	const char *name;
	size_t size;
	size_t elem_offset;
	void (*cleanup)(struct rxe_pool_elem *obj);
	enum rxe_pool_flags flags;
	u32 min_index;
	u32 max_index;
	size_t key_offset;
	size_t key_size;
} rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name = "rxe-uc",
		.size = sizeof(struct rxe_ucontext),
		.elem_offset = offsetof(struct rxe_ucontext, elem),
		.flags = RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name = "rxe-pd",
		.size = sizeof(struct rxe_pd),
		.elem_offset = offsetof(struct rxe_pd, elem),
		.flags = RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name = "rxe-ah",
		.size = sizeof(struct rxe_ah),
		.elem_offset = offsetof(struct rxe_ah, elem),
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index = RXE_MIN_AH_INDEX,
		.max_index = RXE_MAX_AH_INDEX,
	},
	[RXE_TYPE_SRQ] = {
		.name = "rxe-srq",
		.size = sizeof(struct rxe_srq),
		.elem_offset = offsetof(struct rxe_srq, elem),
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index = RXE_MIN_SRQ_INDEX,
		.max_index = RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name = "rxe-qp",
		.size = sizeof(struct rxe_qp),
		.elem_offset = offsetof(struct rxe_qp, elem),
		.cleanup = rxe_qp_cleanup,
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index = RXE_MIN_QP_INDEX,
		.max_index = RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name = "rxe-cq",
		.size = sizeof(struct rxe_cq),
		.elem_offset = offsetof(struct rxe_cq, elem),
		.flags = RXE_POOL_NO_ALLOC,
		.cleanup = rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name = "rxe-mr",
		.size = sizeof(struct rxe_mr),
		.elem_offset = offsetof(struct rxe_mr, elem),
		.cleanup = rxe_mr_cleanup,
		.flags = RXE_POOL_INDEX,
		.min_index = RXE_MIN_MR_INDEX,
		.max_index = RXE_MAX_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name = "rxe-mw",
		.size = sizeof(struct rxe_mw),
		.elem_offset = offsetof(struct rxe_mw, elem),
		.cleanup = rxe_mw_cleanup,
		.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index = RXE_MIN_MW_INDEX,
		.max_index = RXE_MAX_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name = "rxe-mc_grp",
		.size = sizeof(struct rxe_mc_grp),
		.elem_offset = offsetof(struct rxe_mc_grp, elem),
		.cleanup = rxe_mc_cleanup,
		.flags = RXE_POOL_KEY,
		.key_offset = offsetof(struct rxe_mc_grp, mgid),
		.key_size = sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name = "rxe-mc_elem",
		.size = sizeof(struct rxe_mc_elem),
		.elem_offset = offsetof(struct rxe_mc_elem, elem),
	},
};

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

out:
	return err;
}
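/*
 * A minimal sketch of how the index bitmap above maps to object
 * indices, assuming a hypothetical range of min = 16, max = 19
 * (four usable indices, so a four-bit map):
 *
 *	bit:	0	1	2	3
 *	index:	16	17	18	19
 *
 * alloc_index() below resumes its scan at the last allocated bit and
 * wraps around at most once, so recently freed indices are not handed
 * out again immediately.
 */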
int rxe_pool_init(
	struct rxe_dev *rxe,
	struct rxe_pool *pool,
	enum rxe_elem_type type,
	unsigned int max_elem)
{
	const struct rxe_type_info *info = &rxe_type_info[type];
	int err = 0;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->name = info->name;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
	pool->elem_offset = info->elem_offset;
	pool->flags = info->flags;
	pool->cleanup = info->cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (pool->flags & RXE_POOL_INDEX) {
		pool->index.tree = RB_ROOT;
		err = rxe_pool_init_index(pool, info->max_index,
					  info->min_index);
		if (err)
			goto out;
	}

	if (pool->flags & RXE_POOL_KEY) {
		pool->key.tree = RB_ROOT;
		pool->key.key_offset = info->key_offset;
		pool->key.key_size = info->key_size;
	}

out:
	return err;
}

void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool->name);

	if (pool->flags & RXE_POOL_INDEX)
		bitmap_free(pool->index.table);
}

static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}

static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_elem *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_elem, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			return -EINVAL;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);

	return 0;
}

static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_elem *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_elem, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset,
			     pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			return -EINVAL;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);

	return 0;
}

int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	err = rxe_insert_key(pool, elem);

	return err;
}

int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	write_lock_bh(&pool->pool_lock);
	err = __rxe_add_key_locked(elem, key);
	write_unlock_bh(&pool->pool_lock);

	return err;
}
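/*
 * A minimal usage sketch for a keyed pool, assuming a hypothetical
 * caller (the real user is the multicast group code elsewhere in the
 * driver; "rxe->mc_grp_pool", "grp" and "mgid" are illustrative names,
 * with mgid a union ib_gid * supplied by the caller):
 *
 *	struct rxe_mc_grp *grp;
 *	int err;
 *
 *	grp = rxe_alloc_locked(&rxe->mc_grp_pool);
 *	if (!grp)
 *		return -ENOMEM;
 *	err = __rxe_add_key_locked(&grp->elem, mgid);
 *
 * __rxe_add_key_locked() copies key_size bytes from mgid into the
 * object at key_offset and then links the elem into the key rb-tree.
 * The _locked variants assume the caller already holds pool->pool_lock
 * for writing; the plain variants take and release it themselves.
 */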
void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;

	write_lock_bh(&pool->pool_lock);
	__rxe_drop_key_locked(elem);
	write_unlock_bh(&pool->pool_lock);
}

int __rxe_add_index_locked(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	elem->index = alloc_index(pool);
	err = rxe_insert_index(pool, elem);

	return err;
}

int __rxe_add_index(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	write_lock_bh(&pool->pool_lock);
	err = __rxe_add_index_locked(elem);
	write_unlock_bh(&pool->pool_lock);

	return err;
}

void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;

	write_lock_bh(&pool->pool_lock);
	__rxe_drop_index_locked(elem);
	write_unlock_bh(&pool->pool_lock);
}

void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_pool_elem *elem;
	void *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(pool->elem_size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);

	elem->pool = pool;
	elem->obj = obj;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_elem *elem;
	void *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(pool->elem_size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);

	elem->pool = pool;
	elem->obj = obj;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	elem->obj = (u8 *)elem - pool->elem_offset;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_elem *elem =
		container_of(kref, struct rxe_pool_elem, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	void *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = elem->obj;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}
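/*
 * A lifecycle sketch for an indexed, pool-allocated object such as an
 * MR, assuming a hypothetical caller (error handling trimmed; the name
 * "rxe->mr_pool" is illustrative):
 *
 *	struct rxe_mr *mr = rxe_alloc(&rxe->mr_pool);	// ref_cnt == 1
 *
 *	__rxe_add_index(&mr->elem);	// reserve an index and insert
 *					// the elem into the rb-tree
 *	...
 *	__rxe_drop_index(&mr->elem);	// give the index back
 *	kref_put(&mr->elem.ref_cnt, rxe_elem_release);
 *
 * rxe_elem_release() above runs the per-type cleanup hook and, unless
 * the pool was created with RXE_POOL_NO_ALLOC, frees the memory that
 * rxe_alloc() obtained.
 */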
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node;
	struct rxe_pool_elem *elem;
	void *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_elem, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = elem->obj;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	void *obj;

	read_lock_bh(&pool->pool_lock);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_bh(&pool->pool_lock);

	return obj;
}

void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rb_node *node;
	struct rxe_pool_elem *elem;
	void *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_elem, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = elem->obj;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	void *obj;

	read_lock_bh(&pool->pool_lock);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_bh(&pool->pool_lock);

	return obj;
}
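/*
 * A lookup sketch, assuming a hypothetical caller ("qpn" is an
 * illustrative queue pair number): both getters return the object with
 * an extra reference taken under pool_lock, so the caller must drop
 * that reference when done:
 *
 *	struct rxe_qp *qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *
 *	if (qp) {
 *		...	// use qp
 *		kref_put(&qp->elem.ref_cnt, rxe_elem_release);
 *	}
 *
 * Taking kref_get() inside the read lock is what prevents the element
 * from being freed between lookup and use.
 */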