// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

/* Element sizes in rxe_pool_init() are rounded up to a multiple of this. */
#define RXE_POOL_ALIGN (16)

/*
 * Static per-object-type parameters consumed by rxe_pool_init():
 *
 * @name:        short pool name copied to pool->name
 * @size:        byte size of the containing object
 * @elem_offset: offset of the embedded struct rxe_pool_elem inside the object
 * @cleanup:     optional per-type destructor, invoked from rxe_elem_release()
 * @flags:       RXE_POOL_ALLOC means objects are kzalloc'ed by rxe_alloc();
 *               otherwise the caller owns the object and registers its
 *               embedded element via __rxe_add_to_pool()
 * @min_index:   lowest index the xarray allocator may hand out
 * @max_index:   highest index the xarray allocator may hand out
 * @max_elem:    cap on the number of live elements in the pool
 */
static const struct rxe_type_info {
	const char *name;
	size_t size;
	size_t elem_offset;
	void (*cleanup)(struct rxe_pool_elem *elem);
	enum rxe_pool_flags flags;
	u32 min_index;
	u32 max_index;
	u32 max_elem;
} rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, elem),
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_PD] = {
		.name		= "pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, elem),
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_AH] = {
		.name		= "ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, elem),
		.min_index	= RXE_MIN_AH_INDEX,
		.max_index	= RXE_MAX_AH_INDEX,
		.max_elem	= RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, elem),
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
		.max_elem	= RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
	},
	[RXE_TYPE_QP] = {
		.name		= "qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, elem),
		.cleanup	= rxe_qp_cleanup,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
		.max_elem	= RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
	},
	[RXE_TYPE_CQ] = {
		.name		= "cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, elem),
		.cleanup	= rxe_cq_cleanup,
		.min_index	= 1,
		.max_index	= UINT_MAX,
		.max_elem	= UINT_MAX,
	},
	[RXE_TYPE_MR] = {
		.name		= "mr",
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, elem),
		.cleanup	= rxe_mr_cleanup,
		/* MRs are the only type here allocated by rxe_alloc() */
		.flags		= RXE_POOL_ALLOC,
		.min_index	= RXE_MIN_MR_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.max_elem	= RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
	},
	[RXE_TYPE_MW] = {
		.name		= "mw",
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, elem),
		.min_index	= RXE_MIN_MW_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.max_elem	= RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
	},
};

/**
 * rxe_pool_init() - initialize an object pool from the static type table
 * @rxe:  owning rxe device
 * @pool: pool to set up (zeroed here first)
 * @type: which rxe_type_info[] entry supplies the pool's parameters
 *
 * Copies the per-type limits, sizes and callbacks into @pool and
 * initializes the xarray in index-allocating mode (XA_FLAGS_ALLOC);
 * pool->limit carries the min/max index bounds used at allocation time.
 */
void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
		   enum rxe_elem_type type)
{
	const struct rxe_type_info *info = &rxe_type_info[type];

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->name		= info->name;
	pool->type		= type;
	pool->max_elem		= info->max_elem;
	/* round object size up so rxe_alloc() buffers are 16-byte aligned */
	pool->elem_size		= ALIGN(info->size, RXE_POOL_ALIGN);
	pool->elem_offset	= info->elem_offset;
	pool->flags		= info->flags;
	pool->cleanup		= info->cleanup;

	atomic_set(&pool->num_elem, 0);

	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
	pool->limit.min = info->min_index;
	pool->limit.max = info->max_index;
}

/**
 * rxe_pool_cleanup() - tear down a pool
 * @pool: pool being destroyed
 *
 * All elements must already have been released; a non-empty xarray at
 * this point indicates a reference-counting leak, so warn loudly.
 */
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	WARN_ON(!xa_empty(&pool->xa));
}

/**
 * rxe_alloc() - allocate a new pool object and assign it an index
 * @pool: pool to allocate from; must have RXE_POOL_ALLOC set
 *
 * The object is zero-allocated, its embedded element initialized with a
 * reference count of one, and a free index in [pool->limit.min,
 * pool->limit.max] is assigned cyclically via the xarray.
 *
 * Return: pointer to the new object, or NULL if the pool is at
 * max_elem, memory allocation fails, or no index is available.
 */
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_elem *elem;
	void *obj;
	int err;

	if (WARN_ON(!(pool->flags & RXE_POOL_ALLOC)))
		return NULL;

	/* optimistically claim a slot; rolled back on any failure below */
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto err_cnt;

	obj = kzalloc(pool->elem_size, GFP_KERNEL);
	if (!obj)
		goto err_cnt;

	/* the element lives at a fixed offset inside the object */
	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);

	elem->pool = pool;
	elem->obj = obj;
	kref_init(&elem->ref_cnt);

	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
			      &pool->next, GFP_KERNEL);
	if (err)
		goto err_free;

	return obj;

err_free:
	kfree(obj);
err_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

/**
 * __rxe_add_to_pool() - register a caller-embedded element with a pool
 * @pool: pool to add to; must NOT have RXE_POOL_ALLOC set
 * @elem: element embedded at pool->elem_offset inside the caller's object
 *
 * Counterpart of rxe_alloc() for objects whose memory the caller owns.
 * elem->obj is derived by walking back from the element to the start of
 * the containing object.
 *
 * Return: 0 on success, -EINVAL if the pool is full or index allocation
 * fails (the underlying xarray error code is not propagated).
 */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
	int err;

	if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
		return -EINVAL;

	/* optimistically claim a slot; rolled back on any failure below */
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto err_cnt;

	elem->pool = pool;
	elem->obj = (u8 *)elem - pool->elem_offset;
	kref_init(&elem->ref_cnt);

	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
			      &pool->next, GFP_KERNEL);
	if (err)
		goto err_cnt;

	return 0;

err_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}

/**
 * rxe_pool_get_index() - look up an object by index and take a reference
 * @pool:  pool to search
 * @index: index assigned at rxe_alloc()/__rxe_add_to_pool() time
 *
 * Holds the xarray lock (IRQs disabled) across the lookup and ref grab
 * so the entry cannot be erased in between; kref_get_unless_zero()
 * additionally rejects elements whose last reference has already been
 * dropped and are on their way out through rxe_elem_release().
 *
 * Return: the referenced object, or NULL if no live element has @index.
 */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rxe_pool_elem *elem;
	struct xarray *xa = &pool->xa;
	unsigned long flags;
	void *obj;

	xa_lock_irqsave(xa, flags);
	elem = xa_load(xa, index);
	if (elem && kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;
	else
		obj = NULL;
	xa_unlock_irqrestore(xa, flags);

	return obj;
}

/*
 * kref release callback, run when the last reference is dropped:
 * unpublish the element from the xarray (so lookups stop finding it),
 * run the per-type cleanup if any, free the object if this pool
 * kzalloc'ed it, and finally return the slot to num_elem.
 */
static void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
	struct rxe_pool *pool = elem->pool;

	xa_erase(&pool->xa, elem->index);

	if (pool->cleanup)
		pool->cleanup(elem);

	if (pool->flags & RXE_POOL_ALLOC)
		kfree(elem->obj);

	atomic_dec(&pool->num_elem);
}

/* Take an additional reference; returns 0 if the element is already dead. */
int __rxe_get(struct rxe_pool_elem *elem)
{
	return kref_get_unless_zero(&elem->ref_cnt);
}

/* Drop a reference; returns nonzero if this put released the element. */
int __rxe_put(struct rxe_pool_elem *elem)
{
	return kref_put(&elem->ref_cnt, rxe_elem_release);
}