/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"
#include <rdma/ib_umem.h>

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr)
{
	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}

int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj)
{
	int ret = 0;
	int i;

	if (likely(cnt == 1 && align == 1))
		return hns_roce_bitmap_alloc(bitmap, obj);

	spin_lock(&bitmap->lock);

	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					  bitmap->last, cnt, align - 1);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
						  cnt, align - 1);
	}

	if (*obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(*obj + i, bitmap->table);

		if (*obj == bitmap->last) {
			bitmap->last = (*obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr)
{
	int i;

	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);

	if (!rr)
		bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
		       & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
	kfree(bitmap->table);
}

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
	struct device *dev = hr_dev->dev;
	u32 size = buf->size;
	int i;

	if (size == 0)
		return;

	buf->size = 0;

	if (hns_roce_buf_is_direct(buf)) {
		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
	} else {
		for (i = 0; i < buf->npages; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(dev, 1 << buf->page_shift,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
		buf->page_list = NULL;
	}
}

int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift)
{
	struct hns_roce_buf_list *buf_list;
	struct device *dev = hr_dev->dev;
	u32 page_size;
	int i;

	/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
	buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);

	page_size = 1 << buf->page_shift;
	buf->npages = DIV_ROUND_UP(size, page_size);

	/* required size is not bigger than one trunk size */
	if (size <= max_direct) {
		buf->page_list = NULL;
		buf->direct.buf = dma_alloc_coherent(dev, size,
						     &buf->direct.map,
						     GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;
	} else {
		buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL);
		if (!buf_list)
			return -ENOMEM;

		for (i = 0; i < buf->npages; i++) {
			buf_list[i].buf = dma_alloc_coherent(dev, page_size,
							     &buf_list[i].map,
							     GFP_KERNEL);
			if (!buf_list[i].buf)
				break;
		}

		/* Roll back and fail if any page could not be allocated */
		if (i != buf->npages) {
			while (i-- > 0)
				dma_free_coherent(dev, page_size,
						  buf_list[i].buf,
						  buf_list[i].map);
			kfree(buf_list);
			return -ENOMEM;
		}
		buf->page_list = buf_list;
	}
	buf->size = size;

	return 0;
}

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct hns_roce_buf *buf)
{
	int i, end;
	int total;

	end = start + buf_cnt;
	if (end > buf->npages) {
		dev_err(hr_dev->dev,
			"Failed to check kmem bufs, end %d + %d total %u!\n",
			start, buf_cnt, buf->npages);
		return -EINVAL;
	}

	total = 0;
	for (i = start; i < end; i++)
		bufs[total++] = hns_roce_buf_page(buf, i);

	return total;
}

int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct ib_umem *umem,
			   unsigned int page_shift)
{
	struct ib_block_iter biter;
	int total = 0;
	int idx = 0;
	u64 addr;

	if (page_shift < HNS_HW_PAGE_SHIFT) {
		dev_err(hr_dev->dev, "Failed to check umem page shift %u!\n",
			page_shift);
		return -EINVAL;
	}

	/* convert system page cnt to hw page cnt */
	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
		addr = rdma_block_iter_dma_address(&biter);
		if (idx >= start) {
			bufs[total++] = addr;
			if (total >= buf_cnt)
				goto done;
		}
		idx++;
	}

done:
	return total;
}

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_srq_table(hr_dev);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	hns_roce_cleanup_mr_table(hr_dev);
	hns_roce_cleanup_pd_table(hr_dev);
	hns_roce_cleanup_uar_table(hr_dev);
}
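
/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * lifecycle of the bitmap allocator above, as a table setup path elsewhere
 * in the driver might drive it. The object count, mask (num - 1) and
 * reserved ranges below are hypothetical example values.
 *
 *	struct hns_roce_bitmap bitmap;
 *	unsigned long obj;
 *	int ret;
 *
 *	ret = hns_roce_bitmap_init(&bitmap, 256, 256 - 1, 8, 0);
 *	if (ret)
 *		return ret;
 *
 *	ret = hns_roce_bitmap_alloc(&bitmap, &obj);
 *	if (!ret) {
 *		... use object number 'obj' ...
 *		hns_roce_bitmap_free(&bitmap, obj, 0);	(rr = 0: freed slot may be reused)
 *	}
 *
 *	hns_roce_bitmap_cleanup(&bitmap);
 */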