/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
        int ret = 0;

        spin_lock(&bitmap->lock);
        *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (*obj >= bitmap->max) {
                /* Nothing free after 'last': rotate the high "top" bits
                 * within mask and retry from the bottom of the table.
                 */
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (*obj < bitmap->max) {
                set_bit(*obj, bitmap->table);
                bitmap->last = (*obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
                          int rr)
{
        hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);

int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
                                int align, unsigned long *obj)
{
        int ret = 0;
        int i;

        if (likely(cnt == 1 && align == 1))
                return hns_roce_bitmap_alloc(bitmap, obj);

        spin_lock(&bitmap->lock);

        *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
                                          bitmap->last, cnt, align - 1);
        if (*obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
                                                  cnt, align - 1);
        }

        if (*obj < bitmap->max) {
                for (i = 0; i < cnt; i++)
                        set_bit(*obj + i, bitmap->table);

                if (*obj == bitmap->last) {
                        bitmap->last = (*obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                unsigned long obj, int cnt,
                                int rr)
{
        int i;

        /* Strip the high "top" bits OR'ed in by the allocator to recover
         * the plain index into the bitmap table.
         */
        obj &= bitmap->max + bitmap->reserved_top - 1;

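        /* Clear the bits under the lock; unless round-robin (rr) freeing
         * is requested, rewind 'last' so the freed slots are preferred by
         * the next allocation.
         */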
        spin_lock(&bitmap->lock);
        for (i = 0; i < cnt; i++)
                clear_bit(obj + i, bitmap->table);

        if (!rr)
                bitmap->last = min(bitmap->last, obj);
        bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                       & bitmap->mask;
        spin_unlock(&bitmap->lock);
}

int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
                         u32 reserved_bot, u32 reserved_top)
{
        u32 i;

        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top = 0;
        bitmap->max = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        spin_lock_init(&bitmap->lock);
        bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
                                GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        /* Mark the reserved bottom entries as permanently in use */
        for (i = 0; i < reserved_bot; ++i)
                set_bit(i, bitmap->table);

        return 0;
}

void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
        kfree(bitmap->table);
}

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
                       struct hns_roce_buf *buf)
{
        int i;
        struct device *dev = hr_dev->dev;
        u32 bits_per_long = BITS_PER_LONG;

        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
                if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(dev, 1 << buf->page_shift,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_buf_free);

int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                       struct hns_roce_buf *buf, u32 page_shift)
{
        int i = 0;
        dma_addr_t t;
        struct page **pages;
        struct device *dev = hr_dev->dev;
        u32 bits_per_long = BITS_PER_LONG;
        u32 page_size = 1 << page_shift;
        u32 order;

        /* An SQ/RQ buf of at most max_direct bytes is allocated in one
         * contiguous piece, e.g. SQ + RQ = 8K.
         */
        if (size <= max_direct) {
                buf->nbufs = 1;
                /* npages is calculated from the configured page_shift */
                order = get_order(size);
                if (order <= page_shift - PAGE_SHIFT)
                        order = 0;
                else
                        order -= page_shift - PAGE_SHIFT;
                buf->npages = 1 << order;
                buf->page_shift = page_shift;
                /* MTT PA must be recorded with 4K alignment; t is 4K aligned */
                buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

                /* Shrink page_shift (doubling npages) until the DMA address
                 * is aligned to the resulting page size.
                 */
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }

                memset(buf->direct.buf, 0, size);
        } else {
                buf->nbufs = (size + page_size - 1) / page_size;
                buf->npages = buf->nbufs;
                buf->page_shift = page_shift;
                buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                         GFP_KERNEL);

                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf = dma_alloc_coherent(dev,
                                                                   page_size, &t,
                                                                   GFP_KERNEL);

                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                        memset(buf->page_list[i].buf, 0, page_size);
                }
                if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
                        pages = kmalloc_array(buf->nbufs, sizeof(*pages),
                                              GFP_KERNEL);
                        if (!pages)
                                goto err_free;

                        for (i = 0; i < buf->nbufs; ++i)
                                pages[i] = virt_to_page(buf->page_list[i].buf);

                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
                                               PAGE_KERNEL);
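                        /* Only the temporary pointer array can be freed
                         * here; the coherent pages themselves must stay
                         * allocated until hns_roce_buf_free() unmaps and
                         * frees them.
                         */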
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                } else {
                        buf->direct.buf = NULL;
                }
        }

        return 0;

err_free:
        hns_roce_buf_free(hr_dev, size, buf);
        return -ENOMEM;
}

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
        hns_roce_cleanup_qp_table(hr_dev);
        hns_roce_cleanup_cq_table(hr_dev);
        hns_roce_cleanup_mr_table(hr_dev);
        hns_roce_cleanup_pd_table(hr_dev);
        hns_roce_cleanup_uar_table(hr_dev);
}
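
/*
 * Illustrative sketch only, compiled out by default: how the bitmap
 * helpers above are typically paired. The guard macro, the function
 * name, the table size (4096, a power of two as hns_roce_bitmap_init()
 * requires, with mask num - 1) and the 8 reserved bottom entries are
 * made-up example values, not part of the driver.
 */
#ifdef HNS_ROCE_ALLOC_EXAMPLE
static int hns_roce_alloc_example(void)
{
        struct hns_roce_bitmap bitmap;
        unsigned long obj;
        int ret;

        ret = hns_roce_bitmap_init(&bitmap, 4096, 4096 - 1, 8, 0);
        if (ret)
                return ret;

        /* Returns 0 on success; obj carries the index plus 'top' bits */
        ret = hns_roce_bitmap_alloc(&bitmap, &obj);
        if (!ret)
                /* rr == 0: let 'last' rewind so the slot can be reused */
                hns_roce_bitmap_free(&bitmap, obj, 0);

        hns_roce_bitmap_cleanup(&bitmap);
        return ret;
}
#endif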