/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 285a2cc190SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 295a2cc190SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 305a2cc190SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 315a2cc190SJeff Kirsher * SOFTWARE. 325a2cc190SJeff Kirsher */ 335a2cc190SJeff Kirsher 345a2cc190SJeff Kirsher #include <linux/errno.h> 355a2cc190SJeff Kirsher #include <linux/slab.h> 365a2cc190SJeff Kirsher #include <linux/mm.h> 37ee40fa06SPaul Gortmaker #include <linux/export.h> 385a2cc190SJeff Kirsher #include <linux/bitmap.h> 395a2cc190SJeff Kirsher #include <linux/dma-mapping.h> 405a2cc190SJeff Kirsher #include <linux/vmalloc.h> 415a2cc190SJeff Kirsher 425a2cc190SJeff Kirsher #include "mlx4.h" 435a2cc190SJeff Kirsher 445a2cc190SJeff Kirsher u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) 455a2cc190SJeff Kirsher { 465a2cc190SJeff Kirsher u32 obj; 475a2cc190SJeff Kirsher 485a2cc190SJeff Kirsher spin_lock(&bitmap->lock); 495a2cc190SJeff Kirsher 505a2cc190SJeff Kirsher obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); 515a2cc190SJeff Kirsher if (obj >= bitmap->max) { 525a2cc190SJeff Kirsher bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 535a2cc190SJeff Kirsher & bitmap->mask; 545a2cc190SJeff Kirsher obj = find_first_zero_bit(bitmap->table, bitmap->max); 555a2cc190SJeff Kirsher } 565a2cc190SJeff Kirsher 575a2cc190SJeff Kirsher if (obj < bitmap->max) { 585a2cc190SJeff Kirsher set_bit(obj, bitmap->table); 595a2cc190SJeff Kirsher bitmap->last = (obj + 1); 605a2cc190SJeff Kirsher if (bitmap->last == bitmap->max) 615a2cc190SJeff Kirsher bitmap->last = 0; 625a2cc190SJeff Kirsher obj |= bitmap->top; 635a2cc190SJeff Kirsher } else 645a2cc190SJeff Kirsher obj = -1; 655a2cc190SJeff Kirsher 665a2cc190SJeff Kirsher if (obj != -1) 675a2cc190SJeff Kirsher --bitmap->avail; 685a2cc190SJeff Kirsher 695a2cc190SJeff Kirsher 
spin_unlock(&bitmap->lock); 705a2cc190SJeff Kirsher 715a2cc190SJeff Kirsher return obj; 725a2cc190SJeff Kirsher } 735a2cc190SJeff Kirsher 747c6d74d2SJack Morgenstein void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr) 755a2cc190SJeff Kirsher { 767c6d74d2SJack Morgenstein mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); 775a2cc190SJeff Kirsher } 785a2cc190SJeff Kirsher 79ddae0349SEugenia Emantayev static unsigned long find_aligned_range(unsigned long *bitmap, 80ddae0349SEugenia Emantayev u32 start, u32 nbits, 81ddae0349SEugenia Emantayev int len, int align, u32 skip_mask) 82ddae0349SEugenia Emantayev { 83ddae0349SEugenia Emantayev unsigned long end, i; 84ddae0349SEugenia Emantayev 85ddae0349SEugenia Emantayev again: 86ddae0349SEugenia Emantayev start = ALIGN(start, align); 87ddae0349SEugenia Emantayev 88ddae0349SEugenia Emantayev while ((start < nbits) && (test_bit(start, bitmap) || 89ddae0349SEugenia Emantayev (start & skip_mask))) 90ddae0349SEugenia Emantayev start += align; 91ddae0349SEugenia Emantayev 92ddae0349SEugenia Emantayev if (start >= nbits) 93ddae0349SEugenia Emantayev return -1; 94ddae0349SEugenia Emantayev 95ddae0349SEugenia Emantayev end = start+len; 96ddae0349SEugenia Emantayev if (end > nbits) 97ddae0349SEugenia Emantayev return -1; 98ddae0349SEugenia Emantayev 99ddae0349SEugenia Emantayev for (i = start + 1; i < end; i++) { 100ddae0349SEugenia Emantayev if (test_bit(i, bitmap) || ((u32)i & skip_mask)) { 101ddae0349SEugenia Emantayev start = i + 1; 102ddae0349SEugenia Emantayev goto again; 103ddae0349SEugenia Emantayev } 104ddae0349SEugenia Emantayev } 105ddae0349SEugenia Emantayev 106ddae0349SEugenia Emantayev return start; 107ddae0349SEugenia Emantayev } 108ddae0349SEugenia Emantayev 109ddae0349SEugenia Emantayev u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, 110ddae0349SEugenia Emantayev int align, u32 skip_mask) 1115a2cc190SJeff Kirsher { 1125a2cc190SJeff Kirsher u32 obj; 1135a2cc190SJeff Kirsher 
114ddae0349SEugenia Emantayev if (likely(cnt == 1 && align == 1 && !skip_mask)) 1155a2cc190SJeff Kirsher return mlx4_bitmap_alloc(bitmap); 1165a2cc190SJeff Kirsher 1175a2cc190SJeff Kirsher spin_lock(&bitmap->lock); 1185a2cc190SJeff Kirsher 119ddae0349SEugenia Emantayev obj = find_aligned_range(bitmap->table, bitmap->last, 120ddae0349SEugenia Emantayev bitmap->max, cnt, align, skip_mask); 1215a2cc190SJeff Kirsher if (obj >= bitmap->max) { 1225a2cc190SJeff Kirsher bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 1235a2cc190SJeff Kirsher & bitmap->mask; 124ddae0349SEugenia Emantayev obj = find_aligned_range(bitmap->table, 0, bitmap->max, 125ddae0349SEugenia Emantayev cnt, align, skip_mask); 1265a2cc190SJeff Kirsher } 1275a2cc190SJeff Kirsher 1285a2cc190SJeff Kirsher if (obj < bitmap->max) { 1295a2cc190SJeff Kirsher bitmap_set(bitmap->table, obj, cnt); 1305a2cc190SJeff Kirsher if (obj == bitmap->last) { 1315a2cc190SJeff Kirsher bitmap->last = (obj + cnt); 1325a2cc190SJeff Kirsher if (bitmap->last >= bitmap->max) 1335a2cc190SJeff Kirsher bitmap->last = 0; 1345a2cc190SJeff Kirsher } 1355a2cc190SJeff Kirsher obj |= bitmap->top; 1365a2cc190SJeff Kirsher } else 1375a2cc190SJeff Kirsher obj = -1; 1385a2cc190SJeff Kirsher 1395a2cc190SJeff Kirsher if (obj != -1) 1405a2cc190SJeff Kirsher bitmap->avail -= cnt; 1415a2cc190SJeff Kirsher 1425a2cc190SJeff Kirsher spin_unlock(&bitmap->lock); 1435a2cc190SJeff Kirsher 1445a2cc190SJeff Kirsher return obj; 1455a2cc190SJeff Kirsher } 1465a2cc190SJeff Kirsher 1475a2cc190SJeff Kirsher u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) 1485a2cc190SJeff Kirsher { 1495a2cc190SJeff Kirsher return bitmap->avail; 1505a2cc190SJeff Kirsher } 1515a2cc190SJeff Kirsher 1527a89399fSMatan Barak static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj) 1537a89399fSMatan Barak { 1547a89399fSMatan Barak return obj & (bitmap->max + bitmap->reserved_top - 1); 1557a89399fSMatan Barak } 1567a89399fSMatan Barak 1577c6d74d2SJack 
Morgenstein void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, 1587c6d74d2SJack Morgenstein int use_rr) 1595a2cc190SJeff Kirsher { 1605a2cc190SJeff Kirsher obj &= bitmap->max + bitmap->reserved_top - 1; 1615a2cc190SJeff Kirsher 1625a2cc190SJeff Kirsher spin_lock(&bitmap->lock); 1637c6d74d2SJack Morgenstein if (!use_rr) { 1647c6d74d2SJack Morgenstein bitmap->last = min(bitmap->last, obj); 1657c6d74d2SJack Morgenstein bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 1667c6d74d2SJack Morgenstein & bitmap->mask; 1677c6d74d2SJack Morgenstein } 1685a2cc190SJeff Kirsher bitmap_clear(bitmap->table, obj, cnt); 1695a2cc190SJeff Kirsher bitmap->avail += cnt; 1705a2cc190SJeff Kirsher spin_unlock(&bitmap->lock); 1715a2cc190SJeff Kirsher } 1725a2cc190SJeff Kirsher 1735a2cc190SJeff Kirsher int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 1745a2cc190SJeff Kirsher u32 reserved_bot, u32 reserved_top) 1755a2cc190SJeff Kirsher { 1765a2cc190SJeff Kirsher /* num must be a power of 2 */ 1775a2cc190SJeff Kirsher if (num != roundup_pow_of_two(num)) 1785a2cc190SJeff Kirsher return -EINVAL; 1795a2cc190SJeff Kirsher 1805a2cc190SJeff Kirsher bitmap->last = 0; 1815a2cc190SJeff Kirsher bitmap->top = 0; 1825a2cc190SJeff Kirsher bitmap->max = num - reserved_top; 1835a2cc190SJeff Kirsher bitmap->mask = mask; 1845a2cc190SJeff Kirsher bitmap->reserved_top = reserved_top; 1855a2cc190SJeff Kirsher bitmap->avail = num - reserved_top - reserved_bot; 1867a89399fSMatan Barak bitmap->effective_len = bitmap->avail; 1875a2cc190SJeff Kirsher spin_lock_init(&bitmap->lock); 1886396bb22SKees Cook bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long), 1896396bb22SKees Cook GFP_KERNEL); 1905a2cc190SJeff Kirsher if (!bitmap->table) 1915a2cc190SJeff Kirsher return -ENOMEM; 1925a2cc190SJeff Kirsher 1935a2cc190SJeff Kirsher bitmap_set(bitmap->table, 0, reserved_bot); 1945a2cc190SJeff Kirsher 1955a2cc190SJeff Kirsher return 0; 1965a2cc190SJeff Kirsher 
} 1975a2cc190SJeff Kirsher 1985a2cc190SJeff Kirsher void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) 1995a2cc190SJeff Kirsher { 2005a2cc190SJeff Kirsher kfree(bitmap->table); 2015a2cc190SJeff Kirsher } 2025a2cc190SJeff Kirsher 2037a89399fSMatan Barak struct mlx4_zone_allocator { 2047a89399fSMatan Barak struct list_head entries; 2057a89399fSMatan Barak struct list_head prios; 2067a89399fSMatan Barak u32 last_uid; 2077a89399fSMatan Barak u32 mask; 2087a89399fSMatan Barak /* protect the zone_allocator from concurrent accesses */ 2097a89399fSMatan Barak spinlock_t lock; 2107a89399fSMatan Barak enum mlx4_zone_alloc_flags flags; 2117a89399fSMatan Barak }; 2127a89399fSMatan Barak 2137a89399fSMatan Barak struct mlx4_zone_entry { 2147a89399fSMatan Barak struct list_head list; 2157a89399fSMatan Barak struct list_head prio_list; 2167a89399fSMatan Barak u32 uid; 2177a89399fSMatan Barak struct mlx4_zone_allocator *allocator; 2187a89399fSMatan Barak struct mlx4_bitmap *bitmap; 2197a89399fSMatan Barak int use_rr; 2207a89399fSMatan Barak int priority; 2217a89399fSMatan Barak int offset; 2227a89399fSMatan Barak enum mlx4_zone_flags flags; 2237a89399fSMatan Barak }; 2247a89399fSMatan Barak 2257a89399fSMatan Barak struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags) 2267a89399fSMatan Barak { 2277a89399fSMatan Barak struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL); 2287a89399fSMatan Barak 2297a89399fSMatan Barak if (NULL == zones) 2307a89399fSMatan Barak return NULL; 2317a89399fSMatan Barak 2327a89399fSMatan Barak INIT_LIST_HEAD(&zones->entries); 2337a89399fSMatan Barak INIT_LIST_HEAD(&zones->prios); 2347a89399fSMatan Barak spin_lock_init(&zones->lock); 2357a89399fSMatan Barak zones->last_uid = 0; 2367a89399fSMatan Barak zones->mask = 0; 2377a89399fSMatan Barak zones->flags = flags; 2387a89399fSMatan Barak 2397a89399fSMatan Barak return zones; 2407a89399fSMatan Barak } 2417a89399fSMatan Barak 2427a89399fSMatan Barak int 
mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, 2437a89399fSMatan Barak struct mlx4_bitmap *bitmap, 2447a89399fSMatan Barak u32 flags, 2457a89399fSMatan Barak int priority, 2467a89399fSMatan Barak int offset, 2477a89399fSMatan Barak u32 *puid) 2487a89399fSMatan Barak { 2497a89399fSMatan Barak u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1); 2507a89399fSMatan Barak struct mlx4_zone_entry *it; 2517a89399fSMatan Barak struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); 2527a89399fSMatan Barak 2537a89399fSMatan Barak if (NULL == zone) 2547a89399fSMatan Barak return -ENOMEM; 2557a89399fSMatan Barak 2567a89399fSMatan Barak zone->flags = flags; 2577a89399fSMatan Barak zone->bitmap = bitmap; 2587a89399fSMatan Barak zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; 2597a89399fSMatan Barak zone->priority = priority; 2607a89399fSMatan Barak zone->offset = offset; 2617a89399fSMatan Barak 2627a89399fSMatan Barak spin_lock(&zone_alloc->lock); 2637a89399fSMatan Barak 2647a89399fSMatan Barak zone->uid = zone_alloc->last_uid++; 2657a89399fSMatan Barak zone->allocator = zone_alloc; 2667a89399fSMatan Barak 2677a89399fSMatan Barak if (zone_alloc->mask < mask) 2687a89399fSMatan Barak zone_alloc->mask = mask; 2697a89399fSMatan Barak 2707a89399fSMatan Barak list_for_each_entry(it, &zone_alloc->prios, prio_list) 2717a89399fSMatan Barak if (it->priority >= priority) 2727a89399fSMatan Barak break; 2737a89399fSMatan Barak 2747a89399fSMatan Barak if (&it->prio_list == &zone_alloc->prios || it->priority > priority) 2757a89399fSMatan Barak list_add_tail(&zone->prio_list, &it->prio_list); 2767a89399fSMatan Barak list_add_tail(&zone->list, &it->list); 2777a89399fSMatan Barak 2787a89399fSMatan Barak spin_unlock(&zone_alloc->lock); 2797a89399fSMatan Barak 2807a89399fSMatan Barak *puid = zone->uid; 2817a89399fSMatan Barak 2827a89399fSMatan Barak return 0; 2837a89399fSMatan Barak } 2847a89399fSMatan Barak 2857a89399fSMatan Barak /* Should be called under a 
lock */ 286e36fef66SZhu Yanjun static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) 2877a89399fSMatan Barak { 2887a89399fSMatan Barak struct mlx4_zone_allocator *zone_alloc = entry->allocator; 2897a89399fSMatan Barak 2907a89399fSMatan Barak if (!list_empty(&entry->prio_list)) { 2917a89399fSMatan Barak /* Check if we need to add an alternative node to the prio list */ 2927a89399fSMatan Barak if (!list_is_last(&entry->list, &zone_alloc->entries)) { 2937a89399fSMatan Barak struct mlx4_zone_entry *next = list_first_entry(&entry->list, 2947a89399fSMatan Barak typeof(*next), 2957a89399fSMatan Barak list); 2967a89399fSMatan Barak 2977a89399fSMatan Barak if (next->priority == entry->priority) 2987a89399fSMatan Barak list_add_tail(&next->prio_list, &entry->prio_list); 2997a89399fSMatan Barak } 3007a89399fSMatan Barak 3017a89399fSMatan Barak list_del(&entry->prio_list); 3027a89399fSMatan Barak } 3037a89399fSMatan Barak 3047a89399fSMatan Barak list_del(&entry->list); 3057a89399fSMatan Barak 3067a89399fSMatan Barak if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) { 3077a89399fSMatan Barak u32 mask = 0; 3087a89399fSMatan Barak struct mlx4_zone_entry *it; 3097a89399fSMatan Barak 3107a89399fSMatan Barak list_for_each_entry(it, &zone_alloc->prios, prio_list) { 3117a89399fSMatan Barak u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1); 3127a89399fSMatan Barak 3137a89399fSMatan Barak if (mask < cur_mask) 3147a89399fSMatan Barak mask = cur_mask; 3157a89399fSMatan Barak } 3167a89399fSMatan Barak zone_alloc->mask = mask; 3177a89399fSMatan Barak } 3187a89399fSMatan Barak } 3197a89399fSMatan Barak 3207a89399fSMatan Barak void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) 3217a89399fSMatan Barak { 3227a89399fSMatan Barak struct mlx4_zone_entry *zone, *tmp; 3237a89399fSMatan Barak 3247a89399fSMatan Barak spin_lock(&zone_alloc->lock); 3257a89399fSMatan Barak 3267a89399fSMatan Barak list_for_each_entry_safe(zone, tmp, 
&zone_alloc->entries, list) { 3277a89399fSMatan Barak list_del(&zone->list); 3287a89399fSMatan Barak list_del(&zone->prio_list); 3297a89399fSMatan Barak kfree(zone); 3307a89399fSMatan Barak } 3317a89399fSMatan Barak 3327a89399fSMatan Barak spin_unlock(&zone_alloc->lock); 3337a89399fSMatan Barak kfree(zone_alloc); 3347a89399fSMatan Barak } 3357a89399fSMatan Barak 3367a89399fSMatan Barak /* Should be called under a lock */ 3377a89399fSMatan Barak static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, 3387a89399fSMatan Barak int align, u32 skip_mask, u32 *puid) 3397a89399fSMatan Barak { 3407a89399fSMatan Barak u32 uid; 3417a89399fSMatan Barak u32 res; 3427a89399fSMatan Barak struct mlx4_zone_allocator *zone_alloc = zone->allocator; 3437a89399fSMatan Barak struct mlx4_zone_entry *curr_node; 3447a89399fSMatan Barak 3457a89399fSMatan Barak res = mlx4_bitmap_alloc_range(zone->bitmap, count, 3467a89399fSMatan Barak align, skip_mask); 3477a89399fSMatan Barak 3487a89399fSMatan Barak if (res != (u32)-1) { 3497a89399fSMatan Barak res += zone->offset; 3507a89399fSMatan Barak uid = zone->uid; 3517a89399fSMatan Barak goto out; 3527a89399fSMatan Barak } 3537a89399fSMatan Barak 3547a89399fSMatan Barak list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) { 3557a89399fSMatan Barak if (unlikely(curr_node->priority == zone->priority)) 3567a89399fSMatan Barak break; 3577a89399fSMatan Barak } 3587a89399fSMatan Barak 3597a89399fSMatan Barak if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) { 3607a89399fSMatan Barak struct mlx4_zone_entry *it = curr_node; 3617a89399fSMatan Barak 3627a89399fSMatan Barak list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) { 3637a89399fSMatan Barak res = mlx4_bitmap_alloc_range(it->bitmap, count, 3647a89399fSMatan Barak align, skip_mask); 3657a89399fSMatan Barak if (res != (u32)-1) { 3667a89399fSMatan Barak res += it->offset; 3677a89399fSMatan Barak uid = it->uid; 3687a89399fSMatan Barak goto out; 
3697a89399fSMatan Barak } 3707a89399fSMatan Barak } 3717a89399fSMatan Barak } 3727a89399fSMatan Barak 3737a89399fSMatan Barak if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) { 3747a89399fSMatan Barak struct mlx4_zone_entry *it = curr_node; 3757a89399fSMatan Barak 3767a89399fSMatan Barak list_for_each_entry_from(it, &zone_alloc->entries, list) { 3777a89399fSMatan Barak if (unlikely(it == zone)) 3787a89399fSMatan Barak continue; 3797a89399fSMatan Barak 3807a89399fSMatan Barak if (unlikely(it->priority != curr_node->priority)) 3817a89399fSMatan Barak break; 3827a89399fSMatan Barak 3837a89399fSMatan Barak res = mlx4_bitmap_alloc_range(it->bitmap, count, 3847a89399fSMatan Barak align, skip_mask); 3857a89399fSMatan Barak if (res != (u32)-1) { 3867a89399fSMatan Barak res += it->offset; 3877a89399fSMatan Barak uid = it->uid; 3887a89399fSMatan Barak goto out; 3897a89399fSMatan Barak } 3907a89399fSMatan Barak } 3917a89399fSMatan Barak } 3927a89399fSMatan Barak 3937a89399fSMatan Barak if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) { 3947a89399fSMatan Barak if (list_is_last(&curr_node->prio_list, &zone_alloc->prios)) 3957a89399fSMatan Barak goto out; 3967a89399fSMatan Barak 3977a89399fSMatan Barak curr_node = list_first_entry(&curr_node->prio_list, 3987a89399fSMatan Barak typeof(*curr_node), 3997a89399fSMatan Barak prio_list); 4007a89399fSMatan Barak 4017a89399fSMatan Barak list_for_each_entry_from(curr_node, &zone_alloc->entries, list) { 4027a89399fSMatan Barak res = mlx4_bitmap_alloc_range(curr_node->bitmap, count, 4037a89399fSMatan Barak align, skip_mask); 4047a89399fSMatan Barak if (res != (u32)-1) { 4057a89399fSMatan Barak res += curr_node->offset; 4067a89399fSMatan Barak uid = curr_node->uid; 4077a89399fSMatan Barak goto out; 4087a89399fSMatan Barak } 4097a89399fSMatan Barak } 4107a89399fSMatan Barak } 4117a89399fSMatan Barak 4127a89399fSMatan Barak out: 4137a89399fSMatan Barak if (NULL != puid && res != (u32)-1) 4147a89399fSMatan Barak *puid = uid; 
4157a89399fSMatan Barak return res; 4167a89399fSMatan Barak } 4177a89399fSMatan Barak 4187a89399fSMatan Barak /* Should be called under a lock */ 4197a89399fSMatan Barak static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj, 4207a89399fSMatan Barak u32 count) 4217a89399fSMatan Barak { 4227a89399fSMatan Barak mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr); 4237a89399fSMatan Barak } 4247a89399fSMatan Barak 4257a89399fSMatan Barak /* Should be called under a lock */ 4267a89399fSMatan Barak static struct mlx4_zone_entry *__mlx4_find_zone_by_uid( 4277a89399fSMatan Barak struct mlx4_zone_allocator *zones, u32 uid) 4287a89399fSMatan Barak { 4297a89399fSMatan Barak struct mlx4_zone_entry *zone; 4307a89399fSMatan Barak 4317a89399fSMatan Barak list_for_each_entry(zone, &zones->entries, list) { 4327a89399fSMatan Barak if (zone->uid == uid) 4337a89399fSMatan Barak return zone; 4347a89399fSMatan Barak } 4357a89399fSMatan Barak 4367a89399fSMatan Barak return NULL; 4377a89399fSMatan Barak } 4387a89399fSMatan Barak 4397a89399fSMatan Barak struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid) 4407a89399fSMatan Barak { 4417a89399fSMatan Barak struct mlx4_zone_entry *zone; 4427a89399fSMatan Barak struct mlx4_bitmap *bitmap; 4437a89399fSMatan Barak 4447a89399fSMatan Barak spin_lock(&zones->lock); 4457a89399fSMatan Barak 4467a89399fSMatan Barak zone = __mlx4_find_zone_by_uid(zones, uid); 4477a89399fSMatan Barak 4487a89399fSMatan Barak bitmap = zone == NULL ? 
NULL : zone->bitmap; 4497a89399fSMatan Barak 4507a89399fSMatan Barak spin_unlock(&zones->lock); 4517a89399fSMatan Barak 4527a89399fSMatan Barak return bitmap; 4537a89399fSMatan Barak } 4547a89399fSMatan Barak 4557a89399fSMatan Barak int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) 4567a89399fSMatan Barak { 4577a89399fSMatan Barak struct mlx4_zone_entry *zone; 458e36fef66SZhu Yanjun int res = 0; 4597a89399fSMatan Barak 4607a89399fSMatan Barak spin_lock(&zones->lock); 4617a89399fSMatan Barak 4627a89399fSMatan Barak zone = __mlx4_find_zone_by_uid(zones, uid); 4637a89399fSMatan Barak 4647a89399fSMatan Barak if (NULL == zone) { 4657a89399fSMatan Barak res = -1; 4667a89399fSMatan Barak goto out; 4677a89399fSMatan Barak } 4687a89399fSMatan Barak 469e36fef66SZhu Yanjun __mlx4_zone_remove_one_entry(zone); 4707a89399fSMatan Barak 4717a89399fSMatan Barak out: 4727a89399fSMatan Barak spin_unlock(&zones->lock); 4737a89399fSMatan Barak kfree(zone); 4747a89399fSMatan Barak 4757a89399fSMatan Barak return res; 4767a89399fSMatan Barak } 4777a89399fSMatan Barak 4787a89399fSMatan Barak /* Should be called under a lock */ 4797a89399fSMatan Barak static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique( 4807a89399fSMatan Barak struct mlx4_zone_allocator *zones, u32 obj) 4817a89399fSMatan Barak { 4827a89399fSMatan Barak struct mlx4_zone_entry *zone, *zone_candidate = NULL; 4837a89399fSMatan Barak u32 dist = (u32)-1; 4847a89399fSMatan Barak 4857a89399fSMatan Barak /* Search for the smallest zone that this obj could be 4867a89399fSMatan Barak * allocated from. This is done in order to handle 4877a89399fSMatan Barak * situations when small bitmaps are allocated from bigger 4887a89399fSMatan Barak * bitmaps (and the allocated space is marked as reserved in 4897a89399fSMatan Barak * the bigger bitmap. 
4907a89399fSMatan Barak */ 4917a89399fSMatan Barak list_for_each_entry(zone, &zones->entries, list) { 4927a89399fSMatan Barak if (obj >= zone->offset) { 4937a89399fSMatan Barak u32 mobj = (obj - zone->offset) & zones->mask; 4947a89399fSMatan Barak 4957a89399fSMatan Barak if (mobj < zone->bitmap->max) { 4967a89399fSMatan Barak u32 curr_dist = zone->bitmap->effective_len; 4977a89399fSMatan Barak 4987a89399fSMatan Barak if (curr_dist < dist) { 4997a89399fSMatan Barak dist = curr_dist; 5007a89399fSMatan Barak zone_candidate = zone; 5017a89399fSMatan Barak } 5027a89399fSMatan Barak } 5037a89399fSMatan Barak } 5047a89399fSMatan Barak } 5057a89399fSMatan Barak 5067a89399fSMatan Barak return zone_candidate; 5077a89399fSMatan Barak } 5087a89399fSMatan Barak 5097a89399fSMatan Barak u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, 5107a89399fSMatan Barak int align, u32 skip_mask, u32 *puid) 5117a89399fSMatan Barak { 5127a89399fSMatan Barak struct mlx4_zone_entry *zone; 5137a89399fSMatan Barak int res = -1; 5147a89399fSMatan Barak 5157a89399fSMatan Barak spin_lock(&zones->lock); 5167a89399fSMatan Barak 5177a89399fSMatan Barak zone = __mlx4_find_zone_by_uid(zones, uid); 5187a89399fSMatan Barak 5197a89399fSMatan Barak if (NULL == zone) 5207a89399fSMatan Barak goto out; 5217a89399fSMatan Barak 5227a89399fSMatan Barak res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid); 5237a89399fSMatan Barak 5247a89399fSMatan Barak out: 5257a89399fSMatan Barak spin_unlock(&zones->lock); 5267a89399fSMatan Barak 5277a89399fSMatan Barak return res; 5287a89399fSMatan Barak } 5297a89399fSMatan Barak 5307a89399fSMatan Barak u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count) 5317a89399fSMatan Barak { 5327a89399fSMatan Barak struct mlx4_zone_entry *zone; 5337a89399fSMatan Barak int res = 0; 5347a89399fSMatan Barak 5357a89399fSMatan Barak spin_lock(&zones->lock); 5367a89399fSMatan Barak 5377a89399fSMatan Barak zone = 
__mlx4_find_zone_by_uid(zones, uid); 5387a89399fSMatan Barak 5397a89399fSMatan Barak if (NULL == zone) { 5407a89399fSMatan Barak res = -1; 5417a89399fSMatan Barak goto out; 5427a89399fSMatan Barak } 5437a89399fSMatan Barak 5447a89399fSMatan Barak __mlx4_free_from_zone(zone, obj, count); 5457a89399fSMatan Barak 5467a89399fSMatan Barak out: 5477a89399fSMatan Barak spin_unlock(&zones->lock); 5487a89399fSMatan Barak 5497a89399fSMatan Barak return res; 5507a89399fSMatan Barak } 5517a89399fSMatan Barak 5527a89399fSMatan Barak u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count) 5537a89399fSMatan Barak { 5547a89399fSMatan Barak struct mlx4_zone_entry *zone; 5557a89399fSMatan Barak int res; 5567a89399fSMatan Barak 5577a89399fSMatan Barak if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP)) 5587a89399fSMatan Barak return -EFAULT; 5597a89399fSMatan Barak 5607a89399fSMatan Barak spin_lock(&zones->lock); 5617a89399fSMatan Barak 5627a89399fSMatan Barak zone = __mlx4_find_zone_by_uid_unique(zones, obj); 5637a89399fSMatan Barak 5647a89399fSMatan Barak if (NULL == zone) { 5657a89399fSMatan Barak res = -1; 5667a89399fSMatan Barak goto out; 5677a89399fSMatan Barak } 5687a89399fSMatan Barak 5697a89399fSMatan Barak __mlx4_free_from_zone(zone, obj, count); 5707a89399fSMatan Barak res = 0; 5717a89399fSMatan Barak 5727a89399fSMatan Barak out: 5737a89399fSMatan Barak spin_unlock(&zones->lock); 5747a89399fSMatan Barak 5757a89399fSMatan Barak return res; 5767a89399fSMatan Barak } 5775a2cc190SJeff Kirsher 57873898db0SHaggai Abramovsky static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, 5798900b894SLeon Romanovsky struct mlx4_buf *buf) 5805a2cc190SJeff Kirsher { 5815a2cc190SJeff Kirsher dma_addr_t t; 5825a2cc190SJeff Kirsher 5835a2cc190SJeff Kirsher buf->nbufs = 1; 5845a2cc190SJeff Kirsher buf->npages = 1; 5855a2cc190SJeff Kirsher buf->page_shift = get_order(size) + PAGE_SHIFT; 58673898db0SHaggai Abramovsky buf->direct.buf = 
58773898db0SHaggai Abramovsky dma_zalloc_coherent(&dev->persist->pdev->dev, 5888900b894SLeon Romanovsky size, &t, GFP_KERNEL); 5895a2cc190SJeff Kirsher if (!buf->direct.buf) 5905a2cc190SJeff Kirsher return -ENOMEM; 5915a2cc190SJeff Kirsher 5925a2cc190SJeff Kirsher buf->direct.map = t; 5935a2cc190SJeff Kirsher 5945a2cc190SJeff Kirsher while (t & ((1 << buf->page_shift) - 1)) { 5955a2cc190SJeff Kirsher --buf->page_shift; 5965a2cc190SJeff Kirsher buf->npages *= 2; 5975a2cc190SJeff Kirsher } 5985a2cc190SJeff Kirsher 59973898db0SHaggai Abramovsky return 0; 60073898db0SHaggai Abramovsky } 60173898db0SHaggai Abramovsky 60273898db0SHaggai Abramovsky /* Handling for queue buffers -- we allocate a bunch of memory and 60373898db0SHaggai Abramovsky * register it in a memory region at HCA virtual address 0. If the 60473898db0SHaggai Abramovsky * requested size is > max_direct, we split the allocation into 60573898db0SHaggai Abramovsky * multiple pages, so we don't require too much contiguous memory. 
60673898db0SHaggai Abramovsky */ 60773898db0SHaggai Abramovsky int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 6088900b894SLeon Romanovsky struct mlx4_buf *buf) 60973898db0SHaggai Abramovsky { 61073898db0SHaggai Abramovsky if (size <= max_direct) { 6118900b894SLeon Romanovsky return mlx4_buf_direct_alloc(dev, size, buf); 6125a2cc190SJeff Kirsher } else { 61373898db0SHaggai Abramovsky dma_addr_t t; 6145a2cc190SJeff Kirsher int i; 6155a2cc190SJeff Kirsher 6165a2cc190SJeff Kirsher buf->direct.buf = NULL; 617f8a1988fSzhong jiang buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); 6185a2cc190SJeff Kirsher buf->npages = buf->nbufs; 6195a2cc190SJeff Kirsher buf->page_shift = PAGE_SHIFT; 6205a2cc190SJeff Kirsher buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), 6218900b894SLeon Romanovsky GFP_KERNEL); 6225a2cc190SJeff Kirsher if (!buf->page_list) 6235a2cc190SJeff Kirsher return -ENOMEM; 6245a2cc190SJeff Kirsher 6255a2cc190SJeff Kirsher for (i = 0; i < buf->nbufs; ++i) { 6265a2cc190SJeff Kirsher buf->page_list[i].buf = 62773898db0SHaggai Abramovsky dma_zalloc_coherent(&dev->persist->pdev->dev, 6288900b894SLeon Romanovsky PAGE_SIZE, &t, GFP_KERNEL); 6295a2cc190SJeff Kirsher if (!buf->page_list[i].buf) 6305a2cc190SJeff Kirsher goto err_free; 6315a2cc190SJeff Kirsher 6325a2cc190SJeff Kirsher buf->page_list[i].map = t; 6335a2cc190SJeff Kirsher } 6345a2cc190SJeff Kirsher } 6355a2cc190SJeff Kirsher 6365a2cc190SJeff Kirsher return 0; 6375a2cc190SJeff Kirsher 6385a2cc190SJeff Kirsher err_free: 6395a2cc190SJeff Kirsher mlx4_buf_free(dev, size, buf); 6405a2cc190SJeff Kirsher 6415a2cc190SJeff Kirsher return -ENOMEM; 6425a2cc190SJeff Kirsher } 6435a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_buf_alloc); 6445a2cc190SJeff Kirsher 6455a2cc190SJeff Kirsher void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) 6465a2cc190SJeff Kirsher { 64773898db0SHaggai Abramovsky if (buf->nbufs == 1) { 648872bf2fbSYishai Hadas 
dma_free_coherent(&dev->persist->pdev->dev, size, 64973898db0SHaggai Abramovsky buf->direct.buf, buf->direct.map); 65073898db0SHaggai Abramovsky } else { 65173898db0SHaggai Abramovsky int i; 6525a2cc190SJeff Kirsher 6535a2cc190SJeff Kirsher for (i = 0; i < buf->nbufs; ++i) 6545a2cc190SJeff Kirsher if (buf->page_list[i].buf) 655872bf2fbSYishai Hadas dma_free_coherent(&dev->persist->pdev->dev, 656872bf2fbSYishai Hadas PAGE_SIZE, 6575a2cc190SJeff Kirsher buf->page_list[i].buf, 6585a2cc190SJeff Kirsher buf->page_list[i].map); 6595a2cc190SJeff Kirsher kfree(buf->page_list); 6605a2cc190SJeff Kirsher } 6615a2cc190SJeff Kirsher } 6625a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_buf_free); 6635a2cc190SJeff Kirsher 6648900b894SLeon Romanovsky static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) 6655a2cc190SJeff Kirsher { 6665a2cc190SJeff Kirsher struct mlx4_db_pgdir *pgdir; 6675a2cc190SJeff Kirsher 6688900b894SLeon Romanovsky pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); 6695a2cc190SJeff Kirsher if (!pgdir) 6705a2cc190SJeff Kirsher return NULL; 6715a2cc190SJeff Kirsher 6725a2cc190SJeff Kirsher bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); 6735a2cc190SJeff Kirsher pgdir->bits[0] = pgdir->order0; 6745a2cc190SJeff Kirsher pgdir->bits[1] = pgdir->order1; 6755a2cc190SJeff Kirsher pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, 6768900b894SLeon Romanovsky &pgdir->db_dma, GFP_KERNEL); 6775a2cc190SJeff Kirsher if (!pgdir->db_page) { 6785a2cc190SJeff Kirsher kfree(pgdir); 6795a2cc190SJeff Kirsher return NULL; 6805a2cc190SJeff Kirsher } 6815a2cc190SJeff Kirsher 6825a2cc190SJeff Kirsher return pgdir; 6835a2cc190SJeff Kirsher } 6845a2cc190SJeff Kirsher 6855a2cc190SJeff Kirsher static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, 6865a2cc190SJeff Kirsher struct mlx4_db *db, int order) 6875a2cc190SJeff Kirsher { 6885a2cc190SJeff Kirsher int o; 6895a2cc190SJeff Kirsher int i; 6905a2cc190SJeff Kirsher 6915a2cc190SJeff Kirsher for (o = order; o 
<= 1; ++o) { 6925a2cc190SJeff Kirsher i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); 6935a2cc190SJeff Kirsher if (i < MLX4_DB_PER_PAGE >> o) 6945a2cc190SJeff Kirsher goto found; 6955a2cc190SJeff Kirsher } 6965a2cc190SJeff Kirsher 6975a2cc190SJeff Kirsher return -ENOMEM; 6985a2cc190SJeff Kirsher 6995a2cc190SJeff Kirsher found: 7005a2cc190SJeff Kirsher clear_bit(i, pgdir->bits[o]); 7015a2cc190SJeff Kirsher 7025a2cc190SJeff Kirsher i <<= o; 7035a2cc190SJeff Kirsher 7045a2cc190SJeff Kirsher if (o > order) 7055a2cc190SJeff Kirsher set_bit(i ^ 1, pgdir->bits[order]); 7065a2cc190SJeff Kirsher 7075a2cc190SJeff Kirsher db->u.pgdir = pgdir; 7085a2cc190SJeff Kirsher db->index = i; 7095a2cc190SJeff Kirsher db->db = pgdir->db_page + db->index; 7105a2cc190SJeff Kirsher db->dma = pgdir->db_dma + db->index * 4; 7115a2cc190SJeff Kirsher db->order = order; 7125a2cc190SJeff Kirsher 7135a2cc190SJeff Kirsher return 0; 7145a2cc190SJeff Kirsher } 7155a2cc190SJeff Kirsher 7168900b894SLeon Romanovsky int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) 7175a2cc190SJeff Kirsher { 7185a2cc190SJeff Kirsher struct mlx4_priv *priv = mlx4_priv(dev); 7195a2cc190SJeff Kirsher struct mlx4_db_pgdir *pgdir; 7205a2cc190SJeff Kirsher int ret = 0; 7215a2cc190SJeff Kirsher 7225a2cc190SJeff Kirsher mutex_lock(&priv->pgdir_mutex); 7235a2cc190SJeff Kirsher 7245a2cc190SJeff Kirsher list_for_each_entry(pgdir, &priv->pgdir_list, list) 7255a2cc190SJeff Kirsher if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 7265a2cc190SJeff Kirsher goto out; 7275a2cc190SJeff Kirsher 7288900b894SLeon Romanovsky pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev); 7295a2cc190SJeff Kirsher if (!pgdir) { 7305a2cc190SJeff Kirsher ret = -ENOMEM; 7315a2cc190SJeff Kirsher goto out; 7325a2cc190SJeff Kirsher } 7335a2cc190SJeff Kirsher 7345a2cc190SJeff Kirsher list_add(&pgdir->list, &priv->pgdir_list); 7355a2cc190SJeff Kirsher 7365a2cc190SJeff Kirsher /* This should never fail -- we just allocated an empty 
page: */ 7375a2cc190SJeff Kirsher WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); 7385a2cc190SJeff Kirsher 7395a2cc190SJeff Kirsher out: 7405a2cc190SJeff Kirsher mutex_unlock(&priv->pgdir_mutex); 7415a2cc190SJeff Kirsher 7425a2cc190SJeff Kirsher return ret; 7435a2cc190SJeff Kirsher } 7445a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_db_alloc); 7455a2cc190SJeff Kirsher 7465a2cc190SJeff Kirsher void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) 7475a2cc190SJeff Kirsher { 7485a2cc190SJeff Kirsher struct mlx4_priv *priv = mlx4_priv(dev); 7495a2cc190SJeff Kirsher int o; 7505a2cc190SJeff Kirsher int i; 7515a2cc190SJeff Kirsher 7525a2cc190SJeff Kirsher mutex_lock(&priv->pgdir_mutex); 7535a2cc190SJeff Kirsher 7545a2cc190SJeff Kirsher o = db->order; 7555a2cc190SJeff Kirsher i = db->index; 7565a2cc190SJeff Kirsher 7575a2cc190SJeff Kirsher if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { 7585a2cc190SJeff Kirsher clear_bit(i ^ 1, db->u.pgdir->order0); 7595a2cc190SJeff Kirsher ++o; 7605a2cc190SJeff Kirsher } 7615a2cc190SJeff Kirsher i >>= o; 7625a2cc190SJeff Kirsher set_bit(i, db->u.pgdir->bits[o]); 7635a2cc190SJeff Kirsher 7645a2cc190SJeff Kirsher if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { 765872bf2fbSYishai Hadas dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, 7665a2cc190SJeff Kirsher db->u.pgdir->db_page, db->u.pgdir->db_dma); 7675a2cc190SJeff Kirsher list_del(&db->u.pgdir->list); 7685a2cc190SJeff Kirsher kfree(db->u.pgdir); 7695a2cc190SJeff Kirsher } 7705a2cc190SJeff Kirsher 7715a2cc190SJeff Kirsher mutex_unlock(&priv->pgdir_mutex); 7725a2cc190SJeff Kirsher } 7735a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_db_free); 7745a2cc190SJeff Kirsher 7755a2cc190SJeff Kirsher int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 77673898db0SHaggai Abramovsky int size) 7775a2cc190SJeff Kirsher { 7785a2cc190SJeff Kirsher int err; 7795a2cc190SJeff Kirsher 7808900b894SLeon Romanovsky err = mlx4_db_alloc(dev, 
&wqres->db, 1); 7815a2cc190SJeff Kirsher if (err) 7825a2cc190SJeff Kirsher return err; 7835a2cc190SJeff Kirsher 7845a2cc190SJeff Kirsher *wqres->db.db = 0; 7855a2cc190SJeff Kirsher 7868900b894SLeon Romanovsky err = mlx4_buf_direct_alloc(dev, size, &wqres->buf); 7875a2cc190SJeff Kirsher if (err) 7885a2cc190SJeff Kirsher goto err_db; 7895a2cc190SJeff Kirsher 7905a2cc190SJeff Kirsher err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, 7915a2cc190SJeff Kirsher &wqres->mtt); 7925a2cc190SJeff Kirsher if (err) 7935a2cc190SJeff Kirsher goto err_buf; 7945a2cc190SJeff Kirsher 7958900b894SLeon Romanovsky err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); 7965a2cc190SJeff Kirsher if (err) 7975a2cc190SJeff Kirsher goto err_mtt; 7985a2cc190SJeff Kirsher 7995a2cc190SJeff Kirsher return 0; 8005a2cc190SJeff Kirsher 8015a2cc190SJeff Kirsher err_mtt: 8025a2cc190SJeff Kirsher mlx4_mtt_cleanup(dev, &wqres->mtt); 8035a2cc190SJeff Kirsher err_buf: 8045a2cc190SJeff Kirsher mlx4_buf_free(dev, size, &wqres->buf); 8055a2cc190SJeff Kirsher err_db: 8065a2cc190SJeff Kirsher mlx4_db_free(dev, &wqres->db); 8075a2cc190SJeff Kirsher 8085a2cc190SJeff Kirsher return err; 8095a2cc190SJeff Kirsher } 8105a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); 8115a2cc190SJeff Kirsher 8125a2cc190SJeff Kirsher void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 8135a2cc190SJeff Kirsher int size) 8145a2cc190SJeff Kirsher { 8155a2cc190SJeff Kirsher mlx4_mtt_cleanup(dev, &wqres->mtt); 8165a2cc190SJeff Kirsher mlx4_buf_free(dev, size, &wqres->buf); 8175a2cc190SJeff Kirsher mlx4_db_free(dev, &wqres->db); 8185a2cc190SJeff Kirsher } 8195a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); 820