15a2cc190SJeff Kirsher /*
25a2cc190SJeff Kirsher  * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
35a2cc190SJeff Kirsher  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
45a2cc190SJeff Kirsher  *
55a2cc190SJeff Kirsher  * This software is available to you under a choice of one of two
65a2cc190SJeff Kirsher  * licenses.  You may choose to be licensed under the terms of the GNU
75a2cc190SJeff Kirsher  * General Public License (GPL) Version 2, available from the file
85a2cc190SJeff Kirsher  * COPYING in the main directory of this source tree, or the
95a2cc190SJeff Kirsher  * OpenIB.org BSD license below:
105a2cc190SJeff Kirsher  *
115a2cc190SJeff Kirsher  *     Redistribution and use in source and binary forms, with or
125a2cc190SJeff Kirsher  *     without modification, are permitted provided that the following
135a2cc190SJeff Kirsher  *     conditions are met:
145a2cc190SJeff Kirsher  *
155a2cc190SJeff Kirsher  *      - Redistributions of source code must retain the above
165a2cc190SJeff Kirsher  *        copyright notice, this list of conditions and the following
175a2cc190SJeff Kirsher  *        disclaimer.
185a2cc190SJeff Kirsher  *
195a2cc190SJeff Kirsher  *      - Redistributions in binary form must reproduce the above
205a2cc190SJeff Kirsher  *        copyright notice, this list of conditions and the following
215a2cc190SJeff Kirsher  *        disclaimer in the documentation and/or other materials
225a2cc190SJeff Kirsher  *        provided with the distribution.
235a2cc190SJeff Kirsher  *
245a2cc190SJeff Kirsher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
255a2cc190SJeff Kirsher  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
265a2cc190SJeff Kirsher  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
275a2cc190SJeff Kirsher  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
285a2cc190SJeff Kirsher  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
295a2cc190SJeff Kirsher  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
305a2cc190SJeff Kirsher  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
315a2cc190SJeff Kirsher  * SOFTWARE.
325a2cc190SJeff Kirsher  */
335a2cc190SJeff Kirsher 
345a2cc190SJeff Kirsher #include <linux/errno.h>
355a2cc190SJeff Kirsher #include <linux/slab.h>
365a2cc190SJeff Kirsher #include <linux/mm.h>
37ee40fa06SPaul Gortmaker #include <linux/export.h>
385a2cc190SJeff Kirsher #include <linux/bitmap.h>
395a2cc190SJeff Kirsher #include <linux/dma-mapping.h>
405a2cc190SJeff Kirsher #include <linux/vmalloc.h>
415a2cc190SJeff Kirsher 
425a2cc190SJeff Kirsher #include "mlx4.h"
435a2cc190SJeff Kirsher 
/*
 * Allocate a single object from @bitmap in round-robin fashion.
 *
 * Returns the allocated object number (with the round-robin "top" prefix
 * folded in) or (u32)-1 if the bitmap is full.
 */
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	/* Start scanning at the position after the previous allocation. */
	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		/*
		 * Reached the end of the table: advance the round-robin
		 * prefix (masked by bitmap->mask) and retry from bit 0.
		 */
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		/* Remember where to continue next time, wrapping at max. */
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		/* Fold the round-robin prefix into the returned value. */
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}
735a2cc190SJeff Kirsher 
/* Free a single object; convenience wrapper around mlx4_bitmap_free_range(). */
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
785a2cc190SJeff Kirsher 
/*
 * Find a run of @len clear bits in @bitmap within [start, nbits), starting
 * at an @align-aligned position and where no bit index in the run has any
 * of the bits in @skip_mask set.
 *
 * Returns the first index of such a run, or (unsigned long)-1 if none
 * exists (callers compare the truncated u32 against bitmap->max).
 */
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align, u32 skip_mask)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	/* Skip over aligned positions that are taken or hit skip_mask. */
	while ((start < nbits) && (test_bit(start, bitmap) ||
				   (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start+len;
	if (end > nbits)
		return -1;

	/*
	 * Verify the rest of the candidate run; on the first conflict,
	 * restart the search just past the conflicting bit.
	 */
	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}
108ddae0349SEugenia Emantayev 
/*
 * Allocate @cnt contiguous objects from @bitmap, aligned to @align and
 * avoiding indices that intersect @skip_mask.
 *
 * Returns the first object of the range (with the round-robin prefix
 * folded in) or (u32)-1 on failure.
 */
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask)
{
	u32 obj;

	/* The common single-object case has a cheaper dedicated path. */
	if (likely(cnt == 1 && align == 1 && !skip_mask))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align, skip_mask);
	if (obj >= bitmap->max) {
		/* Wrapped: bump the round-robin prefix and retry from 0. */
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
					 cnt, align, skip_mask);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}
1465a2cc190SJeff Kirsher 
mlx4_bitmap_avail(struct mlx4_bitmap * bitmap)1475a2cc190SJeff Kirsher u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
1485a2cc190SJeff Kirsher {
1495a2cc190SJeff Kirsher 	return bitmap->avail;
1505a2cc190SJeff Kirsher }
1515a2cc190SJeff Kirsher 
/*
 * Strip the round-robin prefix from @obj, reducing it to an index inside
 * the bitmap's total size (max + reserved_top, a power of two per
 * mlx4_bitmap_init, so subtracting one yields an all-ones mask).
 */
static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
{
	u32 size_mask = bitmap->max + bitmap->reserved_top - 1;

	return obj & size_mask;
}
1567a89399fSMatan Barak 
/*
 * Free @cnt contiguous objects starting at @obj.
 *
 * When @use_rr is clear, the freed range becomes immediately reusable:
 * the scan position is pulled back to @obj and the round-robin prefix is
 * advanced. With @use_rr set, reuse is deferred until the allocator
 * naturally wraps around to this range again.
 */
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	/* Drop the round-robin prefix to recover the table index. */
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}
1725a2cc190SJeff Kirsher 
/*
 * Initialize @bitmap to manage @num objects (@num must be a power of two).
 *
 * The lowest @reserved_bot objects are marked permanently in use and the
 * highest @reserved_top objects are excluded from the table entirely
 * (bitmap->max covers only num - reserved_top bits). @mask bounds the
 * round-robin "top" prefix used by the alloc/free paths.
 *
 * Returns 0 on success, -EINVAL for a non-power-of-two @num, or -ENOMEM
 * if the bit table cannot be allocated.
 */
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	bitmap->effective_len = bitmap->avail;
	spin_lock_init(&bitmap->lock);
	bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	/* Pre-claim the reserved low range so it is never handed out. */
	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}
1965a2cc190SJeff Kirsher 
mlx4_bitmap_cleanup(struct mlx4_bitmap * bitmap)1975a2cc190SJeff Kirsher void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
1985a2cc190SJeff Kirsher {
199214fa1c4SAndy Shevchenko 	bitmap_free(bitmap->table);
2005a2cc190SJeff Kirsher }
2015a2cc190SJeff Kirsher 
/*
 * A pool of bitmap-backed zones that are allocated from as one logical
 * resource, with per-zone priorities and fallback rules.
 */
struct mlx4_zone_allocator {
	struct list_head		entries; /* all zones, kept sorted by priority */
	struct list_head		prios;	 /* one representative zone per distinct priority */
	u32				last_uid; /* uid to assign to the next added zone */
	u32				mask;	 /* largest masked-value span over all zones */
	/* protect the zone_allocator from concurrent accesses */
	spinlock_t			lock;
	enum mlx4_zone_alloc_flags	flags;
};
2117a89399fSMatan Barak 
/* One zone: a bitmap plus its placement inside a zone allocator. */
struct mlx4_zone_entry {
	struct list_head		list;	   /* link in allocator->entries (priority order) */
	struct list_head		prio_list; /* link in allocator->prios, if this zone represents its priority */
	u32				uid;	   /* allocator-unique id handed back to callers */
	struct mlx4_zone_allocator	*allocator; /* owning allocator */
	struct mlx4_bitmap		*bitmap;   /* backing object bitmap */
	int				use_rr;	   /* round-robin freeing mode for the bitmap */
	int				priority;  /* lower value = allocated from first */
	int				offset;	   /* added to bitmap-local objects to form global ids */
	enum mlx4_zone_flags		flags;
};
2237a89399fSMatan Barak 
mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)2247a89399fSMatan Barak struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
2257a89399fSMatan Barak {
2267a89399fSMatan Barak 	struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
2277a89399fSMatan Barak 
2287a89399fSMatan Barak 	if (NULL == zones)
2297a89399fSMatan Barak 		return NULL;
2307a89399fSMatan Barak 
2317a89399fSMatan Barak 	INIT_LIST_HEAD(&zones->entries);
2327a89399fSMatan Barak 	INIT_LIST_HEAD(&zones->prios);
2337a89399fSMatan Barak 	spin_lock_init(&zones->lock);
2347a89399fSMatan Barak 	zones->last_uid = 0;
2357a89399fSMatan Barak 	zones->mask = 0;
2367a89399fSMatan Barak 	zones->flags = flags;
2377a89399fSMatan Barak 
2387a89399fSMatan Barak 	return zones;
2397a89399fSMatan Barak }
2407a89399fSMatan Barak 
/*
 * Add @bitmap as a new zone to @zone_alloc.
 *
 * The zone is inserted into the priority-sorted entries list; if it is
 * the first zone of its priority it also joins the prios list as that
 * priority's representative. The zone's uid is returned through @puid.
 *
 * Returns 0 on success or -ENOMEM.
 */
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid)
{
	u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
	struct mlx4_zone_entry *it;
	struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);

	if (NULL == zone)
		return -ENOMEM;

	zone->flags = flags;
	zone->bitmap = bitmap;
	zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
	zone->priority = priority;
	zone->offset = offset;

	spin_lock(&zone_alloc->lock);

	zone->uid = zone_alloc->last_uid++;
	zone->allocator = zone_alloc;

	/* Track the widest zone span for uid-by-object lookups. */
	if (zone_alloc->mask < mask)
		zone_alloc->mask = mask;

	/* Find the first existing priority >= ours (or the list head). */
	list_for_each_entry(it, &zone_alloc->prios, prio_list)
		if (it->priority >= priority)
			break;

	/*
	 * No zone with this exact priority exists yet: this zone becomes
	 * the priority's representative in the prios list.
	 */
	if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
		list_add_tail(&zone->prio_list, &it->prio_list);
	/* Insert before "it" keeps entries sorted by ascending priority. */
	list_add_tail(&zone->list, &it->list);

	spin_unlock(&zone_alloc->lock);

	*puid = zone->uid;

	return 0;
}
2837a89399fSMatan Barak 
2847a89399fSMatan Barak /* Should be called under a lock */
/*
 * Unlink @entry from its allocator's entries and prios lists, promoting
 * the next same-priority zone (if any) to priority representative, and
 * recompute the allocator-wide mask when overlap is forbidden.
 * Caller holds the allocator lock and frees @entry afterwards.
 */
static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
	struct mlx4_zone_allocator *zone_alloc = entry->allocator;

	if (!list_empty(&entry->prio_list)) {
		/* Check if we need to add an alternative node to the prio list */
		if (!list_is_last(&entry->list, &zone_alloc->entries)) {
			/* list_first_entry on entry->list yields its successor. */
			struct mlx4_zone_entry *next = list_first_entry(&entry->list,
									typeof(*next),
									list);

			if (next->priority == entry->priority)
				list_add_tail(&next->prio_list, &entry->prio_list);
		}

		list_del(&entry->prio_list);
	}

	list_del(&entry->list);

	if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
		/* Recompute the max mask over the remaining zones. */
		u32 mask = 0;
		struct mlx4_zone_entry *it;

		list_for_each_entry(it, &zone_alloc->prios, prio_list) {
			u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);

			if (mask < cur_mask)
				mask = cur_mask;
		}
		zone_alloc->mask = mask;
	}
}
3187a89399fSMatan Barak 
mlx4_zone_allocator_destroy(struct mlx4_zone_allocator * zone_alloc)3197a89399fSMatan Barak void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
3207a89399fSMatan Barak {
3217a89399fSMatan Barak 	struct mlx4_zone_entry *zone, *tmp;
3227a89399fSMatan Barak 
3237a89399fSMatan Barak 	spin_lock(&zone_alloc->lock);
3247a89399fSMatan Barak 
3257a89399fSMatan Barak 	list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
3267a89399fSMatan Barak 		list_del(&zone->list);
3277a89399fSMatan Barak 		list_del(&zone->prio_list);
3287a89399fSMatan Barak 		kfree(zone);
3297a89399fSMatan Barak 	}
3307a89399fSMatan Barak 
3317a89399fSMatan Barak 	spin_unlock(&zone_alloc->lock);
3327a89399fSMatan Barak 	kfree(zone_alloc);
3337a89399fSMatan Barak }
3347a89399fSMatan Barak 
3357a89399fSMatan Barak /* Should be called under a lock */
/*
 * Allocate @count objects (aligned to @align, avoiding @skip_mask) from
 * @zone, falling back to other zones according to @zone's flags:
 * first lower-priority zones, then equal-priority siblings, and finally
 * higher-priority zones. On success the owning zone's uid is stored in
 * *@puid. Returns the global object id or (u32)-1. Caller holds the
 * allocator lock.
 */
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
				  int align, u32 skip_mask, u32 *puid)
{
	u32 uid = 0;
	u32 res;
	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
	struct mlx4_zone_entry *curr_node;

	/* First try the requested zone itself. */
	res = mlx4_bitmap_alloc_range(zone->bitmap, count,
				      align, skip_mask);

	if (res != (u32)-1) {
		res += zone->offset;
		uid = zone->uid;
		goto out;
	}

	/* Locate the prios-list representative of this zone's priority. */
	list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
		if (unlikely(curr_node->priority == zone->priority))
			break;
	}

	/* Walk backwards through strictly lower-priority zones. */
	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	/* Try the other zones sharing this zone's priority. */
	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_from(it, &zone_alloc->entries, list) {
			if (unlikely(it == zone))
				continue;

			if (unlikely(it->priority != curr_node->priority))
				break;

			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	/* Finally, fall forward into higher-priority zones. */
	if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
		if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
			goto out;

		/* Jump to the representative of the next priority level. */
		curr_node = list_first_entry(&curr_node->prio_list,
					     typeof(*curr_node),
					     prio_list);

		list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += curr_node->offset;
				uid = curr_node->uid;
				goto out;
			}
		}
	}

out:
	if (NULL != puid && res != (u32)-1)
		*puid = uid;
	return res;
}
4167a89399fSMatan Barak 
4177a89399fSMatan Barak /* Should be called under a lock */
/*
 * Return @count objects starting at global id @obj to @zone's bitmap.
 * Caller holds the allocator lock.
 */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
				  u32 count)
{
	u32 zone_local = obj - zone->offset;

	mlx4_bitmap_free_range(zone->bitmap, zone_local, count, zone->use_rr);
}
4237a89399fSMatan Barak 
4247a89399fSMatan Barak /* Should be called under a lock */
/*
 * Look up the zone with the given @uid, or NULL if no such zone exists.
 * Caller holds the allocator lock.
 */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
		struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *it;

	list_for_each_entry(it, &zones->entries, list)
		if (it->uid == uid)
			return it;

	return NULL;
}
4377a89399fSMatan Barak 
/*
 * Return the bitmap backing the zone identified by @uid, or NULL when no
 * such zone exists.
 */
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_bitmap *bitmap = NULL;
	struct mlx4_zone_entry *zone;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);
	if (zone)
		bitmap = zone->bitmap;

	spin_unlock(&zones->lock);

	return bitmap;
}
4537a89399fSMatan Barak 
/*
 * Remove and free the zone identified by @uid.
 * Returns 0 on success or -1 if no zone with that uid exists.
 */
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;
	int res = 0;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);
	if (zone)
		__mlx4_zone_remove_one_entry(zone);
	else
		res = -1;

	spin_unlock(&zones->lock);
	/* kfree(NULL) is a no-op, so the not-found path is safe too. */
	kfree(zone);

	return res;
}
4767a89399fSMatan Barak 
4777a89399fSMatan Barak /* Should be called under a lock */
/*
 * Given a global object id @obj, find the zone it belongs to.
 * Caller holds the allocator lock.
 */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
		struct mlx4_zone_allocator *zones, u32 obj)
{
	struct mlx4_zone_entry *zone, *zone_candidate = NULL;
	u32 dist = (u32)-1;

	/* Search for the smallest zone that this obj could be
	 * allocated from. This is done in order to handle
	 * situations when small bitmaps are allocated from bigger
	 * bitmaps (and the allocated space is marked as reserved in
	 * the bigger bitmap.
	 */
	list_for_each_entry(zone, &zones->entries, list) {
		if (obj >= zone->offset) {
			/* Index of obj relative to this zone's start. */
			u32 mobj = (obj - zone->offset) & zones->mask;

			if (mobj < zone->bitmap->max) {
				/* Prefer the zone with the smallest usable span. */
				u32 curr_dist = zone->bitmap->effective_len;

				if (curr_dist < dist) {
					dist = curr_dist;
					zone_candidate = zone;
				}
			}
		}
	}

	return zone_candidate;
}
5077a89399fSMatan Barak 
/*
 * Allocate @count objects from the zone identified by @uid (falling back
 * per that zone's flags). Returns the object id, or (u32)-1 when the uid
 * is unknown or no space is available. *@puid receives the uid of the
 * zone that actually satisfied the request.
 */
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid)
{
	struct mlx4_zone_entry *zone;
	int res = -1;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);
	if (zone)
		res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);

	spin_unlock(&zones->lock);

	return res;
}
5287a89399fSMatan Barak 
/*
 * Free @count objects starting at @obj back to the zone identified by
 * @uid. Returns 0 on success or -1 if the uid is unknown.
 */
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res = 0;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);
	if (zone)
		__mlx4_free_from_zone(zone, obj, count);
	else
		res = -1;

	spin_unlock(&zones->lock);

	return res;
}
5507a89399fSMatan Barak 
/*
 * Free @count objects starting at global id @obj, deducing the owning
 * zone from @obj itself. Only valid for allocators created with
 * MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP (otherwise -EFAULT). Returns 0 on
 * success or -1 when no zone contains @obj.
 */
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res = -1;

	if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
		return -EFAULT;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid_unique(zones, obj);
	if (zone) {
		__mlx4_free_from_zone(zone, obj, count);
		res = 0;
	}

	spin_unlock(&zones->lock);

	return res;
}
5765a2cc190SJeff Kirsher 
/*
 * Allocate @size bytes as one physically contiguous DMA-coherent chunk
 * and describe it in @buf. Returns 0 or -ENOMEM.
 */
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
				 struct mlx4_buf *buf)
{
	dma_addr_t t;

	buf->nbufs        = 1;
	buf->npages       = 1;
	buf->page_shift   = get_order(size) + PAGE_SHIFT;
	buf->direct.buf   =
		dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
				   GFP_KERNEL);
	if (!buf->direct.buf)
		return -ENOMEM;

	buf->direct.map = t;

	/*
	 * Shrink page_shift (doubling npages) until the DMA address is
	 * aligned to the reported page size.
	 */
	while (t & ((1 << buf->page_shift) - 1)) {
		--buf->page_shift;
		buf->npages *= 2;
	}

	return 0;
}
60073898db0SHaggai Abramovsky 
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
/*
 * Allocate a queue buffer of @size bytes. Sizes up to @max_direct use a
 * single contiguous chunk; larger sizes are split into PAGE_SIZE
 * DMA-coherent pieces tracked in buf->page_list. Returns 0 or -ENOMEM;
 * on failure any partially allocated pages are released.
 */
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	if (size <= max_direct) {
		return mlx4_buf_direct_alloc(dev, size, buf);
	} else {
		dma_addr_t t;
		int i;

		buf->direct.buf = NULL;
		buf->nbufs      = DIV_ROUND_UP(size, PAGE_SIZE);
		buf->npages	= buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->persist->pdev->dev,
						   PAGE_SIZE, &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
		}
	}

	return 0;

err_free:
	/* mlx4_buf_free() skips the NULL entries of a partial page_list. */
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
6435a2cc190SJeff Kirsher 
mlx4_buf_free(struct mlx4_dev * dev,int size,struct mlx4_buf * buf)6445a2cc190SJeff Kirsher void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
6455a2cc190SJeff Kirsher {
64673898db0SHaggai Abramovsky 	if (buf->nbufs == 1) {
647872bf2fbSYishai Hadas 		dma_free_coherent(&dev->persist->pdev->dev, size,
64873898db0SHaggai Abramovsky 				  buf->direct.buf, buf->direct.map);
64973898db0SHaggai Abramovsky 	} else {
65073898db0SHaggai Abramovsky 		int i;
6515a2cc190SJeff Kirsher 
6525a2cc190SJeff Kirsher 		for (i = 0; i < buf->nbufs; ++i)
6535a2cc190SJeff Kirsher 			if (buf->page_list[i].buf)
654872bf2fbSYishai Hadas 				dma_free_coherent(&dev->persist->pdev->dev,
655872bf2fbSYishai Hadas 						  PAGE_SIZE,
6565a2cc190SJeff Kirsher 						  buf->page_list[i].buf,
6575a2cc190SJeff Kirsher 						  buf->page_list[i].map);
6585a2cc190SJeff Kirsher 		kfree(buf->page_list);
6595a2cc190SJeff Kirsher 	}
6605a2cc190SJeff Kirsher }
6615a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_buf_free);
6625a2cc190SJeff Kirsher 
mlx4_alloc_db_pgdir(struct device * dma_device)6638900b894SLeon Romanovsky static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
6645a2cc190SJeff Kirsher {
6655a2cc190SJeff Kirsher 	struct mlx4_db_pgdir *pgdir;
6665a2cc190SJeff Kirsher 
6678900b894SLeon Romanovsky 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
6685a2cc190SJeff Kirsher 	if (!pgdir)
6695a2cc190SJeff Kirsher 		return NULL;
6705a2cc190SJeff Kirsher 
6715a2cc190SJeff Kirsher 	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
6725a2cc190SJeff Kirsher 	pgdir->bits[0] = pgdir->order0;
6735a2cc190SJeff Kirsher 	pgdir->bits[1] = pgdir->order1;
6745a2cc190SJeff Kirsher 	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
6758900b894SLeon Romanovsky 					    &pgdir->db_dma, GFP_KERNEL);
6765a2cc190SJeff Kirsher 	if (!pgdir->db_page) {
6775a2cc190SJeff Kirsher 		kfree(pgdir);
6785a2cc190SJeff Kirsher 		return NULL;
6795a2cc190SJeff Kirsher 	}
6805a2cc190SJeff Kirsher 
6815a2cc190SJeff Kirsher 	return pgdir;
6825a2cc190SJeff Kirsher }
6835a2cc190SJeff Kirsher 
/* Carve a doorbell record of the given order out of @pgdir using a
 * two-level buddy scheme over the bits[0]/bits[1] free bitmaps.
 * Returns 0 and fills in @db on success, -ENOMEM if this page has no
 * suitable free slot.
 */
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	/* Try the requested order first, then fall back to splitting a
	 * larger free slot (orders run 0..1 only).
	 */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	/* Claim the slot at order o ... */
	clear_bit(i, pgdir->bits[o]);

	/* ... and convert its index to an order-0 doorbell index. */
	i <<= o;

	/* If we split a higher-order slot, its buddy (i ^ 1) becomes a
	 * free slot at the requested order.
	 */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	/* db_page entries are 4 bytes wide, hence the * 4 DMA offset. */
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}
7145a2cc190SJeff Kirsher 
mlx4_db_alloc(struct mlx4_dev * dev,struct mlx4_db * db,int order)7158900b894SLeon Romanovsky int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
7165a2cc190SJeff Kirsher {
7175a2cc190SJeff Kirsher 	struct mlx4_priv *priv = mlx4_priv(dev);
7185a2cc190SJeff Kirsher 	struct mlx4_db_pgdir *pgdir;
7195a2cc190SJeff Kirsher 	int ret = 0;
7205a2cc190SJeff Kirsher 
7215a2cc190SJeff Kirsher 	mutex_lock(&priv->pgdir_mutex);
7225a2cc190SJeff Kirsher 
7235a2cc190SJeff Kirsher 	list_for_each_entry(pgdir, &priv->pgdir_list, list)
7245a2cc190SJeff Kirsher 		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
7255a2cc190SJeff Kirsher 			goto out;
7265a2cc190SJeff Kirsher 
7278900b894SLeon Romanovsky 	pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
7285a2cc190SJeff Kirsher 	if (!pgdir) {
7295a2cc190SJeff Kirsher 		ret = -ENOMEM;
7305a2cc190SJeff Kirsher 		goto out;
7315a2cc190SJeff Kirsher 	}
7325a2cc190SJeff Kirsher 
7335a2cc190SJeff Kirsher 	list_add(&pgdir->list, &priv->pgdir_list);
7345a2cc190SJeff Kirsher 
7355a2cc190SJeff Kirsher 	/* This should never fail -- we just allocated an empty page: */
7365a2cc190SJeff Kirsher 	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
7375a2cc190SJeff Kirsher 
7385a2cc190SJeff Kirsher out:
7395a2cc190SJeff Kirsher 	mutex_unlock(&priv->pgdir_mutex);
7405a2cc190SJeff Kirsher 
7415a2cc190SJeff Kirsher 	return ret;
7425a2cc190SJeff Kirsher }
7435a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_db_alloc);
7445a2cc190SJeff Kirsher 
/* Return a doorbell record to its page directory, coalescing it with
 * its buddy when possible, and free the whole page once every slot in
 * it is free again.  Serialized by pgdir_mutex.
 */
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* If our order-0 buddy (i ^ 1) is also free, merge the pair back
	 * into a single order-1 slot before releasing.
	 */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* order1 full means the entire page is free: release the DMA page
	 * and drop the directory from the device's list.
	 */
	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
7735a2cc190SJeff Kirsher 
mlx4_alloc_hwq_res(struct mlx4_dev * dev,struct mlx4_hwq_resources * wqres,int size)7745a2cc190SJeff Kirsher int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
77573898db0SHaggai Abramovsky 		       int size)
7765a2cc190SJeff Kirsher {
7775a2cc190SJeff Kirsher 	int err;
7785a2cc190SJeff Kirsher 
7798900b894SLeon Romanovsky 	err = mlx4_db_alloc(dev, &wqres->db, 1);
7805a2cc190SJeff Kirsher 	if (err)
7815a2cc190SJeff Kirsher 		return err;
7825a2cc190SJeff Kirsher 
7835a2cc190SJeff Kirsher 	*wqres->db.db = 0;
7845a2cc190SJeff Kirsher 
7858900b894SLeon Romanovsky 	err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
7865a2cc190SJeff Kirsher 	if (err)
7875a2cc190SJeff Kirsher 		goto err_db;
7885a2cc190SJeff Kirsher 
7895a2cc190SJeff Kirsher 	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
7905a2cc190SJeff Kirsher 			    &wqres->mtt);
7915a2cc190SJeff Kirsher 	if (err)
7925a2cc190SJeff Kirsher 		goto err_buf;
7935a2cc190SJeff Kirsher 
7948900b894SLeon Romanovsky 	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
7955a2cc190SJeff Kirsher 	if (err)
7965a2cc190SJeff Kirsher 		goto err_mtt;
7975a2cc190SJeff Kirsher 
7985a2cc190SJeff Kirsher 	return 0;
7995a2cc190SJeff Kirsher 
8005a2cc190SJeff Kirsher err_mtt:
8015a2cc190SJeff Kirsher 	mlx4_mtt_cleanup(dev, &wqres->mtt);
8025a2cc190SJeff Kirsher err_buf:
8035a2cc190SJeff Kirsher 	mlx4_buf_free(dev, size, &wqres->buf);
8045a2cc190SJeff Kirsher err_db:
8055a2cc190SJeff Kirsher 	mlx4_db_free(dev, &wqres->db);
8065a2cc190SJeff Kirsher 
8075a2cc190SJeff Kirsher 	return err;
8085a2cc190SJeff Kirsher }
8095a2cc190SJeff Kirsher EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
8105a2cc190SJeff Kirsher 
/* Tear down everything set up by mlx4_alloc_hwq_res(), in reverse
 * order of acquisition: MTT mapping first, then the buffer, then the
 * doorbell record.
 */
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
819