xref: /openbmc/linux/drivers/iommu/iova.c (revision e5c86679)
1 /*
2  * Copyright © 2006-2009, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
18  */
19 
20 #include <linux/iova.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/smp.h>
24 #include <linux/bitops.h>
25 
26 static bool iova_rcache_insert(struct iova_domain *iovad,
27 			       unsigned long pfn,
28 			       unsigned long size);
29 static unsigned long iova_rcache_get(struct iova_domain *iovad,
30 				     unsigned long size,
31 				     unsigned long limit_pfn);
32 static void init_iova_rcaches(struct iova_domain *iovad);
33 static void free_iova_rcaches(struct iova_domain *iovad);
34 
35 void
36 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
37 	unsigned long start_pfn, unsigned long pfn_32bit)
38 {
39 	/*
40 	 * IOVA granularity will normally be equal to the smallest
41 	 * supported IOMMU page size; both *must* be capable of
42 	 * representing individual CPU pages exactly.
43 	 */
44 	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
45 
46 	spin_lock_init(&iovad->iova_rbtree_lock);
47 	iovad->rbroot = RB_ROOT;
48 	iovad->cached32_node = NULL;
49 	iovad->granule = granule;
50 	iovad->start_pfn = start_pfn;
51 	iovad->dma_32bit_pfn = pfn_32bit;
52 	init_iova_rcaches(iovad);
53 }
54 EXPORT_SYMBOL_GPL(init_iova_domain);
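
/*
 * Illustrative sketch, not part of this file's code: how a hypothetical
 * caller might set up a domain for an IOMMU whose smallest page size is 4K
 * and whose devices are limited to 32-bit DMA.  The helper name and the
 * constants are made-up example values; real drivers derive the granule,
 * start_pfn and the 32-bit PFN boundary from their hardware.
 */
static inline void example_setup_domain(struct iova_domain *iovad)
{
	unsigned long shift = 12;			/* log2 of the granule */
	unsigned long granule = 1UL << shift;		/* must not exceed PAGE_SIZE */
	unsigned long start_pfn = 1;			/* one common choice: keep IOVA 0 unallocatable */
	unsigned long dma_32bit_pfn = 0xffffffffUL >> shift;

	init_iova_domain(iovad, granule, start_pfn, dma_32bit_pfn);
}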
55 
56 static struct rb_node *
57 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
58 {
59 	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
60 		(iovad->cached32_node == NULL))
61 		return rb_last(&iovad->rbroot);
62 	else {
63 		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
64 		struct iova *curr_iova =
65 			rb_entry(iovad->cached32_node, struct iova, node);
66 		*limit_pfn = curr_iova->pfn_lo - 1;
67 		return prev_node;
68 	}
69 }
70 
71 static void
72 __cached_rbnode_insert_update(struct iova_domain *iovad,
73 	unsigned long limit_pfn, struct iova *new)
74 {
75 	if (limit_pfn != iovad->dma_32bit_pfn)
76 		return;
77 	iovad->cached32_node = &new->node;
78 }
79 
80 static void
81 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
82 {
83 	struct iova *cached_iova;
84 	struct rb_node *curr;
85 
86 	if (!iovad->cached32_node)
87 		return;
88 	curr = iovad->cached32_node;
89 	cached_iova = rb_entry(curr, struct iova, node);
90 
91 	if (free->pfn_lo >= cached_iova->pfn_lo) {
92 		struct rb_node *node = rb_next(&free->node);
93 		struct iova *iova = rb_entry(node, struct iova, node);
94 
95 		/* only cache if it's below 32bit pfn */
96 		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
97 			iovad->cached32_node = node;
98 		else
99 			iovad->cached32_node = NULL;
100 	}
101 }
102 
103 /*
104  * Computes the padding size required to make the start address
105  * naturally aligned on the power-of-two order of its size
106  */
107 static unsigned int
108 iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
109 {
110 	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
111 }
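
/*
 * Worked example with illustrative numbers: for size = 5 and
 * limit_pfn = 0x9f, __roundup_pow_of_two(5) is 8, so the padding is
 * (0x9f + 1 - 5) & 7 = 3.  Allocating size + pad = 8 pfns ending at the
 * limit puts pfn_lo at 0x98, which is aligned to the rounded-up size.
 */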
112 
113 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
114 		unsigned long size, unsigned long limit_pfn,
115 			struct iova *new, bool size_aligned)
116 {
117 	struct rb_node *prev, *curr = NULL;
118 	unsigned long flags;
119 	unsigned long saved_pfn;
120 	unsigned int pad_size = 0;
121 
122 	/* Walk the tree backwards */
123 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
124 	saved_pfn = limit_pfn;
125 	curr = __get_cached_rbnode(iovad, &limit_pfn);
126 	prev = curr;
127 	while (curr) {
128 		struct iova *curr_iova = rb_entry(curr, struct iova, node);
129 
130 		if (limit_pfn < curr_iova->pfn_lo)
131 			goto move_left;
132 		else if (limit_pfn < curr_iova->pfn_hi)
133 			goto adjust_limit_pfn;
134 		else {
135 			if (size_aligned)
136 				pad_size = iova_get_pad_size(size, limit_pfn);
137 			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
138 				break;	/* found a free slot */
139 		}
140 adjust_limit_pfn:
141 		limit_pfn = curr_iova->pfn_lo - 1;
142 move_left:
143 		prev = curr;
144 		curr = rb_prev(curr);
145 	}
146 
147 	if (!curr) {
148 		if (size_aligned)
149 			pad_size = iova_get_pad_size(size, limit_pfn);
150 		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
151 			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
152 			return -ENOMEM;
153 		}
154 	}
155 
156 	/* pfn_lo will point to size aligned address if size_aligned is set */
157 	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
158 	new->pfn_hi = new->pfn_lo + size - 1;
159 
160 	/* Insert the new_iova into the domain rbtree while holding the writer lock */
161 	/* Add new node and rebalance tree. */
162 	{
163 		struct rb_node **entry, *parent = NULL;
164 
165 		/* If we have 'prev', it's a valid place to start the
166 		   insertion. Otherwise, start from the root. */
167 		if (prev)
168 			entry = &prev;
169 		else
170 			entry = &iovad->rbroot.rb_node;
171 
172 		/* Figure out where to put new node */
173 		while (*entry) {
174 			struct iova *this = rb_entry(*entry, struct iova, node);
175 			parent = *entry;
176 
177 			if (new->pfn_lo < this->pfn_lo)
178 				entry = &((*entry)->rb_left);
179 			else if (new->pfn_lo > this->pfn_lo)
180 				entry = &((*entry)->rb_right);
181 			else
182 				BUG(); /* this should not happen */
183 		}
184 
185 		/* Add new node and rebalance tree. */
186 		rb_link_node(&new->node, parent, entry);
187 		rb_insert_color(&new->node, &iovad->rbroot);
188 	}
189 	__cached_rbnode_insert_update(iovad, saved_pfn, new);
190 
191 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
192 
193 
194 	return 0;
195 }
196 
197 static void
198 iova_insert_rbtree(struct rb_root *root, struct iova *iova)
199 {
200 	struct rb_node **new = &(root->rb_node), *parent = NULL;
201 	/* Figure out where to put new node */
202 	while (*new) {
203 		struct iova *this = rb_entry(*new, struct iova, node);
204 
205 		parent = *new;
206 
207 		if (iova->pfn_lo < this->pfn_lo)
208 			new = &((*new)->rb_left);
209 		else if (iova->pfn_lo > this->pfn_lo)
210 			new = &((*new)->rb_right);
211 		else
212 			BUG(); /* this should not happen */
213 	}
214 	/* Add new node and rebalance tree. */
215 	rb_link_node(&iova->node, parent, new);
216 	rb_insert_color(&iova->node, root);
217 }
218 
219 static struct kmem_cache *iova_cache;
220 static unsigned int iova_cache_users;
221 static DEFINE_MUTEX(iova_cache_mutex);
222 
223 struct iova *alloc_iova_mem(void)
224 {
225 	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
226 }
227 EXPORT_SYMBOL(alloc_iova_mem);
228 
229 void free_iova_mem(struct iova *iova)
230 {
231 	kmem_cache_free(iova_cache, iova);
232 }
233 EXPORT_SYMBOL(free_iova_mem);
234 
235 int iova_cache_get(void)
236 {
237 	mutex_lock(&iova_cache_mutex);
238 	if (!iova_cache_users) {
239 		iova_cache = kmem_cache_create(
240 			"iommu_iova", sizeof(struct iova), 0,
241 			SLAB_HWCACHE_ALIGN, NULL);
242 		if (!iova_cache) {
243 			mutex_unlock(&iova_cache_mutex);
244 			printk(KERN_ERR "Couldn't create iova cache\n");
245 			return -ENOMEM;
246 		}
247 	}
248 
249 	iova_cache_users++;
250 	mutex_unlock(&iova_cache_mutex);
251 
252 	return 0;
253 }
254 EXPORT_SYMBOL_GPL(iova_cache_get);
255 
256 void iova_cache_put(void)
257 {
258 	mutex_lock(&iova_cache_mutex);
259 	if (WARN_ON(!iova_cache_users)) {
260 		mutex_unlock(&iova_cache_mutex);
261 		return;
262 	}
263 	iova_cache_users--;
264 	if (!iova_cache_users)
265 		kmem_cache_destroy(iova_cache);
266 	mutex_unlock(&iova_cache_mutex);
267 }
268 EXPORT_SYMBOL_GPL(iova_cache_put);
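
/*
 * Illustrative sketch, not part of this file's code: users of this
 * allocator are expected to pair iova_cache_get() with iova_cache_put()
 * around the lifetime of their iova domains, so the shared kmem cache
 * exists while iovas may be allocated.  The function names below are
 * hypothetical.
 */
static inline int example_driver_init(void)
{
	int ret = iova_cache_get();	/* creates the cache on first use */

	if (ret)
		return ret;
	/* ... init_iova_domain(), allocate iovas ... */
	return 0;
}

static inline void example_driver_exit(void)
{
	/* ... put_iova_domain() on all domains first ... */
	iova_cache_put();		/* destroys the cache on the last put */
}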
269 
270 /**
271  * alloc_iova - allocates an iova
272  * @iovad: - iova domain in question
273  * @size: - size of page frames to allocate
274  * @limit_pfn: - max limit address
275  * @size_aligned: - set if size_aligned address range is required
276  * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
277  * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
278  * flag is set then the allocated address iova->pfn_lo will be naturally
279  * aligned on roundup_pow_of_two(size).
280  */
281 struct iova *
282 alloc_iova(struct iova_domain *iovad, unsigned long size,
283 	unsigned long limit_pfn,
284 	bool size_aligned)
285 {
286 	struct iova *new_iova;
287 	int ret;
288 
289 	new_iova = alloc_iova_mem();
290 	if (!new_iova)
291 		return NULL;
292 
293 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
294 			new_iova, size_aligned);
295 
296 	if (ret) {
297 		free_iova_mem(new_iova);
298 		return NULL;
299 	}
300 
301 	return new_iova;
302 }
303 EXPORT_SYMBOL_GPL(alloc_iova);
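
/*
 * Illustrative sketch, not part of this file's code: allocating a
 * size-aligned range of 8 IOVA pages below the domain's 32-bit boundary
 * and turning it back into a bus address with iova_shift() from
 * <linux/iova.h>.  The helper name and the size are example values.
 */
static inline dma_addr_t example_alloc_32bit(struct iova_domain *iovad)
{
	struct iova *iova;

	iova = alloc_iova(iovad, 8, iovad->dma_32bit_pfn, true);
	if (!iova)
		return 0;	/* no free range below the limit */

	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}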
304 
305 static struct iova *
306 private_find_iova(struct iova_domain *iovad, unsigned long pfn)
307 {
308 	struct rb_node *node = iovad->rbroot.rb_node;
309 
310 	assert_spin_locked(&iovad->iova_rbtree_lock);
311 
312 	while (node) {
313 		struct iova *iova = rb_entry(node, struct iova, node);
314 
315 		/* If pfn falls within iova's range, return iova */
316 		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
317 			return iova;
318 		}
319 
320 		if (pfn < iova->pfn_lo)
321 			node = node->rb_left;
322 		else if (pfn > iova->pfn_lo)
323 			node = node->rb_right;
324 	}
325 
326 	return NULL;
327 }
328 
329 static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
330 {
331 	assert_spin_locked(&iovad->iova_rbtree_lock);
332 	__cached_rbnode_delete_update(iovad, iova);
333 	rb_erase(&iova->node, &iovad->rbroot);
334 	free_iova_mem(iova);
335 }
336 
337 /**
338  * find_iova - finds an iova for a given pfn
339  * @iovad: - iova domain in question.
340  * @pfn: - page frame number
341  * This function finds and returns an iova belonging to the
342  * given domain which matches the given pfn.
343  */
344 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
345 {
346 	unsigned long flags;
347 	struct iova *iova;
348 
349 	/* Take the lock so that no other thread is manipulating the rbtree */
350 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
351 	iova = private_find_iova(iovad, pfn);
352 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
353 	return iova;
354 }
355 EXPORT_SYMBOL_GPL(find_iova);
356 
357 /**
358  * __free_iova - frees the given iova
359  * @iovad: iova domain in question.
360  * @iova: iova in question.
361  * Frees the given iova belonging to the given domain
362  */
363 void
364 __free_iova(struct iova_domain *iovad, struct iova *iova)
365 {
366 	unsigned long flags;
367 
368 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
369 	private_free_iova(iovad, iova);
370 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
371 }
372 EXPORT_SYMBOL_GPL(__free_iova);
373 
374 /**
375  * free_iova - finds and frees the iova for a given pfn
376  * @iovad: - iova domain in question.
377  * @pfn: - pfn that was allocated previously
378  * This function finds an iova for a given pfn and then
379  * frees the iova from that domain.
380  */
381 void
382 free_iova(struct iova_domain *iovad, unsigned long pfn)
383 {
384 	struct iova *iova = find_iova(iovad, pfn);
385 
386 	if (iova)
387 		__free_iova(iovad, iova);
388 
389 }
390 EXPORT_SYMBOL_GPL(free_iova);
391 
392 /**
393  * alloc_iova_fast - allocates an iova from rcache
394  * @iovad: - iova domain in question
395  * @size: - size of page frames to allocate
396  * @limit_pfn: - max limit address
397  * This function tries to satisfy an iova allocation from the rcache,
398  * and falls back to regular allocation on failure.
399  */
400 unsigned long
401 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
402 		unsigned long limit_pfn)
403 {
404 	bool flushed_rcache = false;
405 	unsigned long iova_pfn;
406 	struct iova *new_iova;
407 
408 	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
409 	if (iova_pfn)
410 		return iova_pfn;
411 
412 retry:
413 	new_iova = alloc_iova(iovad, size, limit_pfn, true);
414 	if (!new_iova) {
415 		unsigned int cpu;
416 
417 		if (flushed_rcache)
418 			return 0;
419 
420 		/* Try replenishing IOVAs by flushing rcache. */
421 		flushed_rcache = true;
422 		preempt_disable();
423 		for_each_online_cpu(cpu)
424 			free_cpu_cached_iovas(cpu, iovad);
425 		preempt_enable();
426 		goto retry;
427 	}
428 
429 	return new_iova->pfn_lo;
430 }
431 EXPORT_SYMBOL_GPL(alloc_iova_fast);
432 
433 /**
434  * free_iova_fast - free iova pfn range into rcache
435  * @iovad: - iova domain in question.
436  * @pfn: - pfn that was allocated previously
437  * @size: - # of pages in range
438  * This function frees an iova range by trying to put it into the rcache,
439  * falling back to regular iova deallocation via free_iova() if this fails.
440  */
441 void
442 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
443 {
444 	if (iova_rcache_insert(iovad, pfn, size))
445 		return;
446 
447 	free_iova(iovad, pfn);
448 }
449 EXPORT_SYMBOL_GPL(free_iova_fast);
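
/*
 * Illustrative sketch, not part of this file's code: the _fast variants
 * are meant to be used as a pair on the DMA map/unmap hot path, with the
 * same size (in IOVA pages) passed to both so the freed range goes back
 * into the matching rcache bucket.  The helper name and the size are
 * example values.
 */
static inline void example_fast_roundtrip(struct iova_domain *iovad)
{
	unsigned long pfn = alloc_iova_fast(iovad, 4, iovad->dma_32bit_pfn);

	if (!pfn)
		return;		/* failed even after flushing the rcaches */

	/* ... map, perform the DMA, unmap ... */

	free_iova_fast(iovad, pfn, 4);
}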
450 
451 /**
452  * put_iova_domain - destroys the iova domain
453  * @iovad: - iova domain in question.
454  * All the iovas in that domain are destroyed.
455  */
456 void put_iova_domain(struct iova_domain *iovad)
457 {
458 	struct rb_node *node;
459 	unsigned long flags;
460 
461 	free_iova_rcaches(iovad);
462 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
463 	node = rb_first(&iovad->rbroot);
464 	while (node) {
465 		struct iova *iova = rb_entry(node, struct iova, node);
466 
467 		rb_erase(node, &iovad->rbroot);
468 		free_iova_mem(iova);
469 		node = rb_first(&iovad->rbroot);
470 	}
471 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
472 }
473 EXPORT_SYMBOL_GPL(put_iova_domain);
474 
475 static int
476 __is_range_overlap(struct rb_node *node,
477 	unsigned long pfn_lo, unsigned long pfn_hi)
478 {
479 	struct iova *iova = rb_entry(node, struct iova, node);
480 
481 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
482 		return 1;
483 	return 0;
484 }
485 
486 static inline struct iova *
487 alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
488 {
489 	struct iova *iova;
490 
491 	iova = alloc_iova_mem();
492 	if (iova) {
493 		iova->pfn_lo = pfn_lo;
494 		iova->pfn_hi = pfn_hi;
495 	}
496 
497 	return iova;
498 }
499 
500 static struct iova *
501 __insert_new_range(struct iova_domain *iovad,
502 	unsigned long pfn_lo, unsigned long pfn_hi)
503 {
504 	struct iova *iova;
505 
506 	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
507 	if (iova)
508 		iova_insert_rbtree(&iovad->rbroot, iova);
509 
510 	return iova;
511 }
512 
513 static void
514 __adjust_overlap_range(struct iova *iova,
515 	unsigned long *pfn_lo, unsigned long *pfn_hi)
516 {
517 	if (*pfn_lo < iova->pfn_lo)
518 		iova->pfn_lo = *pfn_lo;
519 	if (*pfn_hi > iova->pfn_hi)
520 		*pfn_lo = iova->pfn_hi + 1;
521 }
522 
523 /**
524  * reserve_iova - reserves an iova in the given range
525  * @iovad: - iova domain pointer
526  * @pfn_lo: - lower page frame address
527  * @pfn_hi: - higher pfn address
528  * This function reserves the address range from pfn_lo to pfn_hi so
529  * that this address is not dished out as part of alloc_iova.
530  */
531 struct iova *
532 reserve_iova(struct iova_domain *iovad,
533 	unsigned long pfn_lo, unsigned long pfn_hi)
534 {
535 	struct rb_node *node;
536 	unsigned long flags;
537 	struct iova *iova;
538 	unsigned int overlap = 0;
539 
540 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
541 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
542 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
543 			iova = rb_entry(node, struct iova, node);
544 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
545 			if ((pfn_lo >= iova->pfn_lo) &&
546 				(pfn_hi <= iova->pfn_hi))
547 				goto finish;
548 			overlap = 1;
549 
550 		} else if (overlap)
551 				break;
552 	}
553 
554 	/* We are here either because this is the first reserved node
555 	 * or we need to insert the remaining non-overlapping address range
556 	 */
557 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
558 finish:
559 
560 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
561 	return iova;
562 }
563 EXPORT_SYMBOL_GPL(reserve_iova);
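
/*
 * Illustrative sketch, not part of this file's code: carving a hole out
 * of the allocatable space so alloc_iova() never hands it out, e.g. for
 * an address window the hardware interprets specially.  The address range
 * below is a made-up example, not something this file defines.
 */
static inline void example_reserve_window(struct iova_domain *iovad)
{
	unsigned long lo = 0xfee00000UL >> iova_shift(iovad);
	unsigned long hi = 0xfeefffffUL >> iova_shift(iovad);

	if (!reserve_iova(iovad, lo, hi))
		pr_err("example: failed to reserve iova window\n");
}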
564 
565 /**
566  * copy_reserved_iova - copies the reserved iovas between domains
567  * @from: - source domain from which to copy
568  * @to: - destination domain to which to copy
569  * This function copies reserved iovas from one domain to
570  * another.
571  */
572 void
573 copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
574 {
575 	unsigned long flags;
576 	struct rb_node *node;
577 
578 	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
579 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
580 		struct iova *iova = rb_entry(node, struct iova, node);
581 		struct iova *new_iova;
582 
583 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
584 		if (!new_iova)
585 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
586 				iova->pfn_lo, iova->pfn_lo);
587 	}
588 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
589 }
590 EXPORT_SYMBOL_GPL(copy_reserved_iova);
591 
592 struct iova *
593 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
594 		      unsigned long pfn_lo, unsigned long pfn_hi)
595 {
596 	unsigned long flags;
597 	struct iova *prev = NULL, *next = NULL;
598 
599 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
600 	if (iova->pfn_lo < pfn_lo) {
601 		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
602 		if (prev == NULL)
603 			goto error;
604 	}
605 	if (iova->pfn_hi > pfn_hi) {
606 		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
607 		if (next == NULL)
608 			goto error;
609 	}
610 
611 	__cached_rbnode_delete_update(iovad, iova);
612 	rb_erase(&iova->node, &iovad->rbroot);
613 
614 	if (prev) {
615 		iova_insert_rbtree(&iovad->rbroot, prev);
616 		iova->pfn_lo = pfn_lo;
617 	}
618 	if (next) {
619 		iova_insert_rbtree(&iovad->rbroot, next);
620 		iova->pfn_hi = pfn_hi;
621 	}
622 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
623 
624 	return iova;
625 
626 error:
627 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
628 	if (prev)
629 		free_iova_mem(prev);
630 	return NULL;
631 }
632 
633 /*
634  * Magazine caches for IOVA ranges.  For an introduction to magazines,
635  * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
636  * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
637  * For simplicity, we use a static magazine size and don't implement the
638  * dynamic size tuning described in the paper.
639  */
640 
641 #define IOVA_MAG_SIZE 128
642 
643 struct iova_magazine {
644 	unsigned long size;
645 	unsigned long pfns[IOVA_MAG_SIZE];
646 };
647 
648 struct iova_cpu_rcache {
649 	spinlock_t lock;
650 	struct iova_magazine *loaded;
651 	struct iova_magazine *prev;
652 };
653 
654 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
655 {
656 	return kzalloc(sizeof(struct iova_magazine), flags);
657 }
658 
659 static void iova_magazine_free(struct iova_magazine *mag)
660 {
661 	kfree(mag);
662 }
663 
664 static void
665 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
666 {
667 	unsigned long flags;
668 	int i;
669 
670 	if (!mag)
671 		return;
672 
673 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
674 
675 	for (i = 0 ; i < mag->size; ++i) {
676 		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
677 
678 		BUG_ON(!iova);
679 		private_free_iova(iovad, iova);
680 	}
681 
682 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
683 
684 	mag->size = 0;
685 }
686 
687 static bool iova_magazine_full(struct iova_magazine *mag)
688 {
689 	return (mag && mag->size == IOVA_MAG_SIZE);
690 }
691 
692 static bool iova_magazine_empty(struct iova_magazine *mag)
693 {
694 	return (!mag || mag->size == 0);
695 }
696 
697 static unsigned long iova_magazine_pop(struct iova_magazine *mag,
698 				       unsigned long limit_pfn)
699 {
700 	BUG_ON(iova_magazine_empty(mag));
701 
702 	if (mag->pfns[mag->size - 1] >= limit_pfn)
703 		return 0;
704 
705 	return mag->pfns[--mag->size];
706 }
707 
708 static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
709 {
710 	BUG_ON(iova_magazine_full(mag));
711 
712 	mag->pfns[mag->size++] = pfn;
713 }
714 
715 static void init_iova_rcaches(struct iova_domain *iovad)
716 {
717 	struct iova_cpu_rcache *cpu_rcache;
718 	struct iova_rcache *rcache;
719 	unsigned int cpu;
720 	int i;
721 
722 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
723 		rcache = &iovad->rcaches[i];
724 		spin_lock_init(&rcache->lock);
725 		rcache->depot_size = 0;
726 		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
727 		if (WARN_ON(!rcache->cpu_rcaches))
728 			continue;
729 		for_each_possible_cpu(cpu) {
730 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
731 			spin_lock_init(&cpu_rcache->lock);
732 			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
733 			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
734 		}
735 	}
736 }
737 
738 /*
739  * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
740  * return true on success.  Can fail if rcache is full and we can't free
741  * space, and free_iova() (our only caller) will then return the IOVA
742  * range to the rbtree instead.
743  */
744 static bool __iova_rcache_insert(struct iova_domain *iovad,
745 				 struct iova_rcache *rcache,
746 				 unsigned long iova_pfn)
747 {
748 	struct iova_magazine *mag_to_free = NULL;
749 	struct iova_cpu_rcache *cpu_rcache;
750 	bool can_insert = false;
751 	unsigned long flags;
752 
753 	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
754 	spin_lock_irqsave(&cpu_rcache->lock, flags);
755 
756 	if (!iova_magazine_full(cpu_rcache->loaded)) {
757 		can_insert = true;
758 	} else if (!iova_magazine_full(cpu_rcache->prev)) {
759 		swap(cpu_rcache->prev, cpu_rcache->loaded);
760 		can_insert = true;
761 	} else {
762 		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
763 
764 		if (new_mag) {
765 			spin_lock(&rcache->lock);
766 			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
767 				rcache->depot[rcache->depot_size++] =
768 						cpu_rcache->loaded;
769 			} else {
770 				mag_to_free = cpu_rcache->loaded;
771 			}
772 			spin_unlock(&rcache->lock);
773 
774 			cpu_rcache->loaded = new_mag;
775 			can_insert = true;
776 		}
777 	}
778 
779 	if (can_insert)
780 		iova_magazine_push(cpu_rcache->loaded, iova_pfn);
781 
782 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
783 	put_cpu_ptr(rcache->cpu_rcaches);
784 
785 	if (mag_to_free) {
786 		iova_magazine_free_pfns(mag_to_free, iovad);
787 		iova_magazine_free(mag_to_free);
788 	}
789 
790 	return can_insert;
791 }
792 
793 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
794 			       unsigned long size)
795 {
796 	unsigned int log_size = order_base_2(size);
797 
798 	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
799 		return false;
800 
801 	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
802 }
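
/*
 * Worked example with illustrative numbers: a request for size = 6 gives
 * order_base_2(6) = 3, so the range is cached in and served from
 * rcaches[3], the bucket for sizes of 5..8 IOVA pages.  Any size whose
 * order is IOVA_RANGE_CACHE_MAX_SIZE or more bypasses the rcaches and
 * always goes through the rbtree.
 */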
803 
804 /*
805  * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
806  * satisfy the request, return a matching non-NULL range and remove
807  * it from the 'rcache'.
808  */
809 static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
810 				       unsigned long limit_pfn)
811 {
812 	struct iova_cpu_rcache *cpu_rcache;
813 	unsigned long iova_pfn = 0;
814 	bool has_pfn = false;
815 	unsigned long flags;
816 
817 	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
818 	spin_lock_irqsave(&cpu_rcache->lock, flags);
819 
820 	if (!iova_magazine_empty(cpu_rcache->loaded)) {
821 		has_pfn = true;
822 	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
823 		swap(cpu_rcache->prev, cpu_rcache->loaded);
824 		has_pfn = true;
825 	} else {
826 		spin_lock(&rcache->lock);
827 		if (rcache->depot_size > 0) {
828 			iova_magazine_free(cpu_rcache->loaded);
829 			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
830 			has_pfn = true;
831 		}
832 		spin_unlock(&rcache->lock);
833 	}
834 
835 	if (has_pfn)
836 		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
837 
838 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
839 	put_cpu_ptr(rcache->cpu_rcaches);
840 
841 	return iova_pfn;
842 }
843 
844 /*
845  * Try to satisfy IOVA allocation range from rcache.  Fail if requested
846  * size is too big or the DMA limit we are given isn't satisfied by the
847  * top element in the magazine.
848  */
849 static unsigned long iova_rcache_get(struct iova_domain *iovad,
850 				     unsigned long size,
851 				     unsigned long limit_pfn)
852 {
853 	unsigned int log_size = order_base_2(size);
854 
855 	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
856 		return 0;
857 
858 	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
859 }
860 
861 /*
862  * Free a cpu's rcache.
863  */
864 static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
865 				 struct iova_rcache *rcache)
866 {
867 	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
868 	unsigned long flags;
869 
870 	spin_lock_irqsave(&cpu_rcache->lock, flags);
871 
872 	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
873 	iova_magazine_free(cpu_rcache->loaded);
874 
875 	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
876 	iova_magazine_free(cpu_rcache->prev);
877 
878 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
879 }
880 
881 /*
882  * free rcache data structures.
883  */
884 static void free_iova_rcaches(struct iova_domain *iovad)
885 {
886 	struct iova_rcache *rcache;
887 	unsigned long flags;
888 	unsigned int cpu;
889 	int i, j;
890 
891 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
892 		rcache = &iovad->rcaches[i];
893 		for_each_possible_cpu(cpu)
894 			free_cpu_iova_rcache(cpu, iovad, rcache);
895 		spin_lock_irqsave(&rcache->lock, flags);
896 		free_percpu(rcache->cpu_rcaches);
897 		for (j = 0; j < rcache->depot_size; ++j) {
898 			iova_magazine_free_pfns(rcache->depot[j], iovad);
899 			iova_magazine_free(rcache->depot[j]);
900 		}
901 		spin_unlock_irqrestore(&rcache->lock, flags);
902 	}
903 }
904 
905 /*
906  * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
907  */
908 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
909 {
910 	struct iova_cpu_rcache *cpu_rcache;
911 	struct iova_rcache *rcache;
912 	unsigned long flags;
913 	int i;
914 
915 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
916 		rcache = &iovad->rcaches[i];
917 		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
918 		spin_lock_irqsave(&cpu_rcache->lock, flags);
919 		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
920 		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
921 		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
922 	}
923 }
924 
925 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
926 MODULE_LICENSE("GPL");
927