// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
static void free_global_cached_iovas(struct iova_domain *iovad);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
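
/*
 * Usage sketch (illustrative, not taken from an in-tree caller): a driver
 * grabs a reference on the iova kmem_cache, initialises the domain with
 * its IOMMU page size, and tears everything down in reverse order. The
 * granule and start_pfn values below are assumptions for the example.
 *
 *	struct iova_domain iovad;
 *
 *	iova_cache_get();
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *	...
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */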

static bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq         = NULL;
	iovad->flush_cb   = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt,  0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb   = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
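
/*
 * Usage sketch (illustrative): a caller that wants to defer IOTLB
 * invalidation registers a flush callback, and optionally a destructor
 * for the opaque per-entry data, right after init_iova_domain(). The
 * callback names below are placeholders, not real in-tree symbols.
 *
 *	static void my_flush_iotlb_all(struct iova_domain *iovad) { ... }
 *	static void my_entry_dtor(unsigned long data) { ... }
 *
 *	if (init_iova_flush_queue(&iovad, my_flush_iotlb_all, my_entry_dtor))
 *		pr_info("iova flush queue unavailable, using strict invalidation\n");
 */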

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = &iovad->anchor.node;
			curr_iova = rb_entry(curr, struct iova, node);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}
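
/*
 * Worked example of the alignment arithmetic above (numbers are only
 * illustrative): for size == 6 with size_aligned set, fls_long(5) == 3,
 * so align_mask == ~0UL << 3 and every candidate is rounded down to a
 * multiple of 8; with high_pfn == 0x100000 the first candidate is
 * (0x100000 - 6) & align_mask == 0xffff8.
 */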

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if a size-aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
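
/*
 * Usage sketch (illustrative): allocate a size-aligned range whose
 * highest pfn stays below a caller-supplied DMA limit, then release it
 * with __free_iova(). 'nrpages' and 'dma_limit' are placeholders.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, nrpages, dma_limit >> iova_shift(&iovad), true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map [iova->pfn_lo, iova->pfn_hi] ...
 *	__free_iova(&iovad, iova);
 */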

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (iova)
		private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);
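
/*
 * Usage sketch (illustrative): a caller that only remembers the DMA
 * address of a mapping can release it with free_iova(), or look it up
 * first with find_iova() when it also needs the range size. 'dma_addr'
 * is a placeholder.
 *
 *	struct iova *iova = find_iova(&iovad, dma_addr >> iova_shift(&iovad));
 *
 *	if (iova) {
 *		nrpages = iova_size(iova);
 *		__free_iova(&iovad, iova);
 *	}
 */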

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
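
/*
 * Usage sketch (illustrative): the rcache-backed pair used on the DMA
 * map/unmap fast path. 'nrpages', 'shift' and 'dma_limit' are
 * placeholders for the caller's values.
 *
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&iovad, nrpages, dma_limit >> shift, true);
 *	if (!pfn)
 *		return DMA_MAPPING_ERROR;
 *	...
 *	free_iova_fast(&iovad, pfn, nrpages);
 */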

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].data     = data;
	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
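
/*
 * Usage sketch (illustrative): a caller with a flush queue defers the
 * release on unmap instead of calling free_iova_fast() directly; the
 * range only becomes reusable once the next flush_cb invocation has
 * completed. The zero passed as 'data' assumes no entry_dtor state.
 *
 *	queue_iova(&iovad, dma_addr >> iova_shift(&iovad), nrpages, 0);
 */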

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/*
	 * We get here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
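
/*
 * Usage sketch (illustrative): carve a hardware window (for example an
 * MSI or MMIO region) out of the allocatable space so that alloc_iova()
 * never hands it out. 'window_start' and 'window_size' are placeholders.
 *
 *	reserve_iova(&iovad, iova_pfn(&iovad, window_start),
 *		     iova_pfn(&iovad, window_start + window_size - 1));
 */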

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */
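
/*
 * With a fixed IOVA_MAG_SIZE of 128, each CPU caches at most two
 * magazines (one "loaded", one "prev") per size class, i.e. up to 256
 * pfns, and the global depot keeps at most MAX_GLOBAL_MAGS full
 * magazines per size class for other CPUs to adopt when their own
 * magazines run dry.
 */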

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova_fast() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
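
/*
 * Example of the size-class mapping used by iova_rcache_get() and
 * iova_rcache_insert() (numbers illustrative): a 6-page request maps to
 * order_base_2(6) == 3, i.e. rcaches[3], the same bucket as an 8-page
 * request; any size whose order_base_2() reaches IOVA_RANGE_CACHE_MAX_SIZE
 * bypasses the rcache entirely. Passing 'limit_pfn - size' to
 * __iova_rcache_get() compensates for magazines recording pfn_lo while
 * the caller's limit applies to the end of the range.
 */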

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges of global cache
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");