xref: /openbmc/linux/drivers/iommu/iova.c (revision 95e9fd10)
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

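/**
 * init_iova_domain - initialise an iova domain
 * @iovad: - iova domain to be initialised
 * @pfn_32bit: - pfn treated as the 32-bit boundary; allocations made with
 *	this limit reuse the cached32_node hint below
 *
 * Sets up the rbtree lock, an empty rbtree and the cached allocation hint
 * for the domain.
 */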
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
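
/*
 * Worked example (values are arbitrary, for illustration only): for a
 * size-aligned request of size = 4 pfns with limit_pfn = 0x12, order = 2
 * and pad_size = (0x12 + 1) % 4 = 3.  The allocator below then places the
 * range at pfn_lo = limit_pfn - (size + pad_size) + 1 = 0x12 - 7 + 1 = 0xc,
 * which is naturally aligned on the 4-pfn size.
 */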

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - maximum pfn that may be allocated
 * @size_aligned: - set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching top-down from limit_pfn rather than bottom-up from
 * IOVA_START_PFN. If the size_aligned flag is set then the allocated address
 * iova->pfn_lo will be naturally aligned on __roundup_pow_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size up
	 * to the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
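
/*
 * Minimal usage sketch (illustrative only; 'dma_32bit_pfn' and 'nrpages'
 * are hypothetical caller-provided values, not defined in this file):
 *
 *	struct iova_domain iovad;
 *	struct iova *iova;
 *
 *	init_iova_domain(&iovad, dma_32bit_pfn);
 *	iova = alloc_iova(&iovad, nrpages, dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map iova->pfn_lo .. iova->pfn_hi in the IOMMU page tables ...
 */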

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
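
/*
 * Illustrative lookup ('pfn' is a hypothetical pfn returned by an earlier
 * allocation); the pairing below is exactly what free_iova() further down
 * implements:
 *
 *	struct iova *iova = find_iova(&iovad, pfn);
 *
 *	if (iova)
 *		__free_iova(&iovad, iova);
 */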

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}
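
/*
 * Illustrative unmap path (assumes 'dma_addr' is a bus address handed out
 * earlier and that the domain tracks pfns at PAGE_SIZE granularity):
 *
 *	unsigned long pfn = dma_addr >> PAGE_SHIFT;
 *
 *	... tear down the page-table mapping for the range ...
 *	free_iova(&iovad, pfn);
 */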

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
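
/*
 * Illustrative teardown (hypothetical caller): once no allocations can
 * race with domain destruction, the owner that set the domain up with
 * init_iova_domain() simply releases every remaining range:
 *
 *	put_iova_domain(&iovad);
 */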

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so that
 * this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved range
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
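
/*
 * Illustrative reservation ('mmio_base' and 'mmio_size' describe a made-up
 * platform MMIO window, not anything defined in this file): carving the
 * window out of the allocatable space so alloc_iova() never hands it to a
 * device:
 *
 *	if (!reserve_iova(&iovad, mmio_base >> PAGE_SHIFT,
 *			  (mmio_base + mmio_size - 1) >> PAGE_SHIFT))
 *		printk(KERN_WARNING "failed to reserve MMIO window\n");
 */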

/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
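
/*
 * Illustrative use (hypothetical scenario): when a device is re-attached
 * to a freshly initialised domain, the reserved ranges can be propagated
 * before any new allocations are made:
 *
 *	init_iova_domain(&new_domain, dma_32bit_pfn);
 *	copy_reserved_iova(&old_domain, &new_domain);
 */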