/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */

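/* Split a GPU virtual address into its page-directory and page-table indices. */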
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

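/*
 * Invalidate the SGX directory cache if a TLB flush is pending or forced.
 * The caller must hold driver->sem in write mode.
 */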
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

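/*
 * Flush the SGX MMU: perform a directory-cache invalidate if one is pending,
 * otherwise a plain TLB flush, and flag a pending invalidate for the MSVDX
 * MMU as well.
 */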
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

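/*
 * Load this page directory into one of the hardware directory-list base
 * registers and invalidate the directory cache so the change takes effect.
 */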
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

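/*
 * Advance addr to the next page-directory boundary, clamped to end; the same
 * idiom the core kernel uses in pgd_addr_end().
 */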
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

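/* Build a hardware PTE from a page frame number and PSB_MMU_*_MEMORY type flags. */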
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

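/*
 * Allocate a page directory together with a dummy page table and dummy page.
 * Unless page faults are to be trapped, invalid PDEs point at the dummy page
 * table and invalid PTEs point at the dummy page.
 */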
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

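/*
 * Allocate a page table and fill every entry with the directory's invalid
 * PTE. If the directory is already bound to a hardware context, the freshly
 * written entries are clflushed so the GPU sees them.
 */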
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

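/*
 * Return the page table covering addr, allocating and hooking it into the
 * directory if it does not exist yet. On success the table is returned
 * kmapped (pt->v) with the driver spinlock held; release both with
 * psb_mmu_pt_unmap_unlock().
 */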
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

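/*
 * Unmap a page table obtained from one of the *_map_lock helpers and drop
 * the driver spinlock. If the table no longer holds any valid PTEs it is
 * unhooked from the directory and freed.
 */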
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

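/*
 * Set up the MMU driver: allocate the default page directory, clear any
 * outstanding page fault and, on x86, probe the CPU's clflush support so
 * PTE updates can be flushed at cache-line granularity.
 */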
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = dev->dev_private;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);

	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

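/*
 * Flush the CPU cache lines holding the PTEs of a (possibly tiled) range so
 * the GPU observes the updated entries. On x86 this uses clflush; otherwise
 * it falls back to drm_ttm_cache_flush().
 */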
#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif

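/*
 * Tear down the PTEs for a linear range of num_pages pages starting at
 * address, restoring the invalid PTE for each entry, then flush the CPU
 * cache and the GPU TLB as needed.
 */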
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

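/*
 * Like psb_mmu_remove_pfn_sequence(), but for a tiled layout: the range is
 * walked as rows of desired_tile_stride pages, hw_tile_stride pages apart.
 */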
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

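/*
 * Map num_pages physically contiguous pages, starting at start_pfn, into the
 * GPU address space at address, using the given memory type flags. Returns 0
 * on success or a negative error code.
 */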
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

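/*
 * Map an array of pages into the GPU address space at address, optionally in
 * a tiled layout (rows of desired_tile_stride pages, hw_tile_stride pages
 * apart). Returns 0 on success or a negative error code.
 */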
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}

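/*
 * Translate a GPU virtual address into a page frame number by walking the
 * page directory and table. If the address maps to the dummy (invalid) page,
 * that page's pfn is returned instead; -EINVAL means no valid mapping exists.
 */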
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}