/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 */
/*
 * ISP MMU management wrapper code
 */
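
/*
 * The ISP uses a two-level page table: a single L1 page table whose
 * ISP_L1PT_PTES entries each point to an L2 page table, and L2 page
 * tables whose ISP_L2PT_PTES entries each map one ISP page of
 * ISP_PAGE_SIZE bytes.  mmu_map()/mmu_unmap() below walk this structure;
 * the concrete PTE encoding is delegated to the client driver through
 * struct isp_mmu_client (phys_to_pte/pte_to_phys and pte_valid_mask).
 */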
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for totalram_pages() */
#include <linux/slab.h>		/* for the kmem_cache_* slab allocator */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address range 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);

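/*
 * Page table entries are stored as unsigned int (32 bits on the platforms
 * this driver targets).  These helpers read/write entry @idx of the page
 * table at physical address @pt through its kernel virtual mapping; the
 * tables are allocated from lowmem in alloc_page_table() below.
 */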
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
	phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * Allocate an uncacheable page table and return its physical address.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	/* a page table lock may be needed here */
	/*
	 * The slab allocator (kmem_cache and kmalloc family) doesn't handle
	 * the GFP_DMA32 flag, so we have to use the buddy allocator.
	 */
	if (totalram_pages() > (unsigned long)NR_PAGES_2GB)
		virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	else
		virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/*
	 * we need an uncacheable page table.
	 */
#ifdef	CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

	for (i = 0; i < 1024; i++) {
		/* NEED CHECK */
		atomisp_set_pte(page, i, mmu->driver->null_pte);
	}

	return page;
}

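/*
 * Free a page table previously returned by alloc_page_table(), restoring
 * the default write-back caching attribute first (x86 only).
 */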
static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/*
	 * reset the page to write-back before freeing it
	 */
	virt = phys_to_virt(page);

#ifdef	CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif

	/*
	 * Free with the allocator that alloc_page_table() used: pages that
	 * came from the buddy allocator (the >2GB RAM case) must not be
	 * returned to the slab cache.
	 */
	if (totalram_pages() > (unsigned long)NR_PAGES_2GB)
		free_page((unsigned long)virt);
	else
		kmem_cache_free(mmu->tbl_cache, virt);
}

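/*
 * Error reporting helpers: dump the offending page table context when a
 * remap, or an unmap of an invalid entry, is detected.
 */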
static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		(u64)old_phys, isp_virt,
		(u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx, (unsigned int)isp_virt,
		pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", (unsigned int)pte);
}

/*
 * Update L2 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"failed to allocate page table\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev,
				"failed to set up mapping in L2PT\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}

/*
 * Update page table according to isp virtual address and page physical
 * address
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/*
		 * allocate 1 new page for L1 page table
		 */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "failed to allocate page table\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/*
		 * setup L1 page table physical addr to MMU
		 */
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);

	if (ret)
		dev_err(atomisp_dev, "failed to set up mapping in L1PT\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}

/*
 * Clear L2 page table entries for the given ISP virtual address range and
 * free the L2 page table itself once all of its entries have been unmapped.
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Unmap entries in the L1 page table (and the L2 tables below it) for the
 * given ISP virtual address range.
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/*
			 * Advance to the next L1 entry; without this the
			 * error path would spin on the same ptr forever.
			 */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * use the same L2 page next time, so we don't
		 * need to invalidate and free this PT.
		 */
		/*      atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Unmap pgnr pages starting at the given ISP virtual address.
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Unmap everything between the ISP start and end virtual addresses; used
 * by the map functions to clean up after a partial mapping failure.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = (start_isp_virt) & ISP_PAGE_MASK;
	end = (end_isp_virt) & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}

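/*
 * Public wrappers around mmu_map()/mmu_unmap(): map or unmap pgnr ISP pages
 * starting at ISP virtual address isp_virt.  Neither function flushes the
 * ISP TLB itself; see the tlb_flush_* operations set up in isp_mmu_init().
 */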
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}

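/*
 * Fallback used when the client driver does not provide a ranged TLB
 * flush: flush the whole ISP TLB instead.
 */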
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
	unsigned int start,
	unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->tlb_flush_all) {
		dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

	mutex_init(&mmu->pt_mutex);

	mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
					   ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!mmu->tbl_cache)
		return -ENOMEM;

	return 0;
}
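
/*
 * Rough usage sketch (illustrative only, not taken verbatim from a caller;
 * my_client_driver is a hypothetical struct isp_mmu_client instance that
 * must provide at least tlb_flush_all, pte_valid_mask, null_pte and the
 * phys_to_pte/pte_to_phys callbacks used above):
 *
 *	isp_mmu_init(&mmu, &my_client_driver);
 *	isp_mmu_map(&mmu, isp_virt, page_phys, npages);
 *	isp_mmu_flush_tlb(&mmu);
 *	...
 *	isp_mmu_unmap(&mmu, isp_virt, npages);
 *	isp_mmu_exit(&mmu);
 */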

/* Free the L1 and L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);

	kmem_cache_destroy(mmu->tbl_cache);
}