/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for GFP_ATOMIC */
#include <linux/slab.h>		/* for kmalloc */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"
/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address range 0 - 0x7fffffff and the rest starts from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)
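/*
 * In line with the constraint above, alloc_page_table() below requests its
 * pages with GFP_DMA32, so ISP page tables are placed in 32-bit addressable
 * memory.
 */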

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);

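/*
 * ISP page tables are plain pages in the kernel direct mapping, so a PTE can
 * be read or written by converting the table's physical address back to a
 * kernel virtual address with phys_to_virt().
 */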
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

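/*
 * Convert a page-aligned CPU physical address to an ISP PTE with the client
 * driver's valid bit(s) set.
 */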
static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
	phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * Allocate an uncacheable page table.
 * Return its physical address.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/*
	 * we need an uncacheable page table.
	 */
#ifdef	CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

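	/*
	 * Initialize all 1024 entries (one ISP page worth of PTEs) to the
	 * driver's null PTE so a freshly allocated table maps nothing.
	 */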
	for (i = 0; i < 1024; i++) {
		/* NEED CHECK */
		atomisp_set_pte(page, i, mmu->driver->null_pte);
	}

	return page;
}

static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/*
	 * reset the page to write-back before freeing it
	 */
	virt = phys_to_virt(page);

#ifdef	CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif

	free_page((unsigned long)virt);
}

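/*
 * Error reporting helpers: dump the relevant page-table state when a mapping
 * is unexpectedly remapped or an invalid entry is being unmapped.
 */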
static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		(u64)old_phys, isp_virt,
		(u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx, (unsigned int)isp_virt,
		pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", (unsigned int)pte);
}

/*
 * Fill the L2 page table with mappings for the ISP virtual range
 * [start, end), pointing at the given (contiguous) physical pages.
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

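		/*
		 * Install the PTE and bump this L2 table's refcount; the
		 * table is freed again in mmu_l2_unmap() once the count
		 * drops back to zero.
		 */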
		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

/*
 * Walk the L1 page table for the ISP virtual range [start, end), allocating
 * L2 page tables on demand and filling them via mmu_l2_map().
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"alloc page table fail.\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

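		/*
		 * Map at most one L1 entry's worth of address space
		 * (1 << ISP_L1PT_OFFSET bytes) per iteration; the L1 index
		 * is recomputed at the top of the loop for the next chunk.
		 */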
		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}

/*
 * Map pgnr pages starting at isp_virt to the physical pages starting at
 * phys, allocating the L1 page table on first use.
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/*
		 * allocate one new page for the L1 page table
		 */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/*
		 * set up the L1 page table physical address for the MMU
		 */
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}

/*
 * Clear entries in the L2 page table for the ISP virtual range [start, end);
 * free the table itself once its refcount drops to zero.
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Walk the L1 page table and clear the mappings for the ISP virtual range
 * [start, end) via mmu_l2_unmap().
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/*
			 * Advance to the next chunk before continuing,
			 * otherwise this loop would never terminate.
			 */
			ptr = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * mmu_l2_unmap() frees the L2 page table and clears its L1
		 * entry once the table's refcount drops to zero, so there
		 * is nothing more to do here.
		 */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Unmap pgnr pages of ISP virtual address space starting at isp_virt.
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Tear down the mappings between the ISP virtual addresses start_isp_virt
 * and end_isp_virt (used to unwind partially set up mappings on error).
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = (start_isp_virt) & ISP_PAGE_MASK;
	end = (end_isp_virt) & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}

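/*
 * Public map/unmap entry points: thin wrappers around mmu_map()/mmu_unmap().
 * A rough usage sketch from a hypothetical caller (illustrative only;
 * `mmu`, `isp_virt`, `page_phys` and `pgnr` are assumed to have been set up
 * elsewhere, and isp_mmu_flush_tlb() is the helper referenced further down
 * in this file to make the updated mappings visible to the ISP):
 *
 *	if (isp_mmu_map(mmu, isp_virt, page_phys, pgnr))
 *		return -ENOMEM;
 *	isp_mmu_flush_tlb(mmu);
 *	...
 *	isp_mmu_unmap(mmu, isp_virt, pgnr);
 *	isp_mmu_flush_tlb(mmu);
 */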
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}

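/*
 * Fallback for client drivers that do not provide a ranged TLB flush:
 * simply flush the whole ISP TLB.
 */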
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
					    unsigned int start,
					    unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->tlb_flush_all) {
		dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

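	/* pt_mutex serializes all page table updates (mmu_map()/mmu_unmap()) */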
	mutex_init(&mmu->pt_mutex);

	return 0;
}

/* Free the L1 page table and any remaining L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

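	/* free every L2 page table still referenced from the L1 page table */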
	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);
}