1 /*
2  * Support for Medifield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  *
18  */
19 /*
20  * ISP MMU driver for classic two-level page tables
21  */
22 #ifndef	__ISP_MMU_H__
23 #define	__ISP_MMU_H__
24 
25 #include <linux/types.h>
26 #include <linux/mutex.h>
27 #include <linux/slab.h>
28 
29 /*
30  * do not change these values, the page size for ISP must be the
31  * same as kernel's page size.
32  */
/*
 * do not change these values, the page size for ISP must be the
 * same as kernel's page size.
 */
#define	ISP_PAGE_OFFSET		12
#define	ISP_PAGE_SIZE		BIT(ISP_PAGE_OFFSET)
#define	ISP_PAGE_MASK		(~(phys_addr_t)(ISP_PAGE_SIZE - 1))

/* Bits [31:22] of an ISP virtual address index the L1 page table. */
#define	ISP_L1PT_OFFSET		22
#define	ISP_L1PT_MASK		(~((1U << ISP_L1PT_OFFSET) - 1))

/* Bits [21:12] index the L2 page table; bits [11:0] are the page offset. */
#define	ISP_L2PT_OFFSET		12
#define	ISP_L2PT_MASK		(~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))

/* Both page-table levels hold 1024 entries (10 index bits each). */
#define	ISP_L1PT_PTES		1024
#define	ISP_L2PT_PTES		1024

#define	ISP_PTR_TO_L1_IDX(x)	(((x) & ISP_L1PT_MASK) \
					>> ISP_L1PT_OFFSET)

#define	ISP_PTR_TO_L2_IDX(x)	(((x) & ISP_L2PT_MASK) \
					>> ISP_L2PT_OFFSET)

#define	ISP_PAGE_ALIGN(x)	(((x) + (ISP_PAGE_SIZE - 1)) \
					& ISP_PAGE_MASK)

/*
 * Compose an ISP virtual address from an L1 index, an L2 index and a
 * page offset.  This must be an expression macro: the previous
 * "do { ... } while (0)" wrapper turned it into a statement that
 * discarded the computed value (and did not even expand to valid C,
 * as the inner expression lacked a terminating semicolon).
 */
#define	ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
	(((l1_idx) << ISP_L1PT_OFFSET) | \
	 ((l2_idx) << ISP_L2PT_OFFSET) | \
	 (offset))

#define	pgnr_to_size(pgnr)	((pgnr) << ISP_PAGE_OFFSET)
#define	size_to_pgnr_ceil(size)	(((size) + (1 << ISP_PAGE_OFFSET) - 1)\
						>> ISP_PAGE_OFFSET)
#define	size_to_pgnr_bottom(size)	((size) >> ISP_PAGE_OFFSET)
65 
66 struct isp_mmu;
67 
/*
 * Hardware-specific MMU operations supplied by the ISP driver.
 * All fields are treated as const after isp_mmu_init().
 */
struct isp_mmu_client {
	/*
	 * const value
	 *
	 * @name:
	 *      driver name
	 * @pte_valid_mask:
	 *      must have exactly one bit set (i.e. the value is a
	 *      power of 2); used by ISP_PTE_VALID() to test whether
	 *      a PTE is valid.
	 */
	char *name;
	unsigned int pte_valid_mask;
	/* PTE value written for not-present (unmapped) entries */
	unsigned int null_pte;

	/*
	 * get page directory base address (physical address).
	 *
	 * must be provided.
	 */
	unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
	/*
	 * callbacks to flush tlb.
	 *
	 * tlb_flush_range will at least flush TLBs containing
	 * address mappings from addr to addr + size.
	 *
	 * tlb_flush_all will flush all TLBs.
	 *
	 * tlb_flush_all must be provided; if tlb_flush_range is
	 * not valid, it is set to tlb_flush_all by default.
	 */
	void (*tlb_flush_range)(struct isp_mmu *mmu,
				unsigned int addr, unsigned int size);
	void (*tlb_flush_all)(struct isp_mmu *mmu);
	/* convert a physical page address into a PTE value */
	unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
				    phys_addr_t phys);
	/* convert a PTE value back into a physical page address */
	phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
				   unsigned int pte);

};
108 
/* Per-device MMU state: driver callbacks, L1 page table and L2 refcounts. */
struct isp_mmu {
	struct isp_mmu_client *driver;	/* hardware-specific operations */
	unsigned int l1_pte;		/* PTE referencing the L1 page table */
	/* per-L1-entry reference count for the corresponding L2 page table */
	int l2_pgt_refcount[ISP_L1PT_PTES];
	/* NOTE(review): presumably the ISP virtual base address of the
	 * mapped region — confirm against the .c implementation */
	phys_addr_t base_address;

	/* serializes page-table updates (see isp_mmu_map/isp_mmu_unmap) */
	struct mutex pt_mutex;
};
117 
/* flags for PDE and PTE */
/* mask of the bit that marks a PTE/PDE as valid for this driver */
#define	ISP_PTE_VALID_MASK(mmu)	\
	((mmu)->driver->pte_valid_mask)

/* nonzero iff @pte has the driver's valid bit set */
#define	ISP_PTE_VALID(mmu, pte)	\
	((pte) & ISP_PTE_VALID_MASK(mmu))

/* sentinel page-aligned physical address meaning "no page mapped" */
#define	NULL_PAGE	((phys_addr_t)(-1) & ISP_PAGE_MASK)
#define	PAGE_VALID(page)	((page) != NULL_PAGE)
127 
/*
 * init mmu with specific mmu driver.
 */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
/*
 * cleanup all mmu related things.
 */
void isp_mmu_exit(struct isp_mmu *mmu);

/*
 * setup/remove address mapping for pgnr contiguous physical pages
 * and isp_virt.
 *
 * map/unmap is mutex lock protected, and caller does not have
 * to do lock/unlock operation.
 *
 * map/unmap will not flush tlb, and caller needs to deal with
 * this itself.
 */
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr);

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr);
152 
153 static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
154 {
155 	if (mmu->driver && mmu->driver->tlb_flush_all)
156 		mmu->driver->tlb_flush_all(mmu);
157 }
158 
159 #define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
160 
161 static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
162 	unsigned int start, unsigned int size)
163 {
164 	if (mmu->driver && mmu->driver->tlb_flush_range)
165 		mmu->driver->tlb_flush_range(mmu, start, size);
166 }
167 
#endif /* __ISP_MMU_H__ */
169