/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef	__HMM_BO_H__
#define	__HMM_BO_H__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "mmu/isp_mmu.h"
#include "hmm/hmm_common.h"
#include "ia_css_types.h"

#define	check_bodev_null_return(bdev, exp)	\
		check_null_return(bdev, exp, \
			"NULL hmm_bo_device.\n")

#define	check_bodev_null_return_void(bdev)	\
		check_null_return_void(bdev, \
			"NULL hmm_bo_device.\n")

#define	check_bo_status_yes_goto(bo, _status, label) \
	var_not_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status does not contain %s.\n", \
			#_status)

#define	check_bo_status_no_goto(bo, _status, label) \
	var_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status contains %s.\n", \
			#_status)
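
/*
 * Example use of the status-check macros above (a sketch; the
 * surrounding function body and the status_err label are
 * illustrative only):
 *
 *	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 *	...
 *	return 0;
 * status_err:
 *	return -EINVAL;
 */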

#define rbtree_node_to_hmm_bo(root_node)	\
	container_of((root_node), struct hmm_buffer_object, node)

#define	list_to_hmm_bo(list_ptr)	\
	list_entry((list_ptr), struct hmm_buffer_object, list)

#define	kref_to_hmm_bo(kref_ptr)	\
	container_of((kref_ptr), struct hmm_buffer_object, kref)

#define	check_bo_null_return(bo, exp)	\
	check_null_return(bo, exp, "NULL hmm buffer object.\n")

#define	check_bo_null_return_void(bo)	\
	check_null_return_void(bo, "NULL hmm buffer object.\n")

#define	HMM_MAX_ORDER		3
#define	HMM_MIN_ORDER		0

#define	ISP_VM_START	0x0
#define	ISP_VM_SIZE	(0x7FFFFFFF)	/* 2 GiB address space */
#define	ISP_PTR_NULL	NULL

#define	HMM_BO_DEVICE_INITED	0x1

enum hmm_bo_type {
	HMM_BO_PRIVATE,
	HMM_BO_SHARE,
	HMM_BO_USER,
	HMM_BO_LAST,
};

enum hmm_page_type {
	HMM_PAGE_TYPE_RESERVED,
	HMM_PAGE_TYPE_DYNAMIC,
	HMM_PAGE_TYPE_GENERAL,
};

#define	HMM_BO_MASK		0x1
#define	HMM_BO_FREE		0x0
#define	HMM_BO_ALLOCED		0x1
#define	HMM_BO_PAGE_ALLOCED	0x2
#define	HMM_BO_BINDED		0x4
#define	HMM_BO_MMAPED		0x8
#define	HMM_BO_VMAPED		0x10
#define	HMM_BO_VMAPED_CACHED	0x20
#define	HMM_BO_ACTIVE		0x1000
#define	HMM_BO_MEM_TYPE_USER	0x1
#define	HMM_BO_MEM_TYPE_PFN	0x2

struct hmm_bo_device {
	struct isp_mmu		mmu;

	/* start/pgnr/size record the ISP virtual memory range
	 * managed by this device
	 */
	unsigned int start;
	unsigned int pgnr;
	unsigned int size;

	/* list_lock protects the entire_bo_list */
	spinlock_t	list_lock;
	int flag;

	/* linked list of all buffer objects */
	struct list_head entire_bo_list;
	/* rbtree maintaining all allocated vm regions */
	struct rb_root allocated_rbtree;
	/* rbtree maintaining all free vm regions */
	struct rb_root free_rbtree;
	struct mutex rbtree_mutex;
	struct kmem_cache *bo_cache;
};

struct hmm_page_object {
	struct page		*page;
	enum hmm_page_type	type;
};

struct hmm_buffer_object {
	struct hmm_bo_device	*bdev;
	struct list_head	list;
	struct kref	kref;

	struct page **pages;

	/* mutex protecting this BO */
	struct mutex		mutex;
	enum hmm_bo_type	type;
	struct hmm_page_object	*page_obj;	/* physical pages */
	int		from_highmem;
	int		mmap_count;
	int		status;
	int		mem_type;
	void		*vmap_addr; /* kernel virtual address by vmap */

	struct rb_node	node;
	unsigned int	start;
	unsigned int	end;
	unsigned int	pgnr;
	/*
	 * When inserting a bo whose pgnr matches that of a node
	 * already in the free_rbtree, the "prev & next" pointers
	 * are used to chain the bos into a linked list instead of
	 * inserting the new bo into the free_rbtree directly; this
	 * guarantees that every node in the free_rbtree has a
	 * distinct pgnr.
	 * "prev & next" default to NULL.
	 */
	struct hmm_buffer_object	*prev;
	struct hmm_buffer_object	*next;
};

struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr);

void hmm_bo_release(struct hmm_buffer_object *bo);

int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start, unsigned int size);

/*
 * clean up all hmm_bo_device related resources.
 */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);

/*
 * return whether the bo device has been initialized.
 */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);
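
/*
 * Example of bringing a bo device up and down (a sketch; the
 * isp_mmu_client instance "my_mmu_client" is hypothetical):
 *
 *	struct hmm_bo_device bdev;
 *	int ret;
 *
 *	ret = hmm_bo_device_init(&bdev, &my_mmu_client,
 *				 ISP_VM_START, ISP_VM_SIZE);
 *	if (ret)
 *		return ret;
 *	...
 *	hmm_bo_device_exit(&bdev);
 */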

/*
 * increase the buffer object reference count.
 */
void hmm_bo_ref(struct hmm_buffer_object *bo);

/*
 * decrease the buffer object reference count. if the reference
 * count reaches 0, the release function of the buffer object
 * will be called.
 *
 * this call is also used to release an hmm_buffer_object, or the
 * upper-level object in which it is embedded. call this function
 * when the object is no longer used.
 *
 * Note:
 *
 * users don't need to care about releasing the internal resources
 * of the buffer object in the release callback; that is handled
 * internally.
 *
 * this call only releases the internal resources of the buffer
 * object and does not free the buffer object itself, as the
 * buffer object can be either pre-allocated statically or
 * allocated dynamically. users therefore need to handle the
 * release of the buffer object itself. the examples below show
 * typical usage of the buffer object.
 *
 *	struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
 *	......
 *	hmm_bo_unref(bo);
 *
 * or:
 *
 *	struct hmm_buffer_object bo;
 *
 *	hmm_bo_init(bdev, &bo, pgnr, NULL);
 *	...
 *	hmm_bo_unref(&bo);
 */
void hmm_bo_unref(struct hmm_buffer_object *bo);

/*
 * query whether the bo's virtual memory space has been allocated,
 * i.e. whether its status contains HMM_BO_ALLOCED.
 */
int hmm_bo_allocated(struct hmm_buffer_object *bo);

/*
 * allocate/free physical pages for the bo. will try to allocate
 * memory from highmem if from_highmem is set; type indicates
 * whether the pages will be allocated by the video driver (for a
 * shared buffer) or by the ISP driver itself.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       const void __user *userptr, bool cached);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
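
/*
 * Example of the private-page allocation path (a sketch; the
 * error handling shown is illustrative):
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);
 *	if (!bo)
 *		return -ENOMEM;
 *	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *	if (ret) {
 *		hmm_bo_unref(bo);
 *		return ret;
 *	}
 */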

/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr);
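
/*
 * Example (a sketch, assuming the usual 0-on-success kernel
 * return convention):
 *
 *	struct hmm_page_object *page_obj;
 *	int pgnr;
 *
 *	if (!hmm_bo_get_page_info(bo, &page_obj, &pgnr))
 *		pr_info("bo has %d pages\n", pgnr);
 */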

/*
 * bind/unbind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);
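
/*
 * Example of binding allocated pages into the ISP address space
 * (a sketch; assumes hmm_bo_alloc_pages() already succeeded):
 *
 *	ret = hmm_bo_bind(bo);
 *	if (ret)
 *		return ret;
 *	...
 *	hmm_bo_unbind(bo);
 */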

/*
 * vmap the buffer object's pages into a contiguous kernel virtual
 * address. if the buffer has already been vmapped, return the
 * existing virtual address directly.
 */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);

/*
 * flush the cache for the vmapped buffer object's pages;
 * if the buffer has not been vmapped, return directly.
 */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);

/*
 * vunmap the buffer object's kernel virtual address.
 */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);
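
/*
 * Example of the vmap/flush/vunmap lifecycle (a sketch; passing
 * true requests a cached mapping, and "data"/"len" are
 * illustrative):
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, data, len);
 *	hmm_bo_flush_vmap(bo);
 *	hmm_bo_vunmap(bo);
 */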

/*
 * mmap the bo's physical pages into the specified vma.
 *
 * the vma's address space size must be the same as the bo's size,
 * otherwise -EINVAL is returned.
 *
 * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
 */
int hmm_bo_mmap(struct vm_area_struct *vma,
		struct hmm_buffer_object *bo);
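
/*
 * Example inside a driver's mmap file operation (a sketch; the
 * fop itself is illustrative, "bdev" is assumed reachable from
 * driver context, and the lookup helper is declared below):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo;
 *
 *		bo = hmm_bo_device_search_start(bdev,
 *						vma->vm_pgoff << PAGE_SHIFT);
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 */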

extern struct hmm_pool	dynamic_pool;
extern struct hmm_pool	reserved_pool;

/*
 * find the buffer object whose virtual memory starts at vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object containing the virtual address vaddr.
 * vaddr does not need to be the start address of a bo; it can be
 * any address within the range of a bo.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object with the kernel virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr);
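
/*
 * Example lookup by an address that may fall anywhere inside a
 * bo's range (a sketch):
 *
 *	bo = hmm_bo_device_search_in_range(bdev, vaddr);
 *	if (!bo)
 *		return -EINVAL;
 */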

#endif /* __HMM_BO_H__ */