/*
 * Support for Medifield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 */

/*
 * HMM buffer object (bo) interface.
 *
 * A buffer object ties together a range of ISP virtual memory, the
 * physical pages backing it, and the mappings (ISP MMU binding,
 * kernel vmap, userspace mmap) made of those pages.  A bo is tracked
 * by a per-device hmm_bo_device, which owns the vm allocator (two
 * rbtrees of free/allocated ranges) and the list of all bos.
 */

#ifndef __HMM_BO_H__
#define __HMM_BO_H__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "mmu/isp_mmu.h"
#include "hmm/hmm_common.h"
#include "ia_css_types.h"

/*
 * Argument/state validation helpers, built on the check_null_* and
 * var_*_goto macros (from hmm_common.h): log a message and either
 * return or jump to a label when a NULL pointer or an unexpected
 * bo->status bit combination is seen.
 */
#define check_bodev_null_return(bdev, exp)	\
		check_null_return(bdev, exp, \
				  "NULL hmm_bo_device.\n")

#define check_bodev_null_return_void(bdev)	\
		check_null_return_void(bdev, \
				       "NULL hmm_bo_device.\n")

/* Jump to label unless ALL bits of _status are set in bo->status. */
#define check_bo_status_yes_goto(bo, _status, label) \
		var_not_equal_goto((bo->status & (_status)), (_status), \
				   label, \
				   "HMM buffer status not contain %s.\n", \
				   #_status)

/* Jump to label if ALL bits of _status are set in bo->status. */
#define check_bo_status_no_goto(bo, _status, label) \
		var_equal_goto((bo->status & (_status)), (_status), \
			       label, \
			       "HMM buffer status contains %s.\n", \
			       #_status)

/* Map a pointer to an embedded member back to its hmm_buffer_object. */
#define rbtree_node_to_hmm_bo(root_node)	\
	container_of((root_node), struct hmm_buffer_object, node)

#define	list_to_hmm_bo(list_ptr)	\
	list_entry((list_ptr), struct hmm_buffer_object, list)

#define	kref_to_hmm_bo(kref_ptr)	\
	list_entry((kref_ptr), struct hmm_buffer_object, kref)

#define	check_bo_null_return(bo, exp)	\
	check_null_return(bo, exp, "NULL hmm buffer object.\n")

#define	check_bo_null_return_void(bo)	\
	check_null_return_void(bo, "NULL hmm buffer object.\n")

/* Page-allocation order bounds (2^order pages per chunk). */
#define	HMM_MAX_ORDER		3
#define	HMM_MIN_ORDER		0

/* ISP virtual address space managed by this allocator. */
#define	ISP_VM_START	0x0
#define	ISP_VM_SIZE	(0x7FFFFFFF)	/* 2G address space */
#define	ISP_PTR_NULL	NULL

/* hmm_bo_device.flag bit: set once hmm_bo_device_init() succeeded. */
#define	HMM_BO_DEVICE_INITED	0x1

/* How the backing pages of a buffer object are obtained. */
enum hmm_bo_type {
	HMM_BO_PRIVATE,	/* pages allocated by this driver itself */
	HMM_BO_SHARE,	/* pages shared with another (video) driver */
	HMM_BO_USER,	/* pages pinned from a userspace pointer */
	HMM_BO_LAST,	/* sentinel, not a real type */
};

/* Which pool an individual backing page came from. */
enum hmm_page_type {
	HMM_PAGE_TYPE_RESERVED,
	HMM_PAGE_TYPE_DYNAMIC,
	HMM_PAGE_TYPE_GENERAL,
};

/* hmm_buffer_object.status bits. */
#define	HMM_BO_MASK		0x1
#define	HMM_BO_FREE		0x0	/* no vm space allocated */
#define	HMM_BO_ALLOCED		0x1	/* ISP vm space allocated */
#define	HMM_BO_PAGE_ALLOCED	0x2	/* backing pages allocated */
#define	HMM_BO_BINDED		0x4	/* pages bound into the ISP MMU */
#define	HMM_BO_MMAPED		0x8	/* mmapped into a userspace vma */
#define	HMM_BO_VMAPED		0x10	/* vmapped, uncached */
#define	HMM_BO_VMAPED_CACHED	0x20	/* vmapped, cached */
#define	HMM_BO_ACTIVE		0x1000
/* hmm_buffer_object.mem_type values (for HMM_BO_USER bos). */
#define	HMM_BO_MEM_TYPE_USER	0x1
#define	HMM_BO_MEM_TYPE_PFN	0x2

/*
 * Per-device state: the ISP MMU, the vm-range allocator and the set
 * of all buffer objects created on this device.
 */
struct hmm_bo_device {
	struct isp_mmu		mmu;

	/* start/pgnr/size is used to record the virtual memory of this bo */
	unsigned int start;
	unsigned int pgnr;
	unsigned int size;

	/* list lock is used to protect the entire_bo_list */
	spinlock_t	list_lock;
	/* HMM_BO_DEVICE_INITED once initialization has completed */
	int flag;

	/* linked list for entire buffer object */
	struct list_head entire_bo_list;
	/* rbtree for maintaining the entire allocated vm */
	struct rb_root allocated_rbtree;
	/* rbtree for maintaining the entire free vm */
	struct rb_root free_rbtree;
	/* protects both rbtrees above */
	struct mutex rbtree_mutex;
	/* slab cache the bos themselves are allocated from */
	struct kmem_cache *bo_cache;
};

/* One backing page plus the pool it was drawn from. */
struct hmm_page_object {
	struct page		*page;
	enum hmm_page_type	type;
};

struct hmm_buffer_object {
	struct hmm_bo_device	*bdev;	/* owning bo device */
	struct list_head	list;	/* link in bdev->entire_bo_list */
	struct kref	kref;	/* refcount; see hmm_bo_ref()/hmm_bo_unref() */

	/* mutex protecting this BO */
	struct mutex		mutex;
	enum hmm_bo_type	type;
	struct hmm_page_object	*page_obj;	/* physical pages */
	int		from_highmem;	/* nonzero: try to alloc from highmem */
	int		mmap_count;	/* userspace mmap count */
	int		status;		/* HMM_BO_* status bits */
	int		mem_type;	/* HMM_BO_MEM_TYPE_* */
	void		*vmap_addr;	/* kernel virtual address by vmap */

	/* node in bdev's allocated_rbtree or free_rbtree */
	struct rb_node	node;
	/* vm range of this bo, in pages */
	unsigned int	start;
	unsigned int	end;
	unsigned int	pgnr;
	/*
	 * When inserting a bo which has the same pgnr as an existing
	 * bo node in the free_rbtree, use the "prev & next" pointers
	 * to maintain a bo linked list instead of inserting this bo
	 * into the free_rbtree directly; this makes sure each node
	 * in the free_rbtree has a different pgnr.
	 * "prev & next" default to NULL.
	 */
	struct hmm_buffer_object	*prev;
	struct hmm_buffer_object	*next;
};

/*
 * allocate a buffer object covering pgnr pages of ISP vm space from
 * bdev.  Returns NULL on failure.
 */
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr);

/*
 * release function invoked when the bo's refcount drops to zero
 * (see hmm_bo_unref below for the lifetime rules).
 */
void hmm_bo_release(struct hmm_buffer_object *bo);

/*
 * initialize the bo device: set up its ISP MMU with mmu_driver and
 * hand it the vm range starting at vaddr_start, size bytes long.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start, unsigned int size);

/*
 * clean up all hmm_bo_device related things.
 */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);

/*
 * whether the bo device is inited or not.
 */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);

/*
 * increase buffer object reference.
 */
void hmm_bo_ref(struct hmm_buffer_object *bo);

/*
 * decrease buffer object reference. if reference reaches 0,
 * release function of the buffer object will be called.
 *
 * this call is also used to release hmm_buffer_object or its
 * upper level object with it embedded in. you need to call
 * this function when it is no longer used.
 *
 * Note:
 *
 * user doesn't need to care about internal resource release of
 * the buffer object in the release callback, it will be
 * handled internally.
 *
 * this call will only release internal resource of the buffer
 * object but will not free the buffer object itself, as the
 * buffer object can be both pre-allocated statically or
 * dynamically allocated. so user need to deal with the release
 * of the buffer object itself manually. below example shows
 * the normal case of using the buffer object.
 *
 *	struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
 *	......
 *	hmm_bo_unref(bo);
 *
 * or:
 *
 *	struct hmm_buffer_object bo;
 *
 *	hmm_bo_init(bdev, &bo, pgnr, NULL);
 *	...
 *	hmm_bo_unref(&bo);
 *
 * NOTE(review): hmm_bo_init is not declared in this header — the
 * second example looks stale; confirm against the implementation.
 */
void hmm_bo_unref(struct hmm_buffer_object *bo);

/*
 * query whether vm space has been allocated for the bo
 * (i.e. the HMM_BO_ALLOCED status bit is set).
 */
int hmm_bo_allocated(struct hmm_buffer_object *bo);

/*
 * allocate/free physical pages for the bo. will try to alloc mem
 * from highmem if from_highmem is set, and type indicates that the
 * pages will be allocated by using video driver (for share buffer)
 * or by ISP driver itself.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       const void __user *userptr, bool cached);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);

/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr);

/*
 * bind/unbind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);

/*
 * vmap buffer object's pages to contiguous kernel virtual address.
 * if the buffer has been vmaped, return the virtual address directly.
 */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);

/*
 * flush the cache for the vmapped buffer object's pages,
 * if the buffer has not been vmapped, return directly.
 */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);

/*
 * vunmap buffer object's kernel virtual address.
 */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);

/*
 * mmap the bo's physical pages to specific vma.
 *
 * vma's address space size must be the same as bo's size,
 * otherwise it will return -EINVAL.
 *
 * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
 */
int hmm_bo_mmap(struct vm_area_struct *vma,
		struct hmm_buffer_object *bo);

/* global page pools, defined elsewhere in the hmm layer */
extern struct hmm_pool	dynamic_pool;
extern struct hmm_pool	reserved_pool;

/*
 * find the buffer object by its virtual address vaddr.
 * return NULL if no such buffer object found.
 */
struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object by its virtual address.
 * it does not need to be the start address of one bo,
 * it can be an address within the range of one bo.
 * return NULL if no such buffer object found.
 */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object with kernel virtual address vaddr.
 * return NULL if no such buffer object found.
 */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr);

#endif /* __HMM_BO_H__ */