1 /*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  *
18  */
19 /*
20  * This file contains functions for buffer object structure management
21  */
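/*
 * Overview (a rough sketch of how this file fits together, based on the
 * functions below):
 *
 * - hmm_bo_device_init() sets up the ISP MMU and creates one big free
 *   buffer object covering the whole ISP virtual address range.
 * - free bos live in bdev->free_rbtree, keyed by their size in pages
 *   (pgnr); equal-sized bos are chained off an rbtree node via prev/next.
 * - allocated bos live in bdev->allocated_rbtree, keyed by start address.
 * - every bo, free or allocated, is also linked into bdev->entire_bo_list
 *   in address order, which is what lets hmm_bo_release() merge adjacent
 *   free bos back together.
 * - a typical lifecycle is hmm_bo_alloc() -> hmm_bo_alloc_pages() ->
 *   hmm_bo_bind() -> (hmm_bo_vmap()/hmm_bo_mmap() as needed) ->
 *   hmm_bo_unbind() -> hmm_bo_free_pages() -> hmm_bo_unref().
 */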
22 #include <linux/kernel.h>
23 #include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_NOWAIT, GFP_KERNEL */
25 #include <linux/mm.h>
26 #include <linux/mm_types.h>
27 #include <linux/hugetlb.h>
28 #include <linux/highmem.h>
29 #include <linux/slab.h>		/* for kmalloc */
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/string.h>
33 #include <linux/list.h>
34 #include <linux/errno.h>
35 #include <linux/io.h>
36 #include <asm/current.h>
37 #include <linux/sched/signal.h>
38 #include <linux/file.h>
39 
40 #include <asm/set_memory.h>
41 
42 #include "atomisp_internal.h"
43 #include "hmm/hmm_common.h"
44 #include "hmm/hmm_pool.h"
45 #include "hmm/hmm_bo.h"
46 
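/*
 * Helpers for converting between a page count and an allocation order.
 * For example, order_to_nr(2) == 4 and nr_to_order_bottom(5) == 2, i.e.
 * the largest order whose page count still fits into nr.
 */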
47 static unsigned int order_to_nr(unsigned int order)
48 {
49 	return 1U << order;
50 }
51 
52 static unsigned int nr_to_order_bottom(unsigned int nr)
53 {
54 	return fls(nr) - 1;
55 }
56 
57 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
58 		     unsigned int pgnr)
59 {
60 	check_bodev_null_return(bdev, -EINVAL);
61 	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
62 			 "hmm_bo_device not inited yet.\n");
63 	/* prevent zero size buffer object */
64 	if (pgnr == 0) {
65 		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
66 		return -EINVAL;
67 	}
68 
69 	memset(bo, 0, sizeof(*bo));
70 	mutex_init(&bo->mutex);
71 
72 	/* init the bo->list HEAD as an element of entire_bo_list */
73 	INIT_LIST_HEAD(&bo->list);
74 
75 	bo->bdev = bdev;
76 	bo->vmap_addr = NULL;
77 	bo->status = HMM_BO_FREE;
78 	bo->start = bdev->start;
79 	bo->pgnr = pgnr;
80 	bo->end = bo->start + pgnr_to_size(pgnr);
81 	bo->prev = NULL;
82 	bo->next = NULL;
83 
84 	return 0;
85 }
86 
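/*
 * Best-fit lookup in the free rbtree (keyed by pgnr): find the smallest free
 * bo that is at least 'pgnr' pages big and detach it.  When several bos of
 * the same size are chained off one rbtree node, a chained bo is returned in
 * preference to the node itself, so the tree does not need rebalancing.
 */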
87 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
88     struct rb_node *node, unsigned int pgnr)
89 {
90 	struct hmm_buffer_object *this, *ret_bo, *temp_bo;
91 
92 	this = rb_entry(node, struct hmm_buffer_object, node);
93 	if (this->pgnr == pgnr ||
94 	    (this->pgnr > pgnr && !this->node.rb_left)) {
95 		goto remove_bo_and_return;
96 	} else {
97 		if (this->pgnr < pgnr) {
98 			if (!this->node.rb_right)
99 				return NULL;
100 			ret_bo = __bo_search_and_remove_from_free_rbtree(
101 				     this->node.rb_right, pgnr);
102 		} else {
103 			ret_bo = __bo_search_and_remove_from_free_rbtree(
104 				     this->node.rb_left, pgnr);
105 		}
106 		if (!ret_bo) {
107 			if (this->pgnr > pgnr)
108 				goto remove_bo_and_return;
109 			else
110 				return NULL;
111 		}
112 		return ret_bo;
113 	}
114 
115 remove_bo_and_return:
	/* NOTE: all nodes on the free rbtree have prev == NULL.
	 * 1. if 'this->next' is NULL:
	 *	erase 'this' from the rbtree, rebalance it and return 'this'.
	 */
120 	if (!this->next) {
121 		rb_erase(&this->node, &this->bdev->free_rbtree);
122 		return this;
123 	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next'.
	 * 2. detach 'this->next' from the chain hanging off 'this' and fix
	 *	up the related 'next/prev' pointers; the rbtree itself stays
	 *	unchanged.
	 */
129 	temp_bo = this->next;
130 	this->next = temp_bo->next;
131 	if (temp_bo->next)
132 		temp_bo->next->prev = this;
133 	temp_bo->next = NULL;
134 	temp_bo->prev = NULL;
135 	return temp_bo;
136 }
137 
138 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
139 	ia_css_ptr start)
140 {
141 	struct rb_node *n = root->rb_node;
142 	struct hmm_buffer_object *bo;
143 
144 	do {
145 		bo = rb_entry(n, struct hmm_buffer_object, node);
146 
147 		if (bo->start > start) {
148 			if (!n->rb_left)
149 				return NULL;
150 			n = n->rb_left;
151 		} else if (bo->start < start) {
152 			if (!n->rb_right)
153 				return NULL;
154 			n = n->rb_right;
155 		} else {
156 			return bo;
157 		}
158 	} while (n);
159 
160 	return NULL;
161 }
162 
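/*
 * Find the bo whose range [start, end) contains the given address; unlike
 * __bo_search_by_addr(), the address does not have to be the bo's start
 * address.
 */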
163 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
164     struct rb_root *root, unsigned int start)
165 {
166 	struct rb_node *n = root->rb_node;
167 	struct hmm_buffer_object *bo;
168 
169 	do {
170 		bo = rb_entry(n, struct hmm_buffer_object, node);
171 
172 		if (bo->start > start) {
173 			if (!n->rb_left)
174 				return NULL;
175 			n = n->rb_left;
176 		} else {
177 			if (bo->end > start)
178 				return bo;
179 			if (!n->rb_right)
180 				return NULL;
181 			n = n->rb_right;
182 		}
183 	} while (n);
184 
185 	return NULL;
186 }
187 
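/*
 * Insert a bo into the free rbtree, which is keyed by size (pgnr).  If a
 * node of the same size already exists, the bo is chained onto that node
 * via the prev/next pointers instead of adding a new rbtree node.
 */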
188 static void __bo_insert_to_free_rbtree(struct rb_root *root,
189 				       struct hmm_buffer_object *bo)
190 {
191 	struct rb_node **new = &root->rb_node;
192 	struct rb_node *parent = NULL;
193 	struct hmm_buffer_object *this;
194 	unsigned int pgnr = bo->pgnr;
195 
196 	while (*new) {
197 		parent = *new;
198 		this = container_of(*new, struct hmm_buffer_object, node);
199 
200 		if (pgnr < this->pgnr) {
201 			new = &((*new)->rb_left);
202 		} else if (pgnr > this->pgnr) {
203 			new = &((*new)->rb_right);
204 		} else {
205 			bo->prev = this;
206 			bo->next = this->next;
207 			if (this->next)
208 				this->next->prev = bo;
209 			this->next = bo;
210 			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
211 			return;
212 		}
213 	}
214 
215 	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
216 
217 	rb_link_node(&bo->node, parent, new);
218 	rb_insert_color(&bo->node, root);
219 }
220 
221 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
222 					struct hmm_buffer_object *bo)
223 {
224 	struct rb_node **new = &root->rb_node;
225 	struct rb_node *parent = NULL;
226 	struct hmm_buffer_object *this;
227 	unsigned int start = bo->start;
228 
229 	while (*new) {
230 		parent = *new;
231 		this = container_of(*new, struct hmm_buffer_object, node);
232 
233 		if (start < this->start)
234 			new = &((*new)->rb_left);
235 		else
236 			new = &((*new)->rb_right);
237 	}
238 
239 	kref_init(&bo->kref);
240 	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
241 
242 	rb_link_node(&bo->node, parent, new);
243 	rb_insert_color(&bo->node, root);
244 }
245 
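/*
 * Split 'bo': carve a new bo of 'pgnr' pages off the front of 'bo' and link
 * it into entire_bo_list just before 'bo'; 'bo' keeps the remaining pages.
 * The caller is expected to put both pieces back into the proper rbtrees.
 */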
246 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
247 	struct hmm_buffer_object *bo,
248 	unsigned int pgnr)
249 {
250 	struct hmm_buffer_object *new_bo;
251 	unsigned long flags;
252 	int ret;
253 
254 	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
255 	if (!new_bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
257 		return NULL;
258 	}
259 	ret = __bo_init(bdev, new_bo, pgnr);
260 	if (ret) {
261 		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
262 		kmem_cache_free(bdev->bo_cache, new_bo);
263 		return NULL;
264 	}
265 
266 	new_bo->start = bo->start;
267 	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
268 	bo->start = new_bo->end;
269 	bo->pgnr = bo->pgnr - pgnr;
270 
271 	spin_lock_irqsave(&bdev->list_lock, flags);
272 	list_add_tail(&new_bo->list, &bo->list);
273 	spin_unlock_irqrestore(&bdev->list_lock, flags);
274 
275 	return new_bo;
276 }
277 
278 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
279 {
280 	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take a known bo off the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
	 *	without a linked list hanging off it; to take it off we just
	 *	erase bo directly and rebalance the free rbtree
	 */
286 	if (!bo->prev && !bo->next) {
287 		rb_erase(&bo->node, &bdev->free_rbtree);
		/* 2. when bo->prev == NULL && bo->next != NULL, bo is an rbtree
		 *	node with a linked list hanging off it; to take it off we
		 *	erase bo first, then insert bo->next into the free rbtree
		 *	and rebalance the free rbtree
		 */
293 	} else if (!bo->prev && bo->next) {
294 		bo->next->prev = NULL;
295 		rb_erase(&bo->node, &bdev->free_rbtree);
296 		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
297 		bo->next = NULL;
		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not an
		 *	rbtree node but the last element of the linked list behind
		 *	an rbtree node; to take it off we just need to set the
		 *	"prev/next" pointers to NULL, the free rbtree stays unchanged
		 */
303 	} else if (bo->prev && !bo->next) {
304 		bo->prev->next = NULL;
305 		bo->prev = NULL;
		/* 4. when bo->prev != NULL && bo->next != NULL, bo is not an
		 *	rbtree node but sits in the middle of the linked list behind
		 *	an rbtree node; to take it off we just relink its neighbours'
		 *	"prev/next" pointers around it, the free rbtree stays unchanged
		 */
311 	} else if (bo->prev && bo->next) {
312 		bo->next->prev = bo->prev;
313 		bo->prev->next = bo->next;
314 		bo->next = NULL;
315 		bo->prev = NULL;
316 	}
317 }
318 
319 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
320 	struct hmm_buffer_object *next_bo)
321 {
322 	struct hmm_bo_device *bdev;
323 	unsigned long flags;
324 
325 	bdev = bo->bdev;
326 	next_bo->start = bo->start;
327 	next_bo->pgnr = next_bo->pgnr + bo->pgnr;
328 
329 	spin_lock_irqsave(&bdev->list_lock, flags);
330 	list_del(&bo->list);
331 	spin_unlock_irqrestore(&bdev->list_lock, flags);
332 
333 	kmem_cache_free(bo->bdev->bo_cache, bo);
334 
335 	return next_bo;
336 }
337 
338 /*
339  * hmm_bo_device functions.
340  */
341 int hmm_bo_device_init(struct hmm_bo_device *bdev,
342 		       struct isp_mmu_client *mmu_driver,
343 		       unsigned int vaddr_start,
344 		       unsigned int size)
345 {
346 	struct hmm_buffer_object *bo;
347 	unsigned long flags;
348 	int ret;
349 
350 	check_bodev_null_return(bdev, -EINVAL);
351 
352 	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
353 	if (ret) {
354 		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
355 		return ret;
356 	}
357 
358 	bdev->start = vaddr_start;
359 	bdev->pgnr = size_to_pgnr_ceil(size);
360 	bdev->size = pgnr_to_size(bdev->pgnr);
361 
362 	spin_lock_init(&bdev->list_lock);
363 	mutex_init(&bdev->rbtree_mutex);
364 
365 	bdev->flag = HMM_BO_DEVICE_INITED;
366 
367 	INIT_LIST_HEAD(&bdev->entire_bo_list);
368 	bdev->allocated_rbtree = RB_ROOT;
369 	bdev->free_rbtree = RB_ROOT;
370 
371 	bdev->bo_cache = kmem_cache_create("bo_cache",
372 					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
373 	if (!bdev->bo_cache) {
374 		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
375 		isp_mmu_exit(&bdev->mmu);
376 		return -ENOMEM;
377 	}
378 
379 	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
380 	if (!bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
382 		isp_mmu_exit(&bdev->mmu);
383 		return -ENOMEM;
384 	}
385 
386 	ret = __bo_init(bdev, bo, bdev->pgnr);
387 	if (ret) {
388 		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
389 		kmem_cache_free(bdev->bo_cache, bo);
390 		isp_mmu_exit(&bdev->mmu);
391 		return -EINVAL;
392 	}
393 
394 	spin_lock_irqsave(&bdev->list_lock, flags);
395 	list_add_tail(&bo->list, &bdev->entire_bo_list);
396 	spin_unlock_irqrestore(&bdev->list_lock, flags);
397 
398 	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
399 
400 	return 0;
401 }
402 
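/*
 * Allocate 'pgnr' pages worth of ISP virtual address space from the free
 * rbtree.  If the best-fitting free bo is larger than requested, it is split
 * with __bo_break_up() and only the front part is handed out.  No physical
 * pages are attached yet; see hmm_bo_alloc_pages().
 */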
403 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
404 				       unsigned int pgnr)
405 {
406 	struct hmm_buffer_object *bo, *new_bo;
407 	struct rb_root *root = &bdev->free_rbtree;
408 
409 	check_bodev_null_return(bdev, NULL);
410 	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
411 			 "hmm_bo_device not inited yet.\n");
412 
413 	if (pgnr == 0) {
414 		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
415 		return NULL;
416 	}
417 
418 	mutex_lock(&bdev->rbtree_mutex);
419 	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
420 	if (!bo) {
421 		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed\n",
423 			__func__);
424 		return NULL;
425 	}
426 
427 	if (bo->pgnr > pgnr) {
428 		new_bo = __bo_break_up(bdev, bo, pgnr);
429 		if (!new_bo) {
430 			mutex_unlock(&bdev->rbtree_mutex);
431 			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
432 				__func__);
433 			return NULL;
434 		}
435 
436 		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
437 		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
438 
439 		mutex_unlock(&bdev->rbtree_mutex);
440 		return new_bo;
441 	}
442 
443 	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
444 
445 	mutex_unlock(&bdev->rbtree_mutex);
446 	return bo;
447 }
448 
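/*
 * Return a bo to the free pool.  Any remaining binding, pages or vmap are
 * torn down first, then the bo is merged with adjacent free neighbours from
 * entire_bo_list and reinserted into the free rbtree.
 */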
449 void hmm_bo_release(struct hmm_buffer_object *bo)
450 {
451 	struct hmm_bo_device *bdev = bo->bdev;
452 	struct hmm_buffer_object *next_bo, *prev_bo;
453 
454 	mutex_lock(&bdev->rbtree_mutex);
455 
	/*
	 * FIX ME:
	 *
	 * how to destroy the bo while it is still MMAPED?
	 *
	 * ideally, this should not happen as hmm_bo_release
	 * is only called when the kref reaches 0, and the mmap
	 * operation takes a reference via hmm_bo_ref.
	 * so, if this happens, something has gone wrong.
	 */
466 	if (bo->status & HMM_BO_MMAPED) {
467 		mutex_unlock(&bdev->rbtree_mutex);
468 		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
469 		return;
470 	}
471 
472 	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
474 		hmm_bo_unbind(bo);
475 	}
476 
477 	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free them first\n");
479 		hmm_bo_free_pages(bo);
480 	}
481 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
482 		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
483 		hmm_bo_vunmap(bo);
484 	}
485 
486 	rb_erase(&bo->node, &bdev->allocated_rbtree);
487 
488 	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
489 	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
490 
491 	if (bo->list.prev != &bdev->entire_bo_list &&
492 	    prev_bo->end == bo->start &&
493 	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
494 		__bo_take_off_handling(prev_bo);
495 		bo = __bo_merge(prev_bo, bo);
496 	}
497 
498 	if (bo->list.next != &bdev->entire_bo_list &&
499 	    next_bo->start == bo->end &&
500 	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
501 		__bo_take_off_handling(next_bo);
502 		bo = __bo_merge(bo, next_bo);
503 	}
504 
505 	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
506 
507 	mutex_unlock(&bdev->rbtree_mutex);
509 }
510 
511 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
512 {
513 	struct hmm_buffer_object *bo;
514 	unsigned long flags;
515 
516 	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
517 
518 	check_bodev_null_return_void(bdev);
519 
	/*
	 * release all allocated bos even if they are still in use;
	 * all bos will be merged back into one big bo
	 */
524 	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
525 		hmm_bo_release(
526 		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
527 
528 	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
529 		__func__);
530 
531 	/* free all bos to release all ISP virtual memory */
532 	while (!list_empty(&bdev->entire_bo_list)) {
533 		bo = list_to_hmm_bo(bdev->entire_bo_list.next);
534 
535 		spin_lock_irqsave(&bdev->list_lock, flags);
536 		list_del(&bo->list);
537 		spin_unlock_irqrestore(&bdev->list_lock, flags);
538 
539 		kmem_cache_free(bdev->bo_cache, bo);
540 	}
541 
	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
543 
544 	kmem_cache_destroy(bdev->bo_cache);
545 
546 	isp_mmu_exit(&bdev->mmu);
547 }
548 
549 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
550 {
551 	check_bodev_null_return(bdev, -EINVAL);
552 
553 	return bdev->flag == HMM_BO_DEVICE_INITED;
554 }
555 
556 int hmm_bo_allocated(struct hmm_buffer_object *bo)
557 {
558 	check_bo_null_return(bo, 0);
559 
560 	return bo->status & HMM_BO_ALLOCED;
561 }
562 
563 struct hmm_buffer_object *hmm_bo_device_search_start(
564     struct hmm_bo_device *bdev, ia_css_ptr vaddr)
565 {
566 	struct hmm_buffer_object *bo;
567 
568 	check_bodev_null_return(bdev, NULL);
569 
570 	mutex_lock(&bdev->rbtree_mutex);
571 	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
572 	if (!bo) {
573 		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
575 			__func__, vaddr);
576 		return NULL;
577 	}
578 	mutex_unlock(&bdev->rbtree_mutex);
579 
580 	return bo;
581 }
582 
583 struct hmm_buffer_object *hmm_bo_device_search_in_range(
584     struct hmm_bo_device *bdev, unsigned int vaddr)
585 {
586 	struct hmm_buffer_object *bo;
587 
588 	check_bodev_null_return(bdev, NULL);
589 
590 	mutex_lock(&bdev->rbtree_mutex);
591 	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
592 	if (!bo) {
593 		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
595 			__func__, vaddr);
596 		return NULL;
597 	}
598 	mutex_unlock(&bdev->rbtree_mutex);
599 
600 	return bo;
601 }
602 
603 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
604     struct hmm_bo_device *bdev, const void *vaddr)
605 {
606 	struct list_head *pos;
607 	struct hmm_buffer_object *bo;
608 	unsigned long flags;
609 
610 	check_bodev_null_return(bdev, NULL);
611 
612 	spin_lock_irqsave(&bdev->list_lock, flags);
613 	list_for_each(pos, &bdev->entire_bo_list) {
614 		bo = list_to_hmm_bo(pos);
		/* skip bos which are still free and have nothing allocated */
616 		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
617 			continue;
618 		if (bo->vmap_addr == vaddr)
619 			goto found;
620 	}
621 	spin_unlock_irqrestore(&bdev->list_lock, flags);
622 	return NULL;
623 found:
624 	spin_unlock_irqrestore(&bdev->list_lock, flags);
625 	return bo;
626 }
627 
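/*
 * Give back up to 'free_pgnr' pages of a private bo: reserved-pool pages go
 * back to the reserved pool, dynamic/general pages go back to the dynamic
 * pool if it is initialized, and everything else is set back to write-back
 * caching and released to the system allocator.
 */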
628 static void free_private_bo_pages(struct hmm_buffer_object *bo,
629 				  struct hmm_pool *dypool,
630 				  struct hmm_pool *repool,
631 				  int free_pgnr)
632 {
633 	int i, ret;
634 
635 	for (i = 0; i < free_pgnr; i++) {
636 		switch (bo->page_obj[i].type) {
637 		case HMM_PAGE_TYPE_RESERVED:
638 			if (repool->pops
639 			    && repool->pops->pool_free_pages) {
640 				repool->pops->pool_free_pages(repool->pool_info,
641 							      &bo->page_obj[i]);
642 				hmm_mem_stat.res_cnt--;
643 			}
644 			break;
645 		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that the pages come from
		 * system memory, so when freeing them they should be put
		 * back into the dynamic pool.
649 		 */
650 		case HMM_PAGE_TYPE_DYNAMIC:
651 		case HMM_PAGE_TYPE_GENERAL:
652 			if (dypool->pops
653 			    && dypool->pops->pool_inited
654 			    && dypool->pops->pool_inited(dypool->pool_info)) {
655 				if (dypool->pops->pool_free_pages)
656 					dypool->pops->pool_free_pages(
657 					    dypool->pool_info,
658 					    &bo->page_obj[i]);
659 				break;
660 			}
661 
662 		/*
		 * if the dynamic memory pool doesn't exist, fall through and
		 * free the pages back to the system directly.
665 		 */
666 		default:
667 			ret = set_pages_wb(bo->page_obj[i].page, 1);
668 			if (ret)
669 				dev_err(atomisp_dev,
670 					"set page to WB err ...ret = %d\n",
671 					ret);
			/*
			 * W/A: set_pages_wb() occasionally returns -EFAULT,
			 * indicating that the page address is not in the valid
			 * range (0xffff880000000000~0xffffc7ffffffffff);
			 * __free_pages() would then panic. It is not known why
			 * the page address becomes invalid; it may be memory
			 * corruption caused by low memory.
			 */
679 			if (!ret) {
680 				__free_pages(bo->page_obj[i].page, 0);
681 				hmm_mem_stat.sys_size--;
682 			}
683 			break;
684 		}
685 	}
688 }
689 
/* Allocate pages which will be used only by the ISP */
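/*
 * Allocation strategy, roughly: first try the dynamic pool, then the
 * reserved pool, and finally fall back to alloc_pages() with a decreasing
 * order until the request is satisfied or memory runs out.  Non-cached
 * buffers additionally have their pages set to uncacheable via
 * set_pages_uc().
 */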
691 static int alloc_private_pages(struct hmm_buffer_object *bo,
692 			       int from_highmem,
693 			       bool cached,
694 			       struct hmm_pool *dypool,
695 			       struct hmm_pool *repool)
696 {
697 	int ret;
698 	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
699 	struct page *pages;
700 	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
701 	int i, j;
702 	int failure_number = 0;
703 	bool reduce_order = false;
704 	bool lack_mem = true;
705 
706 	if (from_highmem)
707 		gfp |= __GFP_HIGHMEM;
708 
709 	pgnr = bo->pgnr;
710 
711 	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
712 				     GFP_KERNEL);
713 	if (unlikely(!bo->page_obj))
714 		return -ENOMEM;
715 
716 	i = 0;
717 	alloc_pgnr = 0;
718 
719 	/*
720 	 * get physical pages from dynamic pages pool.
721 	 */
722 	if (dypool->pops && dypool->pops->pool_alloc_pages) {
723 		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
724 			     bo->page_obj, pgnr,
725 			     cached);
726 		hmm_mem_stat.dyc_size -= alloc_pgnr;
727 
728 		if (alloc_pgnr == pgnr)
729 			return 0;
730 	}
731 
732 	pgnr -= alloc_pgnr;
733 	i += alloc_pgnr;
734 
735 	/*
736 	 * get physical pages from reserved pages pool for atomisp.
737 	 */
738 	if (repool->pops && repool->pops->pool_alloc_pages) {
739 		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
740 			     &bo->page_obj[i], pgnr,
741 			     cached);
742 		hmm_mem_stat.res_cnt += alloc_pgnr;
743 		if (alloc_pgnr == pgnr)
744 			return 0;
745 	}
746 
747 	pgnr -= alloc_pgnr;
748 	i += alloc_pgnr;
749 
750 	while (pgnr) {
751 		order = nr_to_order_bottom(pgnr);
752 		/*
		 * if we are short of memory, we set order to
		 * HMM_MIN_ORDER every time.
755 		 */
756 		if (lack_mem)
757 			order = HMM_MIN_ORDER;
758 		else if (order > HMM_MAX_ORDER)
759 			order = HMM_MAX_ORDER;
760 retry:
761 		/*
762 		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER, or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purposes.
		 *
		 * REVISIT: why is __GFP_FS necessary?
769 		 */
770 		if (order == HMM_MIN_ORDER) {
771 			gfp &= ~GFP_NOWAIT;
772 			gfp |= __GFP_RECLAIM | __GFP_FS;
773 		}
774 
775 		pages = alloc_pages(gfp, order);
776 		if (unlikely(!pages)) {
777 			/*
			 * in the low-memory case, if a higher-order
			 * allocation fails, retry with order=0. if order=0
			 * fails too, there is no memory left.
782 			 */
783 			if (order == HMM_MIN_ORDER) {
784 				dev_err(atomisp_dev,
785 					"%s: cannot allocate pages\n",
786 					__func__);
787 				goto cleanup;
788 			}
789 			order = HMM_MIN_ORDER;
790 			failure_number++;
791 			reduce_order = true;
792 			/*
			 * if the allocation fails twice in a row, we assume
			 * we are short of memory now.
795 			 */
796 			if (failure_number == 2) {
797 				lack_mem = true;
798 				failure_number = 0;
799 			}
800 			goto retry;
801 		} else {
802 			blk_pgnr = order_to_nr(order);
803 
804 			if (!cached) {
805 				/*
806 				 * set memory to uncacheable -- UC_MINUS
807 				 */
808 				ret = set_pages_uc(pages, blk_pgnr);
809 				if (ret) {
810 					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");
812 
813 					__free_pages(pages, order);
814 
815 					goto cleanup;
816 				}
817 			}
818 
819 			for (j = 0; j < blk_pgnr; j++) {
820 				bo->page_obj[i].page = pages + j;
821 				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
822 			}
823 
824 			pgnr -= blk_pgnr;
825 			hmm_mem_stat.sys_size += blk_pgnr;
826 
827 			/*
828 			 * if order is not reduced this time, clear
829 			 * failure_number.
830 			 */
831 			if (reduce_order)
832 				reduce_order = false;
833 			else
834 				failure_number = 0;
835 		}
836 	}
837 
838 	return 0;
839 cleanup:
840 	alloc_pgnr = i;
841 	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
842 
843 	kfree(bo->page_obj);
844 
845 	return -ENOMEM;
846 }
847 
848 static void free_private_pages(struct hmm_buffer_object *bo,
849 			       struct hmm_pool *dypool,
850 			       struct hmm_pool *repool)
851 {
852 	free_private_bo_pages(bo, dypool, repool, bo->pgnr);
853 
854 	kfree(bo->page_obj);
855 }
856 
857 /*
 * Adapted from the kernel function __get_user_pages() in mm/memory.c
 *
 * Handles buffers allocated by another kernel-space driver and mmapped into
 * user space; unlike __get_user_pages(), the VM_PFNMAP and VM_IO flags in the
 * VMA structure are ignored.
 *
 * Looks up the physical pages behind a user-space virtual address range and
 * fills them into the page list.
864  */
865 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
866 			      unsigned long start, int nr_pages,
867 			      unsigned int gup_flags, struct page **pages,
868 			      struct vm_area_struct **vmas)
869 {
870 	int i, ret;
871 	unsigned long vm_flags;
872 
873 	if (nr_pages <= 0)
874 		return 0;
875 
876 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
877 
878 	/*
879 	 * Require read or write permissions.
880 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
881 	 */
882 	vm_flags  = (gup_flags & FOLL_WRITE) ?
883 		    (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
884 	vm_flags &= (gup_flags & FOLL_FORCE) ?
885 		    (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
886 	i = 0;
887 
888 	do {
889 		struct vm_area_struct *vma;
890 
891 		vma = find_vma(mm, start);
892 		if (!vma) {
893 			dev_err(atomisp_dev, "find_vma failed\n");
894 			return i ? : -EFAULT;
895 		}
896 
		if (is_vm_hugetlb_page(vma)) {
			/*
			 * Huge pages are not supported here (the old
			 * follow_hugetlb_page() call was removed); bail
			 * out instead of looping forever on this VMA.
			 */
			return i ? : -EFAULT;
		}
904 
905 		do {
906 			struct page *page;
907 			unsigned long pfn;
908 
909 			/*
910 			 * If we have a pending SIGKILL, don't keep faulting
911 			 * pages and potentially allocating memory.
912 			 */
913 			if (unlikely(fatal_signal_pending(current))) {
914 				dev_err(atomisp_dev,
915 					"fatal_signal_pending in %s\n",
916 					__func__);
917 				return i ? i : -ERESTARTSYS;
918 			}
919 
920 			ret = follow_pfn(vma, start, &pfn);
921 			if (ret) {
922 				dev_err(atomisp_dev, "follow_pfn() failed\n");
923 				return i ? : -EFAULT;
924 			}
925 
926 			page = pfn_to_page(pfn);
927 			if (IS_ERR(page))
928 				return i ? i : PTR_ERR(page);
929 			if (pages) {
930 				pages[i] = page;
931 				get_page(page);
932 				flush_anon_page(vma, page, start);
933 				flush_dcache_page(page);
934 			}
935 			if (vmas)
936 				vmas[i] = vma;
937 			i++;
938 			start += PAGE_SIZE;
939 			nr_pages--;
940 		} while (nr_pages && start < vma->vm_end);
941 	} while (nr_pages);
942 
943 	return i;
944 }
945 
946 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
947 			    unsigned long start, int nr_pages, int write, int force,
948 			    struct page **pages, struct vm_area_struct **vmas)
949 {
950 	int flags = FOLL_TOUCH;
951 
952 	if (pages)
953 		flags |= FOLL_GET;
954 	if (write)
955 		flags |= FOLL_WRITE;
956 	if (force)
957 		flags |= FOLL_FORCE;
958 
959 	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
960 }
961 
962 /*
963  * Convert user space virtual address into pages list
964  */
965 static int alloc_user_pages(struct hmm_buffer_object *bo,
966 			    const void __user *userptr, bool cached)
967 {
968 	int page_nr;
969 	int i;
970 	struct vm_area_struct *vma;
971 	struct page **pages;
972 
973 	pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
974 	if (unlikely(!pages))
975 		return -ENOMEM;
976 
977 	bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
978 				     GFP_KERNEL);
979 	if (unlikely(!bo->page_obj)) {
980 		kfree(pages);
981 		return -ENOMEM;
982 	}
983 
984 	mutex_unlock(&bo->mutex);
985 	down_read(&current->mm->mmap_sem);
986 	vma = find_vma(current->mm, (unsigned long)userptr);
987 	up_read(&current->mm->mmap_sem);
988 	if (!vma) {
989 		dev_err(atomisp_dev, "find_vma failed\n");
990 		kfree(bo->page_obj);
991 		kfree(pages);
992 		mutex_lock(&bo->mutex);
993 		return -EFAULT;
994 	}
995 	mutex_lock(&bo->mutex);
996 	/*
	 * Handle a frame buffer allocated by another kernel-space driver
	 * and mapped into user space
999 	 */
1000 	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1001 		page_nr = get_pfnmap_pages(current, current->mm,
1002 					   (unsigned long)userptr,
1003 					   (int)(bo->pgnr), 1, 0,
1004 					   pages, NULL);
1005 		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1006 	} else {
		/* Handle a frame buffer allocated in user space */
1008 		mutex_unlock(&bo->mutex);
1009 		page_nr = get_user_pages_fast((unsigned long)userptr,
1010 					      (int)(bo->pgnr), 1, pages);
1011 		mutex_lock(&bo->mutex);
1012 		bo->mem_type = HMM_BO_MEM_TYPE_USER;
1013 	}
1014 
1015 	/* can be written by caller, not forced */
1016 	if (page_nr != bo->pgnr) {
1017 		dev_err(atomisp_dev,
1018 			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
1019 			bo->pgnr, page_nr);
1020 		goto out_of_mem;
1021 	}
1022 
1023 	for (i = 0; i < bo->pgnr; i++) {
1024 		bo->page_obj[i].page = pages[i];
1025 		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1026 	}
1027 	hmm_mem_stat.usr_size += bo->pgnr;
1028 	kfree(pages);
1029 
1030 	return 0;
1031 
1032 out_of_mem:
1033 	for (i = 0; i < page_nr; i++)
1034 		put_page(pages[i]);
1035 	kfree(pages);
1036 	kfree(bo->page_obj);
1037 
1038 	return -ENOMEM;
1039 }
1040 
1041 static void free_user_pages(struct hmm_buffer_object *bo)
1042 {
1043 	int i;
1044 
1045 	for (i = 0; i < bo->pgnr; i++)
1046 		put_page(bo->page_obj[i].page);
1047 	hmm_mem_stat.usr_size -= bo->pgnr;
1048 
1049 	kfree(bo->page_obj);
1050 }
1051 
1052 /*
1053  * allocate/free physical pages for the bo.
1054  *
 * type indicates where the pages come from. currently we have 3 types
 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 *
 * from_highmem is only valid when type is HMM_BO_PRIVATE; if it is set,
 * the allocation will try to get memory from highmem.
 *
 * userptr is only valid when type is HMM_BO_USER; it is the start
 * address of the buffer in the user-space task.
 *
 * from_highmem and userptr are both ignored when type is
 * HMM_BO_SHARE.
1066  */
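/*
 * Illustrative sketch only (error handling omitted; 'bdev' is assumed to be
 * an hmm_bo_device already set up with hmm_bo_device_init(), and 'nbytes'
 * is some buffer size in bytes):
 *
 *	struct hmm_buffer_object *bo;
 *
 *	bo = hmm_bo_alloc(bdev, size_to_pgnr_ceil(nbytes));
 *	if (bo && !hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false))
 *		hmm_bo_bind(bo);
 *
 * Later, hmm_bo_unref(bo) tears everything down again via hmm_bo_release().
 */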
1067 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1068 		       enum hmm_bo_type type, int from_highmem,
1069 		       const void __user *userptr, bool cached)
1070 {
1071 	int ret = -EINVAL;
1072 
1073 	check_bo_null_return(bo, -EINVAL);
1074 
1075 	mutex_lock(&bo->mutex);
1076 	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1077 
1078 	/*
1079 	 * TO DO:
1080 	 * add HMM_BO_USER type
1081 	 */
1082 	if (type == HMM_BO_PRIVATE) {
1083 		ret = alloc_private_pages(bo, from_highmem,
1084 					  cached, &dynamic_pool, &reserved_pool);
1085 	} else if (type == HMM_BO_USER) {
1086 		ret = alloc_user_pages(bo, userptr, cached);
1087 	} else {
1088 		dev_err(atomisp_dev, "invalid buffer type.\n");
1089 		ret = -EINVAL;
1090 	}
1091 	if (ret)
1092 		goto alloc_err;
1093 
1094 	bo->type = type;
1095 
1096 	bo->status |= HMM_BO_PAGE_ALLOCED;
1097 
1098 	mutex_unlock(&bo->mutex);
1099 
1100 	return 0;
1101 
1102 alloc_err:
1103 	mutex_unlock(&bo->mutex);
1104 	dev_err(atomisp_dev, "alloc pages err...\n");
1105 	return ret;
1106 status_err:
1107 	mutex_unlock(&bo->mutex);
1108 	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
1110 	return -EINVAL;
1111 }
1112 
1113 /*
1114  * free physical pages of the bo.
1115  */
1116 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1117 {
1118 	check_bo_null_return_void(bo);
1119 
1120 	mutex_lock(&bo->mutex);
1121 
1122 	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1123 
1124 	/* clear the flag anyway. */
1125 	bo->status &= (~HMM_BO_PAGE_ALLOCED);
1126 
1127 	if (bo->type == HMM_BO_PRIVATE)
1128 		free_private_pages(bo, &dynamic_pool, &reserved_pool);
1129 	else if (bo->type == HMM_BO_USER)
1130 		free_user_pages(bo);
1131 	else
1132 		dev_err(atomisp_dev, "invalid buffer type.\n");
1133 	mutex_unlock(&bo->mutex);
1134 
1135 	return;
1136 
1137 status_err2:
1138 	mutex_unlock(&bo->mutex);
1139 	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
1141 }
1142 
1143 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1144 {
1145 	check_bo_null_return(bo, 0);
1146 
1147 	return bo->status & HMM_BO_PAGE_ALLOCED;
1148 }
1149 
1150 /*
1151  * get physical page info of the bo.
1152  */
1153 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1154 			 struct hmm_page_object **page_obj, int *pgnr)
1155 {
1156 	check_bo_null_return(bo, -EINVAL);
1157 
1158 	mutex_lock(&bo->mutex);
1159 
1160 	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1161 
1162 	*page_obj = bo->page_obj;
1163 	*pgnr = bo->pgnr;
1164 
1165 	mutex_unlock(&bo->mutex);
1166 
1167 	return 0;
1168 
1169 status_err:
1170 	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
1172 	mutex_unlock(&bo->mutex);
1173 	return -EINVAL;
1174 }
1175 
1176 /*
1177  * bind the physical pages to a virtual address space.
1178  */
1179 int hmm_bo_bind(struct hmm_buffer_object *bo)
1180 {
1181 	int ret;
1182 	unsigned int virt;
1183 	struct hmm_bo_device *bdev;
1184 	unsigned int i;
1185 
1186 	check_bo_null_return(bo, -EINVAL);
1187 
1188 	mutex_lock(&bo->mutex);
1189 
1190 	check_bo_status_yes_goto(bo,
1191 				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1192 				 status_err1);
1193 
1194 	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1195 
1196 	bdev = bo->bdev;
1197 
1198 	virt = bo->start;
1199 
1200 	for (i = 0; i < bo->pgnr; i++) {
1201 		ret =
1202 		    isp_mmu_map(&bdev->mmu, virt,
1203 				page_to_phys(bo->page_obj[i].page), 1);
1204 		if (ret)
1205 			goto map_err;
1206 		virt += (1 << PAGE_SHIFT);
1207 	}
1208 
1209 	/*
	 * flush the TLB here.
	 *
	 * theoretically we do not need to flush the TLB as we did not change
	 * any existing address mappings, but Silicon Hive's MMU makes this
	 * necessary: when fetching PTEs (page table entries) into the TLB,
	 * the MMU prefetches additional, possibly INVALID, PTEs for
	 * performance reasons. e.g. if we only set up 1 page mapping, i.e.
	 * update 1 PTE, the MMU may fetch 4 PTEs at once, so the additional
	 * 3 PTEs are invalid and must be flushed.
1219 	 */
1220 	if (bo->start != 0x0)
1221 		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1222 					(bo->pgnr << PAGE_SHIFT));
1223 
1224 	bo->status |= HMM_BO_BINDED;
1225 
1226 	mutex_unlock(&bo->mutex);
1227 
1228 	return 0;
1229 
1230 map_err:
1231 	/* unbind the physical pages with related virtual address space */
1232 	virt = bo->start;
1233 	for ( ; i > 0; i--) {
1234 		isp_mmu_unmap(&bdev->mmu, virt, 1);
1235 		virt += pgnr_to_size(1);
1236 	}
1237 
1238 	mutex_unlock(&bo->mutex);
1239 	dev_err(atomisp_dev,
1240 		"setup MMU address mapping failed.\n");
1241 	return ret;
1242 
1243 status_err2:
1244 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
1246 	return -EINVAL;
1247 status_err1:
1248 	mutex_unlock(&bo->mutex);
1249 	dev_err(atomisp_dev,
		"buffer object not allocated or pages not allocated.\n");
1251 	return -EINVAL;
1252 }
1253 
1254 /*
1255  * unbind the physical pages with related virtual address space.
1256  */
1257 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1258 {
1259 	unsigned int virt;
1260 	struct hmm_bo_device *bdev;
1261 	unsigned int i;
1262 
1263 	check_bo_null_return_void(bo);
1264 
1265 	mutex_lock(&bo->mutex);
1266 
1267 	check_bo_status_yes_goto(bo,
1268 				 HMM_BO_PAGE_ALLOCED |
1269 				 HMM_BO_ALLOCED |
1270 				 HMM_BO_BINDED, status_err);
1271 
1272 	bdev = bo->bdev;
1273 
1274 	virt = bo->start;
1275 
1276 	for (i = 0; i < bo->pgnr; i++) {
1277 		isp_mmu_unmap(&bdev->mmu, virt, 1);
1278 		virt += pgnr_to_size(1);
1279 	}
1280 
1281 	/*
1282 	 * flush TLB as the address mapping has been removed and
1283 	 * related TLBs should be invalidated.
1284 	 */
1285 	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1286 				(bo->pgnr << PAGE_SHIFT));
1287 
1288 	bo->status &= (~HMM_BO_BINDED);
1289 
1290 	mutex_unlock(&bo->mutex);
1291 
1292 	return;
1293 
1294 status_err:
1295 	mutex_unlock(&bo->mutex);
1296 	dev_err(atomisp_dev,
		"buffer object or pages not allocated, or not bound yet.\n");
1298 }
1299 
1300 int hmm_bo_binded(struct hmm_buffer_object *bo)
1301 {
1302 	int ret;
1303 
1304 	check_bo_null_return(bo, 0);
1305 
1306 	mutex_lock(&bo->mutex);
1307 
1308 	ret = bo->status & HMM_BO_BINDED;
1309 
1310 	mutex_unlock(&bo->mutex);
1311 
1312 	return ret;
1313 }
1314 
1315 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1316 {
1317 	struct page **pages;
1318 	int i;
1319 
1320 	check_bo_null_return(bo, NULL);
1321 
1322 	mutex_lock(&bo->mutex);
1323 	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1324 	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1325 		mutex_unlock(&bo->mutex);
1326 		return bo->vmap_addr;
1327 	}
1328 
1329 	/* cached status need to be changed, so vunmap first */
1330 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1331 		vunmap(bo->vmap_addr);
1332 		bo->vmap_addr = NULL;
1333 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1334 	}
1335 
1336 	pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1337 	if (unlikely(!pages)) {
1338 		mutex_unlock(&bo->mutex);
1339 		return NULL;
1340 	}
1341 
1342 	for (i = 0; i < bo->pgnr; i++)
1343 		pages[i] = bo->page_obj[i].page;
1344 
1345 	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1346 			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1347 	if (unlikely(!bo->vmap_addr)) {
1348 		kfree(pages);
1349 		mutex_unlock(&bo->mutex);
1350 		dev_err(atomisp_dev, "vmap failed...\n");
1351 		return NULL;
1352 	}
1353 	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1354 
1355 	kfree(pages);
1356 
1357 	mutex_unlock(&bo->mutex);
1358 	return bo->vmap_addr;
1359 }
1360 
1361 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1362 {
1363 	check_bo_null_return_void(bo);
1364 
1365 	mutex_lock(&bo->mutex);
1366 	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1367 		mutex_unlock(&bo->mutex);
1368 		return;
1369 	}
1370 
1371 	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1372 	mutex_unlock(&bo->mutex);
1373 }
1374 
1375 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1376 {
1377 	check_bo_null_return_void(bo);
1378 
1379 	mutex_lock(&bo->mutex);
1380 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1381 		vunmap(bo->vmap_addr);
1382 		bo->vmap_addr = NULL;
1383 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1384 	}
1385 
1386 	mutex_unlock(&bo->mutex);
1388 }
1389 
1390 void hmm_bo_ref(struct hmm_buffer_object *bo)
1391 {
1392 	check_bo_null_return_void(bo);
1393 
1394 	kref_get(&bo->kref);
1395 }
1396 
1397 static void kref_hmm_bo_release(struct kref *kref)
1398 {
1399 	if (!kref)
1400 		return;
1401 
1402 	hmm_bo_release(kref_to_hmm_bo(kref));
1403 }
1404 
1405 void hmm_bo_unref(struct hmm_buffer_object *bo)
1406 {
1407 	check_bo_null_return_void(bo);
1408 
1409 	kref_put(&bo->kref, kref_hmm_bo_release);
1410 }
1411 
1412 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1413 {
1414 	struct hmm_buffer_object *bo =
1415 	    (struct hmm_buffer_object *)vma->vm_private_data;
1416 
1417 	check_bo_null_return_void(bo);
1418 
1419 	hmm_bo_ref(bo);
1420 
1421 	mutex_lock(&bo->mutex);
1422 
1423 	bo->status |= HMM_BO_MMAPED;
1424 
1425 	bo->mmap_count++;
1426 
1427 	mutex_unlock(&bo->mutex);
1428 }
1429 
1430 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1431 {
1432 	struct hmm_buffer_object *bo =
1433 	    (struct hmm_buffer_object *)vma->vm_private_data;
1434 
1435 	check_bo_null_return_void(bo);
1436 
1437 	hmm_bo_unref(bo);
1438 
1439 	mutex_lock(&bo->mutex);
1440 
1441 	bo->mmap_count--;
1442 
1443 	if (!bo->mmap_count) {
1444 		bo->status &= (~HMM_BO_MMAPED);
1445 		vma->vm_private_data = NULL;
1446 	}
1447 
1448 	mutex_unlock(&bo->mutex);
1449 }
1450 
1451 static const struct vm_operations_struct hmm_bo_vm_ops = {
1452 	.open = hmm_bo_vm_open,
1453 	.close = hmm_bo_vm_close,
1454 };
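
/*
 * hmm_bo_vm_open()/hmm_bo_vm_close() take and drop a kref on the bo and
 * maintain mmap_count / HMM_BO_MMAPED, so a bo cannot be released while
 * user space still has it mapped (see the check at the top of
 * hmm_bo_release()).
 */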
1455 
1456 /*
1457  * mmap the bo to user space.
1458  */
1459 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1460 {
1461 	unsigned int start, end;
1462 	unsigned int virt;
1463 	unsigned int pgnr, i;
1464 	unsigned int pfn;
1465 
1466 	check_bo_null_return(bo, -EINVAL);
1467 
1468 	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1469 
1470 	pgnr = bo->pgnr;
1471 	start = vma->vm_start;
1472 	end = vma->vm_end;
1473 
1474 	/*
	 * the size of the vma's virtual address range must be the same as
	 * the buffer object's size.
1477 	 */
1478 	if ((start + pgnr_to_size(pgnr)) != end) {
1479 		dev_warn(atomisp_dev,
1480 			 "vma's address space size not equal to buffer object's size");
1481 		return -EINVAL;
1482 	}
1483 
1484 	virt = vma->vm_start;
1485 	for (i = 0; i < pgnr; i++) {
1486 		pfn = page_to_pfn(bo->page_obj[i].page);
1487 		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1488 			dev_warn(atomisp_dev,
1489 				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
1490 				 virt, pfn, 1);
1491 			return -EINVAL;
1492 		}
1493 		virt += PAGE_SIZE;
1494 	}
1495 
1496 	vma->vm_private_data = bo;
1497 
1498 	vma->vm_ops = &hmm_bo_vm_ops;
1499 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1500 
1501 	/*
1502 	 * call hmm_bo_vm_open explicitly.
1503 	 */
1504 	hmm_bo_vm_open(vma);
1505 
1506 	return 0;
1507 
1508 status_err:
	dev_err(atomisp_dev, "buffer pages not allocated yet.\n");
1510 	return -EINVAL;
1511 }
1512