/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

static unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;
}

static unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;
}
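
/*
 * Worked example (editorial note, not used at runtime): these helpers round
 * a page count *down* to a power-of-two allocation order.  For nr = 6,
 * fls(6) - 1 == 2, so nr_to_order_bottom(6) == 2 and order_to_nr(2) == 4;
 * a 6-page request is thus served with a 4-page block first and the
 * remaining 2 pages on later iterations of the allocation loop further down.
 */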

static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
			 "hmm_bo_device not inited yet.\n");
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}

static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
    struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && !this->node.rb_left)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *	yes: erase 'this' node and rebalance rbtree, return 'this'.
	 */
	if (!this->next) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *	yes: change the related 'next/prev' pointer,
	 *		return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;
	return temp_bo;
}

static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}

static struct hmm_buffer_object *__bo_search_by_addr_in_range(
    struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}

static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
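
/*
 * Illustration (editorial note, not used by the driver): the free rbtree is
 * keyed by pgnr, and free buffer objects of the same size hang off the tree
 * node in a prev/next chain, e.g. for free bos of 4, 8 and 8 pages:
 *
 *	   [pgnr = 4]        [pgnr = 8] --next--> [pgnr = 8]
 *	 (rbtree node)     (rbtree node)      (list member only)
 *
 * Only the first bo of each size is an rbtree node; __bo_take_off_handling()
 * below distinguishes the four possible positions in this structure.
 */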

static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}

static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take a known bo off the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
	 *	and does not have a linked list after it; to take off this bo,
	 *	we just erase bo directly and rebalance the free rbtree
	 */
	if (!bo->prev && !bo->next) {
		rb_erase(&bo->node, &bdev->free_rbtree);
		/* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node
		 *	and has a linked list; to take off this bo we need to erase bo
		 *	first, then insert bo->next into the free rbtree and rebalance
		 *	the free rbtree
		 */
	} else if (!bo->prev && bo->next) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
		 *	node; bo is the last element of the linked list after the rbtree
		 *	node; to take off this bo, we just need to set the "prev/next"
		 *	pointers to NULL, the free rbtree stays unchanged
		 */
	} else if (bo->prev && !bo->next) {
		bo->prev->next = NULL;
		bo->prev = NULL;
		/* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
		 *	node; bo is in the middle of the linked list after the rbtree
		 *	node; to take off this bo, we just set the "prev/next" pointers
		 *	to NULL, the free rbtree stays unchanged
		 */
	} else if (bo->prev && bo->next) {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}

static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}

/*
 * hmm_bo_device functions.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}
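
/*
 * Usage sketch (editorial illustration, not part of the driver): how a
 * caller is expected to bring up and tear down an hmm_bo_device.  The
 * 'example_mmu_client' parameter and the address window below are
 * hypothetical placeholders; the real driver supplies its own
 * isp_mmu_client and ISP virtual address range.
 */
static int __maybe_unused example_bo_device_setup(struct hmm_bo_device *bdev,
						  struct isp_mmu_client *example_mmu_client)
{
	int ret;

	/* carve out a 256 MiB ISP virtual address window at 0x10000000 */
	ret = hmm_bo_device_init(bdev, example_mmu_client,
				 0x10000000, 0x10000000);
	if (ret)
		return ret;

	/* ... allocate and use buffer objects here ... */

	/* releases every bo and destroys the backing kmem cache */
	hmm_bo_device_exit(bdev);
	return 0;
}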

struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed\n",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}
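
/*
 * Usage sketch (editorial illustration, not part of the driver): reserving
 * and returning a range of ISP virtual address space.  hmm_bo_alloc() only
 * reserves the address range; backing pages are attached separately with
 * hmm_bo_alloc_pages() (see the sketch after that function below).  The
 * 16-page size is an arbitrary example value.
 */
static void __maybe_unused example_bo_alloc_release(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_alloc(bdev, 16);	/* 16 pages of ISP virtual space */
	if (!bo)
		return;

	/* ... attach pages, bind, use the buffer ... */

	/* drops the initial kref taken at allocation time; the range is
	 * merged back into the free rbtree via hmm_bo_release() */
	hmm_bo_unref(bo);
}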

void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIX ME:
	 *
	 * how to destroy the bo when it is still MMAPed?
	 *
	 * ideally, this should not happen, as hmm_bo_release
	 * will only be called when kref reaches 0, and in the mmap
	 * operation hmm_bo_ref will eventually be called.
	 * so, if this happens, something has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}

void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos, even if they are still in use;
	 * they will all be merged back into one big free bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}

int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}

struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s can not find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* skip bo which has no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}
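
/*
 * Usage sketch (editorial illustration, not part of the driver): looking a
 * buffer object back up from an ISP virtual address, e.g. when translating
 * an address handed back by firmware.  The address value is whatever the
 * caller obtained earlier; nothing here is specific to a real call site.
 */
static void __maybe_unused example_bo_lookup(struct hmm_bo_device *bdev,
					     ia_css_ptr isp_virt)
{
	struct hmm_buffer_object *bo;

	/* exact match against a bo's start address */
	bo = hmm_bo_device_search_start(bdev, isp_virt);

	/* or: any bo whose [start, end) range contains the address */
	if (!bo)
		bo = hmm_bo_device_search_in_range(bdev, isp_virt);

	if (bo)
		dev_dbg(atomisp_dev, "bo spans 0x%x-0x%x\n",
			bo->start, bo->end);
}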

static void free_private_bo_pages(struct hmm_buffer_object *bo,
				  struct hmm_pool *dypool,
				  struct hmm_pool *repool,
				  int free_pgnr)
{
	int i, ret;

	for (i = 0; i < free_pgnr; i++) {
		switch (bo->page_obj[i].type) {
		case HMM_PAGE_TYPE_RESERVED:
			if (repool->pops
			    && repool->pops->pool_free_pages) {
				repool->pops->pool_free_pages(repool->pool_info,
							      &bo->page_obj[i]);
				hmm_mem_stat.res_cnt--;
			}
			break;
		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that the pages are from
		 * system memory, so when freeing them they should be put
		 * back into the dynamic pool.
		 */
		case HMM_PAGE_TYPE_DYNAMIC:
		case HMM_PAGE_TYPE_GENERAL:
			if (dypool->pops
			    && dypool->pops->pool_inited
			    && dypool->pops->pool_inited(dypool->pool_info)) {
				if (dypool->pops->pool_free_pages)
					dypool->pops->pool_free_pages(
					    dypool->pool_info,
					    &bo->page_obj[i]);
				break;
			}

			fallthrough;
		/*
		 * if the dynamic memory pool doesn't exist, we need to free
		 * the pages to the system directly.
		 */
		default:
			ret = set_pages_wb(bo->page_obj[i].page, 1);
			if (ret)
				dev_err(atomisp_dev,
					"set page to WB err ...ret = %d\n",
					ret);
			/*
			 * Workaround: set_pages_wb() occasionally returns
			 * -EFAULT, indicating that the page address is not in
			 * the valid range (0xffff880000000000~0xffffc7ffffffffff);
			 * __free_pages() would then panic. It is not known why
			 * the page address becomes invalid; it may be memory
			 * corruption caused by low memory.
			 */
			if (!ret) {
				__free_pages(bo->page_obj[i].page, 0);
				hmm_mem_stat.sys_size--;
			}
			break;
		}
	}
}

/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem,
			       bool cached,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
				     GFP_KERNEL);
	if (unlikely(!bo->page_obj))
		return -ENOMEM;

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from the dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
			     bo->page_obj, pgnr,
			     cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from the reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
			     &bo->page_obj[i], pgnr,
			     cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if we are short of memory, we set order to HMM_MIN_ORDER
		 * every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER, or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purposes.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_RECLAIM | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in the low memory case, if a higher-order allocation
			 * fails, we retry with the minimum order. if that
			 * fails too, there is no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					__func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if allocation fails twice in a row, assume we are
			 * short of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");

					__free_pages(pages, order);

					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if the order was not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;
cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	kfree(bo->page_obj);

	return -ENOMEM;
}

static void free_private_pages(struct hmm_buffer_object *bo,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	free_private_bo_pages(bo, dypool, repool, bo->pgnr);

	kfree(bo->page_obj);
}

static void free_user_pages(struct hmm_buffer_object *bo)
{
	int i;

	hmm_mem_stat.usr_size -= bo->pgnr;

	if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
		unpin_user_pages(bo->pages, bo->pgnr);
	} else {
		for (i = 0; i < bo->pgnr; i++)
			put_page(bo->pages[i]);
	}
	kfree(bo->pages);
	kfree(bo->page_obj);
}

/*
 * Convert a user space virtual address into a pages list
 */
static int alloc_user_pages(struct hmm_buffer_object *bo,
			    const void __user *userptr, bool cached)
{
	int page_nr;
	int i;
	struct vm_area_struct *vma;
	struct page **pages;

	pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!pages))
		return -ENOMEM;

	bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
				     GFP_KERNEL);
	if (unlikely(!bo->page_obj)) {
		kfree(pages);
		return -ENOMEM;
	}

	mutex_unlock(&bo->mutex);
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)userptr);
	up_read(&current->mm->mmap_sem);
	if (!vma) {
		dev_err(atomisp_dev, "find_vma failed\n");
		kfree(bo->page_obj);
		kfree(pages);
		mutex_lock(&bo->mutex);
		return -EFAULT;
	}
	mutex_lock(&bo->mutex);
	/*
	 * Handle a frame buffer allocated in another kernel space driver
	 * and mapped to user space
	 */

	userptr = untagged_addr(userptr);

	bo->pages = pages;

	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
					 FOLL_LONGTERM | FOLL_WRITE,
					 pages, NULL);
		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
	} else {
		/* Handle a frame buffer allocated in user space */
		mutex_unlock(&bo->mutex);
		page_nr = get_user_pages_fast((unsigned long)userptr,
					      (int)(bo->pgnr), 1, pages);
		mutex_lock(&bo->mutex);
		bo->mem_type = HMM_BO_MEM_TYPE_USER;
	}

	dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n",
		__func__,
		bo->pgnr,
		bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);

	hmm_mem_stat.usr_size += bo->pgnr;

	/* can be written by caller, not forced */
	if (page_nr != bo->pgnr) {
		dev_err(atomisp_dev,
			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
			bo->pgnr, page_nr);
		goto out_of_mem;
	}

	for (i = 0; i < bo->pgnr; i++) {
		bo->page_obj[i].page = pages[i];
		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
	}

	return 0;

out_of_mem:

	free_user_pages(bo);

	return -ENOMEM;
}

/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages are from. currently we have 3 types
 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 *
 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
 * try to alloc memory from highmem if from_highmem is set.
 *
 * userptr is only valid when type is HMM_BO_USER; it indicates
 * the start address from the user space task.
 *
 * from_highmem and userptr will both be ignored when type is
 * HMM_BO_SHARE.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       const void __user *userptr, bool cached)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	/*
	 * TO DO:
	 * add HMM_BO_USER type
	 */
	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo, from_highmem,
					  cached, &dynamic_pool, &reserved_pool);
	} else if (type == HMM_BO_USER) {
		ret = alloc_user_pages(bo, userptr, cached);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
	return -EINVAL;
}
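
/*
 * Usage sketch (editorial illustration, not part of the driver): the typical
 * lifecycle of a private buffer -- reserve ISP virtual space, attach system
 * pages, program the ISP MMU, then tear everything down in reverse order.
 * The 32-page size is an arbitrary example value.
 */
static int __maybe_unused example_bo_lifecycle(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_alloc(bdev, 32);
	if (!bo)
		return -ENOMEM;

	/* private (ISP-only) pages, not from highmem, uncached */
	ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
	if (ret)
		goto err_unref;

	ret = hmm_bo_bind(bo);	/* map the pages in the ISP MMU */
	if (ret)
		goto err_free_pages;

	/* ... the ISP can now access [bo->start, bo->end) ... */

	hmm_bo_unbind(bo);
err_free_pages:
	hmm_bo_free_pages(bo);
err_unref:
	hmm_bo_unref(bo);	/* drops the initial kref, frees the range */
	return ret;
}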

/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_pages(bo, &dynamic_pool, &reserved_pool);
	else if (bo->type == HMM_BO_USER)
		free_user_pages(bo);
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
}

int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}

/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr)
{
	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	*page_obj = bo->page_obj;
	*pgnr = bo->pgnr;

	mutex_unlock(&bo->mutex);

	return 0;

status_err:
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
	mutex_unlock(&bo->mutex);
	return -EINVAL;
}

/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret =
		    isp_mmu_map(&bdev->mmu, virt,
				page_to_phys(bo->page_obj[i].page), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush the TLB here.
	 *
	 * theoretically, we do not need to flush the TLB, as we did not
	 * change any existing address mappings; but for Silicon Hive's MMU
	 * this is really needed. When fetching PTEs (page table entries)
	 * into the TLB, the MMU appears to fetch additional, INVALID PTEs
	 * automatically for performance reasons. For example, if we set up
	 * only 1 page address mapping, i.e. update 1 PTE, the MMU fetches
	 * 4 PTEs at a time, so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages from the related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or pages not allocated.\n");
	return -EINVAL;
}

/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush the TLB as the address mapping has been removed and
	 * related TLB entries should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or pages not allocated or not bound yet.\n");
}

int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}

void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	struct page **pages;
	int i;

	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* the cached status needs to change, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
	if (unlikely(!pages)) {
		mutex_unlock(&bo->mutex);
		return NULL;
	}

	for (i = 0; i < bo->pgnr; i++)
		pages[i] = bo->page_obj[i].page;

	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		kfree(pages);
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	kfree(pages);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}
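
/*
 * Usage sketch (editorial illustration, not part of the driver): giving the
 * CPU a kernel virtual mapping of a bo's pages.  With a cached mapping the
 * caller is responsible for flushing the CPU cache before the ISP reads the
 * data, which is what hmm_bo_flush_vmap() below is for.
 */
static void __maybe_unused example_bo_cpu_access(struct hmm_buffer_object *bo)
{
	void *vaddr;

	vaddr = hmm_bo_vmap(bo, true);	/* cached kernel mapping */
	if (!vaddr)
		return;

	memset(vaddr, 0, bo->pgnr << PAGE_SHIFT);	/* CPU writes */
	hmm_bo_flush_vmap(bo);	/* make the writes visible to the ISP */

	hmm_bo_vunmap(bo);
}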

void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}

void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}

static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}

static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};

/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * the vma's virtual address space size and the buffer object's size
	 * must be the same.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->page_obj[i].page);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer pages not allocated yet.\n");
	return -EINVAL;
}
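
/*
 * Usage sketch (editorial illustration, not part of the driver): how a
 * device mmap handler could hand a bo out to user space.  The convention
 * of passing the bo's ISP virtual start address as the mmap offset is
 * hypothetical here; the real driver defines its own lookup scheme.
 */
static int __maybe_unused example_do_mmap(struct hmm_bo_device *bdev,
					  struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo;

	/* hypothetical: user space encodes the bo's start address in the
	 * mmap offset, so the bo can be looked up here */
	bo = hmm_bo_device_search_start(bdev,
					(ia_css_ptr)(vma->vm_pgoff << PAGE_SHIFT));
	if (!bo)
		return -EINVAL;

	/* remaps the bo's pages into the vma and installs hmm_bo_vm_ops */
	return hmm_bo_mmap(vma, bo);
}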