1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Support for Medifield PNW Camera Imaging ISP subsystem. 4 * 5 * Copyright (c) 2010 Intel Corporation. All Rights Reserved. 6 * 7 * Copyright (c) 2010 Silicon Hive www.siliconhive.com. 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License version 11 * 2 as published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * 19 */ 20 /* 21 * This file contains functions for buffer object structure management 22 */ 23 #include <linux/kernel.h> 24 #include <linux/types.h> 25 #include <linux/gfp.h> /* for GFP_ATOMIC */ 26 #include <linux/mm.h> 27 #include <linux/mm_types.h> 28 #include <linux/hugetlb.h> 29 #include <linux/highmem.h> 30 #include <linux/slab.h> /* for kmalloc */ 31 #include <linux/module.h> 32 #include <linux/moduleparam.h> 33 #include <linux/string.h> 34 #include <linux/list.h> 35 #include <linux/errno.h> 36 #include <linux/io.h> 37 #include <asm/current.h> 38 #include <linux/sched/signal.h> 39 #include <linux/file.h> 40 41 #include <asm/set_memory.h> 42 43 #include "atomisp_internal.h" 44 #include "hmm/hmm_common.h" 45 #include "hmm/hmm_bo.h" 46 47 static unsigned int order_to_nr(unsigned int order) 48 { 49 return 1U << order; 50 } 51 52 static unsigned int nr_to_order_bottom(unsigned int nr) 53 { 54 return fls(nr) - 1; 55 } 56 57 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, 58 unsigned int pgnr) 59 { 60 check_bodev_null_return(bdev, -EINVAL); 61 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL, 62 "hmm_bo_device not inited yet.\n"); 63 /* prevent zero size buffer object */ 64 if (pgnr == 0) { 65 dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); 66 return -EINVAL; 67 } 68 69 memset(bo, 0, sizeof(*bo)); 70 mutex_init(&bo->mutex); 71 72 /* init the bo->list HEAD as an element of entire_bo_list */ 73 INIT_LIST_HEAD(&bo->list); 74 75 bo->bdev = bdev; 76 bo->vmap_addr = NULL; 77 bo->status = HMM_BO_FREE; 78 bo->start = bdev->start; 79 bo->pgnr = pgnr; 80 bo->end = bo->start + pgnr_to_size(pgnr); 81 bo->prev = NULL; 82 bo->next = NULL; 83 84 return 0; 85 } 86 87 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree( 88 struct rb_node *node, unsigned int pgnr) 89 { 90 struct hmm_buffer_object *this, *ret_bo, *temp_bo; 91 92 this = rb_entry(node, struct hmm_buffer_object, node); 93 if (this->pgnr == pgnr || 94 (this->pgnr > pgnr && !this->node.rb_left)) { 95 goto remove_bo_and_return; 96 } else { 97 if (this->pgnr < pgnr) { 98 if (!this->node.rb_right) 99 return NULL; 100 ret_bo = __bo_search_and_remove_from_free_rbtree( 101 this->node.rb_right, pgnr); 102 } else { 103 ret_bo = __bo_search_and_remove_from_free_rbtree( 104 this->node.rb_left, pgnr); 105 } 106 if (!ret_bo) { 107 if (this->pgnr > pgnr) 108 goto remove_bo_and_return; 109 else 110 return NULL; 111 } 112 return ret_bo; 113 } 114 115 remove_bo_and_return: 116 /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL. 117 * 1. check if 'this->next' is NULL: 118 * yes: erase 'this' node and rebalance rbtree, return 'this'. 119 */ 120 if (!this->next) { 121 rb_erase(&this->node, &this->bdev->free_rbtree); 122 return this; 123 } 124 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo. 125 * 2. check if 'this->next->next' is NULL: 126 * yes: change the related 'next/prev' pointer, 127 * return 'this->next' but the rbtree stays unchanged. 128 */ 129 temp_bo = this->next; 130 this->next = temp_bo->next; 131 if (temp_bo->next) 132 temp_bo->next->prev = this; 133 temp_bo->next = NULL; 134 temp_bo->prev = NULL; 135 return temp_bo; 136 } 137 138 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root, 139 ia_css_ptr start) 140 { 141 struct rb_node *n = root->rb_node; 142 struct hmm_buffer_object *bo; 143 144 do { 145 bo = rb_entry(n, struct hmm_buffer_object, node); 146 147 if (bo->start > start) { 148 if (!n->rb_left) 149 return NULL; 150 n = n->rb_left; 151 } else if (bo->start < start) { 152 if (!n->rb_right) 153 return NULL; 154 n = n->rb_right; 155 } else { 156 return bo; 157 } 158 } while (n); 159 160 return NULL; 161 } 162 163 static struct hmm_buffer_object *__bo_search_by_addr_in_range( 164 struct rb_root *root, unsigned int start) 165 { 166 struct rb_node *n = root->rb_node; 167 struct hmm_buffer_object *bo; 168 169 do { 170 bo = rb_entry(n, struct hmm_buffer_object, node); 171 172 if (bo->start > start) { 173 if (!n->rb_left) 174 return NULL; 175 n = n->rb_left; 176 } else { 177 if (bo->end > start) 178 return bo; 179 if (!n->rb_right) 180 return NULL; 181 n = n->rb_right; 182 } 183 } while (n); 184 185 return NULL; 186 } 187 188 static void __bo_insert_to_free_rbtree(struct rb_root *root, 189 struct hmm_buffer_object *bo) 190 { 191 struct rb_node **new = &root->rb_node; 192 struct rb_node *parent = NULL; 193 struct hmm_buffer_object *this; 194 unsigned int pgnr = bo->pgnr; 195 196 while (*new) { 197 parent = *new; 198 this = container_of(*new, struct hmm_buffer_object, node); 199 200 if (pgnr < this->pgnr) { 201 new = &((*new)->rb_left); 202 } else if (pgnr > this->pgnr) { 203 new = &((*new)->rb_right); 204 } else { 205 bo->prev = this; 206 bo->next = this->next; 207 if (this->next) 208 this->next->prev = bo; 209 this->next = bo; 210 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; 211 return; 212 } 213 } 214 215 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; 216 217 rb_link_node(&bo->node, parent, new); 218 rb_insert_color(&bo->node, root); 219 } 220 221 static void __bo_insert_to_alloc_rbtree(struct rb_root *root, 222 struct hmm_buffer_object *bo) 223 { 224 struct rb_node **new = &root->rb_node; 225 struct rb_node *parent = NULL; 226 struct hmm_buffer_object *this; 227 unsigned int start = bo->start; 228 229 while (*new) { 230 parent = *new; 231 this = container_of(*new, struct hmm_buffer_object, node); 232 233 if (start < this->start) 234 new = &((*new)->rb_left); 235 else 236 new = &((*new)->rb_right); 237 } 238 239 kref_init(&bo->kref); 240 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED; 241 242 rb_link_node(&bo->node, parent, new); 243 rb_insert_color(&bo->node, root); 244 } 245 246 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev, 247 struct hmm_buffer_object *bo, 248 unsigned int pgnr) 249 { 250 struct hmm_buffer_object *new_bo; 251 unsigned long flags; 252 int ret; 253 254 new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL); 255 if (!new_bo) { 256 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); 257 return NULL; 258 } 259 ret = __bo_init(bdev, new_bo, pgnr); 260 if (ret) { 261 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); 262 kmem_cache_free(bdev->bo_cache, new_bo); 263 return NULL; 264 } 265 266 new_bo->start = bo->start; 267 new_bo->end = new_bo->start + pgnr_to_size(pgnr); 268 bo->start = new_bo->end; 269 bo->pgnr = bo->pgnr - pgnr; 270 271 spin_lock_irqsave(&bdev->list_lock, flags); 272 list_add_tail(&new_bo->list, &bo->list); 273 spin_unlock_irqrestore(&bdev->list_lock, flags); 274 275 return new_bo; 276 } 277 278 static void __bo_take_off_handling(struct hmm_buffer_object *bo) 279 { 280 struct hmm_bo_device *bdev = bo->bdev; 281 /* There are 4 situations when we take off a known bo from free rbtree: 282 * 1. if bo->next && bo->prev == NULL, bo is a rbtree node 283 * and does not have a linked list after bo, to take off this bo, 284 * we just need erase bo directly and rebalance the free rbtree 285 */ 286 if (!bo->prev && !bo->next) { 287 rb_erase(&bo->node, &bdev->free_rbtree); 288 /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node, 289 * and has a linked list,to take off this bo we need erase bo 290 * first, then, insert bo->next into free rbtree and rebalance 291 * the free rbtree 292 */ 293 } else if (!bo->prev && bo->next) { 294 bo->next->prev = NULL; 295 rb_erase(&bo->node, &bdev->free_rbtree); 296 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next); 297 bo->next = NULL; 298 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree 299 * node, bo is the last element of the linked list after rbtree 300 * node, to take off this bo, we just need set the "prev/next" 301 * pointers to NULL, the free rbtree stays unchaged 302 */ 303 } else if (bo->prev && !bo->next) { 304 bo->prev->next = NULL; 305 bo->prev = NULL; 306 /* 4. when bo->prev != NULL && bo->next != NULL ,bo is not a rbtree 307 * node, bo is in the middle of the linked list after rbtree node, 308 * to take off this bo, we just set take the "prev/next" pointers 309 * to NULL, the free rbtree stays unchaged 310 */ 311 } else if (bo->prev && bo->next) { 312 bo->next->prev = bo->prev; 313 bo->prev->next = bo->next; 314 bo->next = NULL; 315 bo->prev = NULL; 316 } 317 } 318 319 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo, 320 struct hmm_buffer_object *next_bo) 321 { 322 struct hmm_bo_device *bdev; 323 unsigned long flags; 324 325 bdev = bo->bdev; 326 next_bo->start = bo->start; 327 next_bo->pgnr = next_bo->pgnr + bo->pgnr; 328 329 spin_lock_irqsave(&bdev->list_lock, flags); 330 list_del(&bo->list); 331 spin_unlock_irqrestore(&bdev->list_lock, flags); 332 333 kmem_cache_free(bo->bdev->bo_cache, bo); 334 335 return next_bo; 336 } 337 338 /* 339 * hmm_bo_device functions. 340 */ 341 int hmm_bo_device_init(struct hmm_bo_device *bdev, 342 struct isp_mmu_client *mmu_driver, 343 unsigned int vaddr_start, 344 unsigned int size) 345 { 346 struct hmm_buffer_object *bo; 347 unsigned long flags; 348 int ret; 349 350 check_bodev_null_return(bdev, -EINVAL); 351 352 ret = isp_mmu_init(&bdev->mmu, mmu_driver); 353 if (ret) { 354 dev_err(atomisp_dev, "isp_mmu_init failed.\n"); 355 return ret; 356 } 357 358 bdev->start = vaddr_start; 359 bdev->pgnr = size_to_pgnr_ceil(size); 360 bdev->size = pgnr_to_size(bdev->pgnr); 361 362 spin_lock_init(&bdev->list_lock); 363 mutex_init(&bdev->rbtree_mutex); 364 365 bdev->flag = HMM_BO_DEVICE_INITED; 366 367 INIT_LIST_HEAD(&bdev->entire_bo_list); 368 bdev->allocated_rbtree = RB_ROOT; 369 bdev->free_rbtree = RB_ROOT; 370 371 bdev->bo_cache = kmem_cache_create("bo_cache", 372 sizeof(struct hmm_buffer_object), 0, 0, NULL); 373 if (!bdev->bo_cache) { 374 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__); 375 isp_mmu_exit(&bdev->mmu); 376 return -ENOMEM; 377 } 378 379 bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL); 380 if (!bo) { 381 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); 382 isp_mmu_exit(&bdev->mmu); 383 return -ENOMEM; 384 } 385 386 ret = __bo_init(bdev, bo, bdev->pgnr); 387 if (ret) { 388 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); 389 kmem_cache_free(bdev->bo_cache, bo); 390 isp_mmu_exit(&bdev->mmu); 391 return -EINVAL; 392 } 393 394 spin_lock_irqsave(&bdev->list_lock, flags); 395 list_add_tail(&bo->list, &bdev->entire_bo_list); 396 spin_unlock_irqrestore(&bdev->list_lock, flags); 397 398 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); 399 400 return 0; 401 } 402 403 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev, 404 unsigned int pgnr) 405 { 406 struct hmm_buffer_object *bo, *new_bo; 407 struct rb_root *root = &bdev->free_rbtree; 408 409 check_bodev_null_return(bdev, NULL); 410 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL, 411 "hmm_bo_device not inited yet.\n"); 412 413 if (pgnr == 0) { 414 dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); 415 return NULL; 416 } 417 418 mutex_lock(&bdev->rbtree_mutex); 419 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr); 420 if (!bo) { 421 mutex_unlock(&bdev->rbtree_mutex); 422 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed", 423 __func__); 424 return NULL; 425 } 426 427 if (bo->pgnr > pgnr) { 428 new_bo = __bo_break_up(bdev, bo, pgnr); 429 if (!new_bo) { 430 mutex_unlock(&bdev->rbtree_mutex); 431 dev_err(atomisp_dev, "%s: __bo_break_up failed!\n", 432 __func__); 433 return NULL; 434 } 435 436 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo); 437 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); 438 439 mutex_unlock(&bdev->rbtree_mutex); 440 return new_bo; 441 } 442 443 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo); 444 445 mutex_unlock(&bdev->rbtree_mutex); 446 return bo; 447 } 448 449 void hmm_bo_release(struct hmm_buffer_object *bo) 450 { 451 struct hmm_bo_device *bdev = bo->bdev; 452 struct hmm_buffer_object *next_bo, *prev_bo; 453 454 mutex_lock(&bdev->rbtree_mutex); 455 456 /* 457 * FIX ME: 458 * 459 * how to destroy the bo when it is stilled MMAPED? 460 * 461 * ideally, this will not happened as hmm_bo_release 462 * will only be called when kref reaches 0, and in mmap 463 * operation the hmm_bo_ref will eventually be called. 464 * so, if this happened, something goes wrong. 465 */ 466 if (bo->status & HMM_BO_MMAPED) { 467 mutex_unlock(&bdev->rbtree_mutex); 468 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n"); 469 return; 470 } 471 472 if (bo->status & HMM_BO_BINDED) { 473 dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n"); 474 hmm_bo_unbind(bo); 475 } 476 477 if (bo->status & HMM_BO_PAGE_ALLOCED) { 478 dev_warn(atomisp_dev, "the pages is not freed, free pages first\n"); 479 hmm_bo_free_pages(bo); 480 } 481 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { 482 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n"); 483 hmm_bo_vunmap(bo); 484 } 485 486 rb_erase(&bo->node, &bdev->allocated_rbtree); 487 488 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list); 489 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list); 490 491 if (bo->list.prev != &bdev->entire_bo_list && 492 prev_bo->end == bo->start && 493 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { 494 __bo_take_off_handling(prev_bo); 495 bo = __bo_merge(prev_bo, bo); 496 } 497 498 if (bo->list.next != &bdev->entire_bo_list && 499 next_bo->start == bo->end && 500 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { 501 __bo_take_off_handling(next_bo); 502 bo = __bo_merge(bo, next_bo); 503 } 504 505 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); 506 507 mutex_unlock(&bdev->rbtree_mutex); 508 return; 509 } 510 511 void hmm_bo_device_exit(struct hmm_bo_device *bdev) 512 { 513 struct hmm_buffer_object *bo; 514 unsigned long flags; 515 516 dev_dbg(atomisp_dev, "%s: entering!\n", __func__); 517 518 check_bodev_null_return_void(bdev); 519 520 /* 521 * release all allocated bos even they a in use 522 * and all bos will be merged into a big bo 523 */ 524 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree)) 525 hmm_bo_release( 526 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node)); 527 528 dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n", 529 __func__); 530 531 /* free all bos to release all ISP virtual memory */ 532 while (!list_empty(&bdev->entire_bo_list)) { 533 bo = list_to_hmm_bo(bdev->entire_bo_list.next); 534 535 spin_lock_irqsave(&bdev->list_lock, flags); 536 list_del(&bo->list); 537 spin_unlock_irqrestore(&bdev->list_lock, flags); 538 539 kmem_cache_free(bdev->bo_cache, bo); 540 } 541 542 dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__); 543 544 kmem_cache_destroy(bdev->bo_cache); 545 546 isp_mmu_exit(&bdev->mmu); 547 } 548 549 int hmm_bo_device_inited(struct hmm_bo_device *bdev) 550 { 551 check_bodev_null_return(bdev, -EINVAL); 552 553 return bdev->flag == HMM_BO_DEVICE_INITED; 554 } 555 556 int hmm_bo_allocated(struct hmm_buffer_object *bo) 557 { 558 check_bo_null_return(bo, 0); 559 560 return bo->status & HMM_BO_ALLOCED; 561 } 562 563 struct hmm_buffer_object *hmm_bo_device_search_start( 564 struct hmm_bo_device *bdev, ia_css_ptr vaddr) 565 { 566 struct hmm_buffer_object *bo; 567 568 check_bodev_null_return(bdev, NULL); 569 570 mutex_lock(&bdev->rbtree_mutex); 571 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr); 572 if (!bo) { 573 mutex_unlock(&bdev->rbtree_mutex); 574 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n", 575 __func__, vaddr); 576 return NULL; 577 } 578 mutex_unlock(&bdev->rbtree_mutex); 579 580 return bo; 581 } 582 583 struct hmm_buffer_object *hmm_bo_device_search_in_range( 584 struct hmm_bo_device *bdev, unsigned int vaddr) 585 { 586 struct hmm_buffer_object *bo; 587 588 check_bodev_null_return(bdev, NULL); 589 590 mutex_lock(&bdev->rbtree_mutex); 591 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr); 592 if (!bo) { 593 mutex_unlock(&bdev->rbtree_mutex); 594 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n", 595 __func__, vaddr); 596 return NULL; 597 } 598 mutex_unlock(&bdev->rbtree_mutex); 599 600 return bo; 601 } 602 603 struct hmm_buffer_object *hmm_bo_device_search_vmap_start( 604 struct hmm_bo_device *bdev, const void *vaddr) 605 { 606 struct list_head *pos; 607 struct hmm_buffer_object *bo; 608 unsigned long flags; 609 610 check_bodev_null_return(bdev, NULL); 611 612 spin_lock_irqsave(&bdev->list_lock, flags); 613 list_for_each(pos, &bdev->entire_bo_list) { 614 bo = list_to_hmm_bo(pos); 615 /* pass bo which has no vm_node allocated */ 616 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE) 617 continue; 618 if (bo->vmap_addr == vaddr) 619 goto found; 620 } 621 spin_unlock_irqrestore(&bdev->list_lock, flags); 622 return NULL; 623 found: 624 spin_unlock_irqrestore(&bdev->list_lock, flags); 625 return bo; 626 } 627 628 static void free_private_bo_pages(struct hmm_buffer_object *bo, 629 int free_pgnr) 630 { 631 int i, ret; 632 633 for (i = 0; i < free_pgnr; i++) { 634 ret = set_pages_wb(bo->pages[i], 1); 635 if (ret) 636 dev_err(atomisp_dev, 637 "set page to WB err ...ret = %d\n", 638 ret); 639 /* 640 W/A: set_pages_wb seldom return value = -EFAULT 641 indicate that address of page is not in valid 642 range(0xffff880000000000~0xffffc7ffffffffff) 643 then, _free_pages would panic; Do not know why page 644 address be valid,it maybe memory corruption by lowmemory 645 */ 646 if (!ret) { 647 __free_pages(bo->pages[i], 0); 648 } 649 } 650 } 651 652 /*Allocate pages which will be used only by ISP*/ 653 static int alloc_private_pages(struct hmm_buffer_object *bo) 654 { 655 int ret; 656 unsigned int pgnr, order, blk_pgnr, alloc_pgnr; 657 struct page *pages; 658 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */ 659 int i, j; 660 int failure_number = 0; 661 bool reduce_order = false; 662 bool lack_mem = true; 663 664 pgnr = bo->pgnr; 665 666 i = 0; 667 alloc_pgnr = 0; 668 669 while (pgnr) { 670 order = nr_to_order_bottom(pgnr); 671 /* 672 * if be short of memory, we will set order to 0 673 * everytime. 674 */ 675 if (lack_mem) 676 order = HMM_MIN_ORDER; 677 else if (order > HMM_MAX_ORDER) 678 order = HMM_MAX_ORDER; 679 retry: 680 /* 681 * When order > HMM_MIN_ORDER, for performance reasons we don't 682 * want alloc_pages() to sleep. In case it fails and fallbacks 683 * to HMM_MIN_ORDER or in case the requested order is originally 684 * the minimum value, we can allow alloc_pages() to sleep for 685 * robustness purpose. 686 * 687 * REVISIT: why __GFP_FS is necessary? 688 */ 689 if (order == HMM_MIN_ORDER) { 690 gfp &= ~GFP_NOWAIT; 691 gfp |= __GFP_RECLAIM | __GFP_FS; 692 } 693 694 pages = alloc_pages(gfp, order); 695 if (unlikely(!pages)) { 696 /* 697 * in low memory case, if allocation page fails, 698 * we turn to try if order=0 allocation could 699 * succeed. if order=0 fails too, that means there is 700 * no memory left. 701 */ 702 if (order == HMM_MIN_ORDER) { 703 dev_err(atomisp_dev, 704 "%s: cannot allocate pages\n", 705 __func__); 706 goto cleanup; 707 } 708 order = HMM_MIN_ORDER; 709 failure_number++; 710 reduce_order = true; 711 /* 712 * if fail two times continuously, we think be short 713 * of memory now. 714 */ 715 if (failure_number == 2) { 716 lack_mem = true; 717 failure_number = 0; 718 } 719 goto retry; 720 } else { 721 blk_pgnr = order_to_nr(order); 722 723 /* 724 * set memory to uncacheable -- UC_MINUS 725 */ 726 ret = set_pages_uc(pages, blk_pgnr); 727 if (ret) { 728 dev_err(atomisp_dev, 729 "set page uncacheablefailed.\n"); 730 731 __free_pages(pages, order); 732 733 goto cleanup; 734 } 735 736 for (j = 0; j < blk_pgnr; j++, i++) { 737 bo->pages[i] = pages + j; 738 } 739 740 pgnr -= blk_pgnr; 741 742 /* 743 * if order is not reduced this time, clear 744 * failure_number. 745 */ 746 if (reduce_order) 747 reduce_order = false; 748 else 749 failure_number = 0; 750 } 751 } 752 753 return 0; 754 cleanup: 755 alloc_pgnr = i; 756 free_private_bo_pages(bo, alloc_pgnr); 757 return -ENOMEM; 758 } 759 760 static void free_user_pages(struct hmm_buffer_object *bo, 761 unsigned int page_nr) 762 { 763 int i; 764 765 if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) { 766 unpin_user_pages(bo->pages, page_nr); 767 } else { 768 for (i = 0; i < page_nr; i++) 769 put_page(bo->pages[i]); 770 } 771 } 772 773 /* 774 * Convert user space virtual address into pages list 775 */ 776 static int alloc_user_pages(struct hmm_buffer_object *bo, 777 const void __user *userptr) 778 { 779 int page_nr; 780 struct vm_area_struct *vma; 781 782 mutex_unlock(&bo->mutex); 783 mmap_read_lock(current->mm); 784 vma = find_vma(current->mm, (unsigned long)userptr); 785 mmap_read_unlock(current->mm); 786 if (!vma) { 787 dev_err(atomisp_dev, "find_vma failed\n"); 788 mutex_lock(&bo->mutex); 789 return -EFAULT; 790 } 791 mutex_lock(&bo->mutex); 792 /* 793 * Handle frame buffer allocated in other kerenl space driver 794 * and map to user space 795 */ 796 797 userptr = untagged_addr(userptr); 798 799 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 800 page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr, 801 FOLL_LONGTERM | FOLL_WRITE, 802 bo->pages, NULL); 803 bo->mem_type = HMM_BO_MEM_TYPE_PFN; 804 } else { 805 /*Handle frame buffer allocated in user space*/ 806 mutex_unlock(&bo->mutex); 807 page_nr = get_user_pages_fast((unsigned long)userptr, 808 (int)(bo->pgnr), 1, bo->pages); 809 mutex_lock(&bo->mutex); 810 bo->mem_type = HMM_BO_MEM_TYPE_USER; 811 } 812 813 dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n", 814 __func__, 815 bo->pgnr, 816 bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr); 817 818 /* can be written by caller, not forced */ 819 if (page_nr != bo->pgnr) { 820 dev_err(atomisp_dev, 821 "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n", 822 bo->pgnr, page_nr); 823 if (page_nr < 0) 824 page_nr = 0; 825 goto out_of_mem; 826 } 827 828 return 0; 829 830 out_of_mem: 831 832 free_user_pages(bo, page_nr); 833 834 return -ENOMEM; 835 } 836 837 /* 838 * allocate/free physical pages for the bo. 839 * 840 * type indicate where are the pages from. currently we have 3 types 841 * of memory: HMM_BO_PRIVATE, HMM_BO_USER. 842 * 843 * userptr is only valid when type is HMM_BO_USER, it indicates 844 * the start address from user space task. 845 */ 846 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, 847 enum hmm_bo_type type, 848 const void __user *userptr) 849 { 850 int ret = -EINVAL; 851 852 check_bo_null_return(bo, -EINVAL); 853 854 mutex_lock(&bo->mutex); 855 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); 856 857 bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); 858 if (unlikely(!bo->pages)) { 859 ret = -ENOMEM; 860 goto alloc_err; 861 } 862 863 /* 864 * TO DO: 865 * add HMM_BO_USER type 866 */ 867 if (type == HMM_BO_PRIVATE) { 868 ret = alloc_private_pages(bo); 869 } else if (type == HMM_BO_USER) { 870 ret = alloc_user_pages(bo, userptr); 871 } else { 872 dev_err(atomisp_dev, "invalid buffer type.\n"); 873 ret = -EINVAL; 874 } 875 if (ret) 876 goto alloc_err; 877 878 bo->type = type; 879 880 bo->status |= HMM_BO_PAGE_ALLOCED; 881 882 mutex_unlock(&bo->mutex); 883 884 return 0; 885 886 alloc_err: 887 kfree(bo->pages); 888 mutex_unlock(&bo->mutex); 889 dev_err(atomisp_dev, "alloc pages err...\n"); 890 return ret; 891 status_err: 892 mutex_unlock(&bo->mutex); 893 dev_err(atomisp_dev, 894 "buffer object has already page allocated.\n"); 895 return -EINVAL; 896 } 897 898 /* 899 * free physical pages of the bo. 900 */ 901 void hmm_bo_free_pages(struct hmm_buffer_object *bo) 902 { 903 check_bo_null_return_void(bo); 904 905 mutex_lock(&bo->mutex); 906 907 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2); 908 909 /* clear the flag anyway. */ 910 bo->status &= (~HMM_BO_PAGE_ALLOCED); 911 912 if (bo->type == HMM_BO_PRIVATE) 913 free_private_bo_pages(bo, bo->pgnr); 914 else if (bo->type == HMM_BO_USER) 915 free_user_pages(bo, bo->pgnr); 916 else 917 dev_err(atomisp_dev, "invalid buffer type.\n"); 918 919 kfree(bo->pages); 920 mutex_unlock(&bo->mutex); 921 922 return; 923 924 status_err2: 925 mutex_unlock(&bo->mutex); 926 dev_err(atomisp_dev, 927 "buffer object not page allocated yet.\n"); 928 } 929 930 int hmm_bo_page_allocated(struct hmm_buffer_object *bo) 931 { 932 check_bo_null_return(bo, 0); 933 934 return bo->status & HMM_BO_PAGE_ALLOCED; 935 } 936 937 /* 938 * bind the physical pages to a virtual address space. 939 */ 940 int hmm_bo_bind(struct hmm_buffer_object *bo) 941 { 942 int ret; 943 unsigned int virt; 944 struct hmm_bo_device *bdev; 945 unsigned int i; 946 947 check_bo_null_return(bo, -EINVAL); 948 949 mutex_lock(&bo->mutex); 950 951 check_bo_status_yes_goto(bo, 952 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED, 953 status_err1); 954 955 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2); 956 957 bdev = bo->bdev; 958 959 virt = bo->start; 960 961 for (i = 0; i < bo->pgnr; i++) { 962 ret = 963 isp_mmu_map(&bdev->mmu, virt, 964 page_to_phys(bo->pages[i]), 1); 965 if (ret) 966 goto map_err; 967 virt += (1 << PAGE_SHIFT); 968 } 969 970 /* 971 * flush TBL here. 972 * 973 * theoretically, we donot need to flush TLB as we didnot change 974 * any existed address mappings, but for Silicon Hive's MMU, its 975 * really a bug here. I guess when fetching PTEs (page table entity) 976 * to TLB, its MMU will fetch additional INVALID PTEs automatically 977 * for performance issue. EX, we only set up 1 page address mapping, 978 * meaning updating 1 PTE, but the MMU fetches 4 PTE at one time, 979 * so the additional 3 PTEs are invalid. 980 */ 981 if (bo->start != 0x0) 982 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, 983 (bo->pgnr << PAGE_SHIFT)); 984 985 bo->status |= HMM_BO_BINDED; 986 987 mutex_unlock(&bo->mutex); 988 989 return 0; 990 991 map_err: 992 /* unbind the physical pages with related virtual address space */ 993 virt = bo->start; 994 for ( ; i > 0; i--) { 995 isp_mmu_unmap(&bdev->mmu, virt, 1); 996 virt += pgnr_to_size(1); 997 } 998 999 mutex_unlock(&bo->mutex); 1000 dev_err(atomisp_dev, 1001 "setup MMU address mapping failed.\n"); 1002 return ret; 1003 1004 status_err2: 1005 mutex_unlock(&bo->mutex); 1006 dev_err(atomisp_dev, "buffer object already binded.\n"); 1007 return -EINVAL; 1008 status_err1: 1009 mutex_unlock(&bo->mutex); 1010 dev_err(atomisp_dev, 1011 "buffer object vm_node or page not allocated.\n"); 1012 return -EINVAL; 1013 } 1014 1015 /* 1016 * unbind the physical pages with related virtual address space. 1017 */ 1018 void hmm_bo_unbind(struct hmm_buffer_object *bo) 1019 { 1020 unsigned int virt; 1021 struct hmm_bo_device *bdev; 1022 unsigned int i; 1023 1024 check_bo_null_return_void(bo); 1025 1026 mutex_lock(&bo->mutex); 1027 1028 check_bo_status_yes_goto(bo, 1029 HMM_BO_PAGE_ALLOCED | 1030 HMM_BO_ALLOCED | 1031 HMM_BO_BINDED, status_err); 1032 1033 bdev = bo->bdev; 1034 1035 virt = bo->start; 1036 1037 for (i = 0; i < bo->pgnr; i++) { 1038 isp_mmu_unmap(&bdev->mmu, virt, 1); 1039 virt += pgnr_to_size(1); 1040 } 1041 1042 /* 1043 * flush TLB as the address mapping has been removed and 1044 * related TLBs should be invalidated. 1045 */ 1046 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, 1047 (bo->pgnr << PAGE_SHIFT)); 1048 1049 bo->status &= (~HMM_BO_BINDED); 1050 1051 mutex_unlock(&bo->mutex); 1052 1053 return; 1054 1055 status_err: 1056 mutex_unlock(&bo->mutex); 1057 dev_err(atomisp_dev, 1058 "buffer vm or page not allocated or not binded yet.\n"); 1059 } 1060 1061 int hmm_bo_binded(struct hmm_buffer_object *bo) 1062 { 1063 int ret; 1064 1065 check_bo_null_return(bo, 0); 1066 1067 mutex_lock(&bo->mutex); 1068 1069 ret = bo->status & HMM_BO_BINDED; 1070 1071 mutex_unlock(&bo->mutex); 1072 1073 return ret; 1074 } 1075 1076 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached) 1077 { 1078 check_bo_null_return(bo, NULL); 1079 1080 mutex_lock(&bo->mutex); 1081 if (((bo->status & HMM_BO_VMAPED) && !cached) || 1082 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) { 1083 mutex_unlock(&bo->mutex); 1084 return bo->vmap_addr; 1085 } 1086 1087 /* cached status need to be changed, so vunmap first */ 1088 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { 1089 vunmap(bo->vmap_addr); 1090 bo->vmap_addr = NULL; 1091 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); 1092 } 1093 1094 bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP, 1095 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE); 1096 if (unlikely(!bo->vmap_addr)) { 1097 mutex_unlock(&bo->mutex); 1098 dev_err(atomisp_dev, "vmap failed...\n"); 1099 return NULL; 1100 } 1101 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED); 1102 1103 mutex_unlock(&bo->mutex); 1104 return bo->vmap_addr; 1105 } 1106 1107 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo) 1108 { 1109 check_bo_null_return_void(bo); 1110 1111 mutex_lock(&bo->mutex); 1112 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) { 1113 mutex_unlock(&bo->mutex); 1114 return; 1115 } 1116 1117 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE); 1118 mutex_unlock(&bo->mutex); 1119 } 1120 1121 void hmm_bo_vunmap(struct hmm_buffer_object *bo) 1122 { 1123 check_bo_null_return_void(bo); 1124 1125 mutex_lock(&bo->mutex); 1126 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { 1127 vunmap(bo->vmap_addr); 1128 bo->vmap_addr = NULL; 1129 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); 1130 } 1131 1132 mutex_unlock(&bo->mutex); 1133 return; 1134 } 1135 1136 void hmm_bo_ref(struct hmm_buffer_object *bo) 1137 { 1138 check_bo_null_return_void(bo); 1139 1140 kref_get(&bo->kref); 1141 } 1142 1143 static void kref_hmm_bo_release(struct kref *kref) 1144 { 1145 if (!kref) 1146 return; 1147 1148 hmm_bo_release(kref_to_hmm_bo(kref)); 1149 } 1150 1151 void hmm_bo_unref(struct hmm_buffer_object *bo) 1152 { 1153 check_bo_null_return_void(bo); 1154 1155 kref_put(&bo->kref, kref_hmm_bo_release); 1156 } 1157 1158 static void hmm_bo_vm_open(struct vm_area_struct *vma) 1159 { 1160 struct hmm_buffer_object *bo = 1161 (struct hmm_buffer_object *)vma->vm_private_data; 1162 1163 check_bo_null_return_void(bo); 1164 1165 hmm_bo_ref(bo); 1166 1167 mutex_lock(&bo->mutex); 1168 1169 bo->status |= HMM_BO_MMAPED; 1170 1171 bo->mmap_count++; 1172 1173 mutex_unlock(&bo->mutex); 1174 } 1175 1176 static void hmm_bo_vm_close(struct vm_area_struct *vma) 1177 { 1178 struct hmm_buffer_object *bo = 1179 (struct hmm_buffer_object *)vma->vm_private_data; 1180 1181 check_bo_null_return_void(bo); 1182 1183 hmm_bo_unref(bo); 1184 1185 mutex_lock(&bo->mutex); 1186 1187 bo->mmap_count--; 1188 1189 if (!bo->mmap_count) { 1190 bo->status &= (~HMM_BO_MMAPED); 1191 vma->vm_private_data = NULL; 1192 } 1193 1194 mutex_unlock(&bo->mutex); 1195 } 1196 1197 static const struct vm_operations_struct hmm_bo_vm_ops = { 1198 .open = hmm_bo_vm_open, 1199 .close = hmm_bo_vm_close, 1200 }; 1201 1202 /* 1203 * mmap the bo to user space. 1204 */ 1205 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo) 1206 { 1207 unsigned int start, end; 1208 unsigned int virt; 1209 unsigned int pgnr, i; 1210 unsigned int pfn; 1211 1212 check_bo_null_return(bo, -EINVAL); 1213 1214 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); 1215 1216 pgnr = bo->pgnr; 1217 start = vma->vm_start; 1218 end = vma->vm_end; 1219 1220 /* 1221 * check vma's virtual address space size and buffer object's size. 1222 * must be the same. 1223 */ 1224 if ((start + pgnr_to_size(pgnr)) != end) { 1225 dev_warn(atomisp_dev, 1226 "vma's address space size not equal to buffer object's size"); 1227 return -EINVAL; 1228 } 1229 1230 virt = vma->vm_start; 1231 for (i = 0; i < pgnr; i++) { 1232 pfn = page_to_pfn(bo->pages[i]); 1233 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) { 1234 dev_warn(atomisp_dev, 1235 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n", 1236 virt, pfn, 1); 1237 return -EINVAL; 1238 } 1239 virt += PAGE_SIZE; 1240 } 1241 1242 vma->vm_private_data = bo; 1243 1244 vma->vm_ops = &hmm_bo_vm_ops; 1245 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 1246 1247 /* 1248 * call hmm_bo_vm_open explicitly. 1249 */ 1250 hmm_bo_vm_open(vma); 1251 1252 return 0; 1253 1254 status_err: 1255 dev_err(atomisp_dev, "buffer page not allocated yet.\n"); 1256 return -EINVAL; 1257 } 1258