/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/device.h>
#include <linux/module.h>

#include "hfi.h"

static unsigned long cache_size = 256;
module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");

/*
 * Determine whether the caller can pin pages.
 *
 * This function should be used in the implementation of buffer caches.
 * The cache implementation should call this function prior to attempting
 * to pin buffer pages in order to determine whether it should do so.
 * The function computes cache limits based on the configured ulimit and
 * cache size.  Use of this function is especially important for caches
 * which are not limited in any other way (e.g. by HW resources) and,
 * thus, could keep caching buffers indefinitely.
 */
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages)
{
	unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
		size = (cache_size * (1UL << 20)); /* convert to bytes */
	unsigned int usr_ctxts =
			dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
	bool can_lock = capable(CAP_IPC_LOCK);

	/*
	 * Calculate the per-cache size.  The calculation below uses only a
	 * quarter of the available per-context limit.  This leaves space
	 * for other pinning.  Should we worry about shared ctxts?
	 */
	cache_limit = (ulimit / usr_ctxts) / 4;

	/*
	 * If ulimit isn't set to "unlimited" and the per-context limit is
	 * smaller than cache_size, cap the cache at that limit.
	 */
	if (ulimit != (-1UL) && size > cache_limit)
		size = cache_limit;

	/* Convert to number of pages */
	size = DIV_ROUND_UP(size, PAGE_SIZE);

	pinned = atomic64_read(&mm->pinned_vm);

	/* First, check the absolute limit against all pinned pages. */
	if (pinned + npages >= ulimit && !can_lock)
		return false;

	return ((nlocked + npages) <= size) || can_lock;
}

/*
 * Pin @npages of user memory starting at @vaddr and charge them to the
 * mm's pinned_vm counter.  Returns the number of pages pinned or a
 * negative errno.
 */
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
			    size_t npages, bool writable, struct page **pages)
{
	int ret;
	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);

	ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
	if (ret < 0)
		return ret;

	atomic64_add(ret, &mm->pinned_vm);

	return ret;
}

/*
 * Unpin pages previously acquired with hfi1_acquire_user_pages(),
 * optionally marking them dirty, and drop them from the mm's pinned_vm
 * accounting.
 */
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty)
{
	put_user_pages_dirty_lock(p, npages, dirty);

	if (mm) { /* during close after signal, mm can be NULL */
		atomic64_sub(npages, &mm->pinned_vm);
	}
}
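
/*
 * Illustrative sketch (not compiled, not part of the driver): one way a
 * pinned-page cache could combine the helpers above, as the kernel-doc
 * comment on hfi1_can_pin_pages() suggests.  hfi1_can_pin_pages() is
 * consulted with the cache's current page count before any new pinning,
 * and hfi1_release_user_pages() undoes the accounting if the pin falls
 * short.  The names "struct example_cache" and example_cache_pin() are
 * hypothetical and exist only for illustration; the driver's real callers
 * (e.g. the SDMA and expected-receive caching code) follow a similar
 * pattern.
 */
#if 0	/* example only */
struct example_cache {
	struct hfi1_devdata *dd;
	struct mm_struct *mm;
	u32 nlocked;		/* pages currently pinned by this cache */
};

static int example_cache_pin(struct example_cache *cache, unsigned long vaddr,
			     u32 npages, struct page **pages)
{
	int pinned;

	/* Refuse to grow the cache past the computed per-context limit. */
	if (!hfi1_can_pin_pages(cache->dd, cache->mm, cache->nlocked, npages))
		return -ENOMEM;

	pinned = hfi1_acquire_user_pages(cache->mm, vaddr, npages, true,
					 pages);
	if (pinned < 0)
		return pinned;

	if ((u32)pinned != npages) {
		/* Partial pin: drop what was pinned and report failure. */
		hfi1_release_user_pages(cache->mm, pages, pinned, false);
		return -EFAULT;
	}

	cache->nlocked += npages;
	return 0;
}
#endif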