// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/* drivers/infiniband/hw/hfi1/user_pages.c */
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/device.h>
#include <linux/module.h>

#include "hfi.h"

static unsigned long cache_size = 256;
module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");

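/*
 * Because cache_size is declared with S_IWUSR above, it is writable at
 * runtime through the module parameter interface (assuming the standard
 * sysfs layout for the hfi1 module), e.g.:
 *
 *	echo 512 > /sys/module/hfi1/parameters/cache_size
 *
 * A new value is picked up on the next call to hfi1_can_pin_pages();
 * pages that are already pinned are not re-evaluated.
 */
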
/*
 * Determine whether the caller can pin pages.
 *
 * This function should be used in the implementation of buffer caches.
 * The cache implementation should call this function prior to attempting
 * to pin buffer pages in order to determine whether it should do so.
 * The function computes cache limits based on the configured ulimit and
 * cache size. Use of this function is especially important for caches
 * which are not limited in any other way (e.g. by HW resources) and,
 * thus, could keep caching buffers indefinitely.
 */
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages)
{
	unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
		size = (cache_size * (1UL << 20)); /* convert to bytes */
	unsigned int usr_ctxts =
			dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
	bool can_lock = capable(CAP_IPC_LOCK);

	/*
	 * Calculate per-cache size. The calculation below uses only a quarter
	 * of the available per-context limit. This leaves space for other
	 * pinning. Should we worry about shared ctxts?
	 */
	cache_limit = (ulimit / usr_ctxts) / 4;

	/*
	 * If ulimit isn't "unlimited", cap the configured cache size at the
	 * smaller per-cache limit derived from it.
	 */
	if (ulimit != (-1UL) && size > cache_limit)
		size = cache_limit;

	/* Convert to number of pages */
	size = DIV_ROUND_UP(size, PAGE_SIZE);

	pinned = atomic64_read(&mm->pinned_vm);

	/* First, check the absolute limit against all pinned pages. */
	if (pinned + npages >= ulimit && !can_lock)
		return false;

	return ((nlocked + npages) <= size) || can_lock;
}
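
/*
 * Illustrative sketch only (not part of the driver): a pin-down cache
 * would typically pair the check above with the acquire helper below.
 * The "cache" structure and its nlocked counter are hypothetical.
 *
 *	if (!hfi1_can_pin_pages(dd, current->mm, cache->nlocked, npages))
 *		return -ENOMEM;
 *
 *	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages,
 *					 true, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	cache->nlocked += pinned;
 */
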
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
			    bool writable, struct page **pages)
{
	int ret;
	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);

	/*
	 * pin_user_pages_fast() returns the number of pages actually
	 * pinned, which may be fewer than requested; account only for
	 * the pages that were pinned.
	 */
	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
	if (ret < 0)
		return ret;

	atomic64_add(ret, &mm->pinned_vm);

	return ret;
}

void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty)
{
	unpin_user_pages_dirty_lock(p, npages, dirty);

	if (mm) { /* during close after signal, mm can be NULL */
		atomic64_sub(npages, &mm->pinned_vm);
	}
}
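
/*
 * Illustrative teardown sketch only (the "cache" fields are the same
 * hypothetical ones used above): every successful call to
 * hfi1_acquire_user_pages() must eventually be balanced by
 * hfi1_release_user_pages() with the same page count, otherwise the
 * mm->pinned_vm accounting drifts.
 *
 *	hfi1_release_user_pages(mm, cache->pages, cache->nlocked, true);
 *	cache->nlocked = 0;
 */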