// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"
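
/*
 * Each mmu_rb_handler pairs an interval tree of user virtual-address
 * ranges with an LRU list, and registers an mmu_notifier so that
 * address-space invalidations evict overlapping nodes.  Final teardown
 * of a node (ops->remove()) is deferred to a workqueue so it can run,
 * and possibly sleep, without the handler lock held.
 *
 * Typical caller-side flow, as a sketch (the mmu_rb_ops callbacks and
 * the names used here are driver-specific):
 *
 *	hfi1_mmu_rb_register(arg, &my_ops, wq, &handler);
 *	hfi1_mmu_rb_insert(handler, node);	// cache a pinned range
 *	hfi1_mmu_rb_evict(handler, evict_arg);	// reclaim under pressure
 *	hfi1_mmu_rb_unregister(handler);	// removes remaining nodes
 */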

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

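/*
 * INTERVAL_TREE_DEFINE() generates the static helpers used below:
 * __mmu_int_rb_insert(), __mmu_int_rb_remove(), __mmu_int_rb_iter_first()
 * and __mmu_int_rb_iter_next().  The tree works on closed intervals
 * [start, last], which is why lookups pass (addr + len) - 1 as the
 * upper bound.
 */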
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

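/**
 * hfi1_mmu_rb_register - allocate a handler and register its notifier
 * @ops_arg: opaque pointer passed back to every @ops callback
 * @ops: driver callbacks (filter/insert/evict/invalidate/remove)
 * @wq: workqueue on which deferred node removal will run
 * @handler: on success, set to the new cache-line-aligned handler
 *
 * The mmu_notifier is registered against current->mm.  Returns 0 on
 * success or a negative errno.
 */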
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	void *free_ptr;
	int ret;

	free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL);
	if (!free_ptr)
		return -ENOMEM;

	h = PTR_ALIGN(free_ptr, cache_line_size());
	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;
	h->free_ptr = free_ptr;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(free_ptr);
		return ret;
	}

	*handler = h;
	return 0;
}

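/**
 * hfi1_mmu_rb_unregister - shut down a handler and free its nodes
 * @handler: handler returned by hfi1_mmu_rb_register()
 *
 * Unregisters the mmu_notifier, flushes any pending deferred removal
 * work, then removes every node still in the tree.  The mm is pinned
 * with mmgrab() so it cannot be freed while this runs.
 */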
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler->free_ptr);
}

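/**
 * hfi1_mmu_rb_insert - add a node to the interval tree and LRU list
 * @handler: handler to insert into
 * @mnode: node describing the [addr, addr + len) range to cache
 *
 * Must be called from the task whose mm the handler was registered
 * against.  Returns -EPERM for a foreign mm, -EEXIST if the range
 * overlaps an existing node, or the result of ops->insert().
 */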
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EEXIST;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add_tail(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/*
 * Caller must hold handler->lock.
 *
 * Return the first node overlapping [addr, addr + len) and move it to
 * the tail of the LRU list, marking it most recently used.
 */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;

	trace_hfi1_mmu_rb_search(addr, len);
	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
	if (node)
		list_move_tail(&node->list, &handler->lru_list);
	return node;
}

/*
 * Caller must hold handler->lock.
 *
 * Like hfi1_mmu_rb_get_first(), but honors the optional ops->filter()
 * callback and does not update the LRU list.
 */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

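/**
 * hfi1_mmu_rb_evict - reclaim cached nodes in LRU order
 * @handler: handler to evict from
 * @evict_arg: opaque argument passed through to ops->evict()
 *
 * Walks the LRU list from its least recently used end, letting
 * ops->evict() decide which nodes to drop and when to stop.  The
 * chosen nodes are then removed via ops->remove() with the handler
 * lock released.
 */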
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

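/*
 * mmu_notifier callback: the kernel is about to invalidate the range
 * [range->start, range->end).  Pull every overlapping node that the
 * driver agrees to invalidate out of the tree, and defer the final
 * ops->remove() calls to the workqueue, since they may sleep and this
 * runs in notifier context.
 */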
static int mmu_notifier_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start,
					    range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the handler's remove callback for every node on the list.  This
 * is expected to be called with a delete list extracted from the
 * handler.  The caller must not hold the handler lock, since
 * ops->remove() may sleep.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up for
 * removal.  The key point is that mm->mmap_lock is not held here, so
 * the remove callback is free to sleep and to take mmap_lock itself if
 * it needs to.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}