/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

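/**
 * struct mmu_rb_handler - an interval RB tree of registered regions,
 * kept coherent with its address space through an mmu notifier
 * @mn: mmu notifier used to hook address-space invalidations
 * @root: cached RB root of the interval tree
 * @ops_arg: opaque pointer handed back to every @ops callback
 * @lock: protects the tree, the LRU list and the delete list
 * @ops: caller-supplied callbacks
 * @mm: the address space this handler is registered against
 * @lru_list: nodes, most recently inserted first; eviction scans the tail
 * @del_work: work item that runs deferred remove callbacks
 * @del_list: nodes unlinked in notifier context, awaiting @del_work
 * @wq: workqueue that @del_work is queued on
 */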
struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root_cached root;
	void *ops_arg;
	spinlock_t lock;        /* protects the RB tree and both lists */
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				     struct mm_struct *,
				     unsigned long, unsigned long, bool);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

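/*
 * Build a static interval tree over the page-aligned node ranges.
 * INTERVAL_TREE_DEFINE() generates the __mmu_int_rb_insert(),
 * __mmu_int_rb_remove(), __mmu_int_rb_iter_first() and
 * __mmu_int_rb_iter_next() helpers used throughout this file.
 */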
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}
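
/*
 * Worked example (assuming 4 KiB pages): a node with addr = 0x1234 and
 * len = 0x100 is indexed as the interval [0x1000, 0x1fff].  Rounding
 * the range out to whole pages ensures that any page-granular
 * invalidation overlapping the buffer will find the node.
 */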
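
/**
 * hfi1_mmu_rb_register() - register a handler and its mmu notifier
 * @ops_arg: opaque pointer passed to every @ops callback
 * @mm: the address space to monitor for invalidations
 * @ops: callbacks run on insert, remove, evict and invalidate
 * @wq: workqueue on which deferred removals are executed
 * @handler: on success, set to the newly allocated handler
 *
 * Return: 0 on success, -ENOMEM if allocation fails, or the error
 * returned by mmu_notifier_register().
 */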
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT_CACHED;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	INIT_LIST_HEAD(&handlr->lru_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}
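
/*
 * Usage sketch (illustrative only): the names below are hypothetical,
 * and the prototypes assume the mmu_rb_ops signatures implied by the
 * call sites in this file.  my_insert accepts every node, my_invalidate
 * asks for removal of any invalidated node, and my_remove frees it;
 * ctx, wq and node stand in for caller-owned state, with node a
 * caller-allocated struct mmu_rb_node whose addr and len are set.
 *
 *	static int my_insert(void *ops_arg, struct mmu_rb_node *node)
 *	{
 *		return 0;
 *	}
 *
 *	static int my_invalidate(void *ops_arg, struct mmu_rb_node *node)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_remove(void *ops_arg, struct mmu_rb_node *node)
 *	{
 *		kfree(node);
 *	}
 *
 *	static struct mmu_rb_ops my_ops = {
 *		.insert = my_insert,
 *		.invalidate = my_invalidate,
 *		.remove = my_remove,
 *	};
 *
 *	struct mmu_rb_handler *h;
 *
 *	if (!hfi1_mmu_rb_register(ctx, current->mm, &my_ops, wq, &h)) {
 *		hfi1_mmu_rb_insert(h, node);
 *		hfi1_mmu_rb_unregister(h);
 *	}
 */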
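
/**
 * hfi1_mmu_rb_unregister() - tear down a handler
 * @handler: handler returned by hfi1_mmu_rb_register()
 *
 * Unregisters the mmu notifier, flushes any deferred removal work,
 * runs the remove callback on every node still in the tree, and frees
 * the handler.
 */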
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifier is unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}
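
/**
 * hfi1_mmu_rb_insert() - add a node to the tree and the LRU list
 * @handler: the handler to insert into
 * @mnode: node describing the buffer at [addr, addr + len)
 *
 * Return: 0 on success, -EINVAL if the range overlaps a node already
 * in the tree (subject to the optional filter callback), or the error
 * from the insert callback, in which case the node is unlinked again.
 */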
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}
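
/**
 * hfi1_mmu_rb_remove_unless_exact() - find a node overlapping a range,
 * removing it unless it is an exact match
 * @handler: the handler to search
 * @addr: start of the range
 * @len: length of the range
 * @rb_node: set to the matching node, or NULL if none was found
 *
 * An exact match (identical addr and len) is left in the tree; any
 * other match is unlinked from the tree and the LRU list so the caller
 * can dispose of it.
 *
 * Return: true if a node was removed, false otherwise.
 */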
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}
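
/**
 * hfi1_mmu_rb_evict() - evict nodes from the LRU list
 * @handler: the handler to evict from
 * @evict_arg: opaque argument forwarded to the evict callback
 *
 * Walks the LRU list from its tail (the least recently inserted nodes)
 * and unlinks every node the evict callback accepts, until the
 * callback sets @stop.  The remove callback then runs for each evicted
 * node outside the handler lock.
 */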
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race
 * with the mmu invalidate notifier, which may be calling the user's
 * remove callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}
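
/*
 * mmu notifier callback, invoked when any part of the monitored
 * address space is invalidated.  Every overlapping node is offered to
 * the invalidate callback under the handler lock; accepted nodes are
 * moved to the delete list and their remove callbacks are deferred to
 * the workqueue, where sleeping is allowed (see handle_remove()).
 */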
static int mmu_notifier_range_start(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end,
				     bool blockable)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove function for the given handler and list.  This is
 * expected to be called with a delete list extracted from the handler.
 * The caller must not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_sem is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}