/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
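/*
 * mmu_rb keeps an interval RB tree of buffer nodes coherent with
 * userspace address space changes via an mmu notifier.  Callers register
 * a handler with a set of mmu_rb_ops callbacks; nodes invalidated by the
 * notifier are queued and removed from a workqueue so the remove
 * callback is free to sleep.
 *
 * A minimal usage sketch (hypothetical caller; my_arg, my_ops, my_wq,
 * and node are assumed to be set up elsewhere):
 *
 *	struct mmu_rb_handler *h;
 *	int ret = hfi1_mmu_rb_register(my_arg, current->mm, &my_ops,
 *				       my_wq, &h);
 *	if (!ret) {
 *		ret = hfi1_mmu_rb_insert(h, node);
 *		...
 *		hfi1_mmu_rb_unregister(h);
 *	}
 */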
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

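/*
 * One handler instance per registration.  The RB tree, LRU list, and
 * delete list are all protected by 'lock'; nodes queued on 'del_list'
 * are later removed on 'wq' so the ops->remove callback runs without
 * the handler lock held.
 */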
struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root_cached root;
	void *ops_arg;
	spinlock_t lock;        /* protect the RB tree, LRU and delete lists */
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static void mmu_notifier_range_start(struct mmu_notifier *,
				     struct mm_struct *,
				     unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range_start = mmu_notifier_range_start,
};

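/*
 * Generate the __mmu_int_rb_{insert,remove,iter_first,iter_next}()
 * helpers for an interval tree keyed on the page-aligned range
 * [mmu_node_start(node), mmu_node_last(node)], where last is inclusive.
 */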
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

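/*
 * Register a new handler: allocate it, hook up the caller's ops and
 * workqueue, and register the mmu notifier on 'mm'.  On success the new
 * handler is returned through 'handler'; on failure it is freed and the
 * mmu_notifier_register() error is returned.
 */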
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT_CACHED;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	INIT_LIST_HEAD(&handlr->lru_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}

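/*
 * Tear down a handler: unregister the notifier first so no new
 * invalidations arrive, flush any pending deferred removal work, then
 * empty the tree and run the remove callback on every remaining node.
 */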
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

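/*
 * Insert a node into the interval tree and the LRU list.  Fails with
 * -EINVAL if the range already overlaps an existing node (subject to
 * ops->filter), or with the ops->insert return value on callback
 * failure, in which case the node is backed out again.
 */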
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

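/*
 * Look up the node overlapping [addr, addr + len) and remove it from
 * the tree and LRU list unless it matches the range exactly.  Returns
 * true only if a node was removed; '*rb_node' is set to the node found
 * (whether or not it was removed), or NULL if there was no match.
 */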
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

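/*
 * Walk the LRU list from its least recently used end, offering each
 * node to ops->evict.  Evicted nodes are pulled from the tree under the
 * lock and their remove callbacks run after it is dropped; the evict
 * callback can set 'stop' to end the walk early.
 */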
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race with
 * the mmu invalidate notifier, which may be calling the user's remove
 * callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

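/*
 * mmu notifier callback, invoked when part of [start, end) in the
 * registered address space is invalidated.  Overlapping nodes whose
 * ops->invalidate callback returns non-zero are moved to the delete
 * list and freed later from the workqueue, since this callback must
 * not block (see MMU_INVALIDATE_DOES_NOT_BLOCK above).
 */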
static void mmu_notifier_range_start(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Fetch next before 'node' may be removed below. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);
}

/*
 * Call the remove callback for each node on the list.  This is expected
 * to be called with a delete list extracted from the handler.  The
 * caller must not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Workqueue function to remove all nodes that have been queued up for
 * removal.  The key point is that mm->mmap_sem is not held here, so the
 * remove callback can sleep and take it if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}