/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root root;
	void *ops_arg;
	spinlock_t lock;        /* protect the RB tree */
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct list_head lru_list;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					struct mm_struct *,
					unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

/* Interval tree start callback: page-aligned start of the node's range. */
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

/* Interval tree last callback: last byte of the node's page-aligned range. */
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

/*
 * Allocate a handler for @mm, register its mmu notifier, and return it to
 * the caller through @handler.
 */
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	INIT_LIST_HEAD(&handlr->lru_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

/*
 * Insert @mnode into the interval tree and LRU list.  Fails with -EINVAL if
 * a matching node already overlaps the range.
 */
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&handler->lock, flags);
	hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
		  mnode->len);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

/*
 * Find a matching node and detach it from the tree and LRU list without
 * invoking the remove callback.
 */
struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
					unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}

/*
 * Walk the LRU list from oldest to newest entry, letting the caller's evict
 * callback choose which nodes to drop.  The remove callback for the chosen
 * nodes runs after the handler lock has been released.
 */
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier, which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
		  node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

/* Single-page invalidation callback for the mmu notifier. */
static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}

/* Range invalidation callback for the mmu notifier. */
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, mm, start, end);
}

/*
 * Invalidate all nodes overlapping [start, end).  Nodes the user's
 * invalidate callback agrees to drop are moved to the delete list and
 * removed later from the workqueue.
 */
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
			  node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_sem is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}
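
/*
 * Illustrative usage sketch (not part of the driver itself): a caller that
 * caches pinned user buffers might wire up a handler roughly as follows,
 * using only the functions defined above.  The names my_cache, my_ops,
 * my_wq and my_node are hypothetical placeholders; the real users of this
 * interface live elsewhere in the hfi1 driver.
 *
 *	struct mmu_rb_handler *handler;
 *	int ret;
 *
 *	ret = hfi1_mmu_rb_register(my_cache, current->mm, &my_ops, my_wq,
 *				   &handler);
 *	if (ret)
 *		return ret;
 *
 *	ret = hfi1_mmu_rb_insert(handler, &my_node);	;; start tracking a buffer
 *	...
 *	hfi1_mmu_rb_remove(handler, &my_node);		;; done with this buffer
 *	hfi1_mmu_rb_unregister(handler);		;; removes any remaining nodes
 */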