// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	void *free_ptr;
	int ret;

	free_ptr = kzalloc(sizeof(*h) + cache_line_size() - 1, GFP_KERNEL);
	if (!free_ptr)
		return -ENOMEM;

	h = PTR_ALIGN(free_ptr, cache_line_size());
	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;
	h->free_ptr = free_ptr;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(free_ptr);
		return ret;
	}

	*handler = h;
	return 0;
}

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler->free_ptr);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EEXIST;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add_tail(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;

	trace_hfi1_mmu_rb_search(addr, len);
	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
	if (node)
		list_move_tail(&node->list, &handler->lru_list);
	return node;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}