/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

struct mmu_rb_handler {
	struct mmu_notifier mn;
	struct rb_root root;
	void *ops_arg;
	spinlock_t lock;        /* protect the RB tree */
	struct mmu_rb_ops *ops;
	struct mm_struct *mm;
	struct work_struct del_work;
	struct list_head del_list;
	struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
				     unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
					    struct mm_struct *,
					    unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
					struct mm_struct *,
					unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static struct mmu_notifier_ops mn_opts = {
	.invalidate_page = mmu_notifier_page,
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

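/*
 * Note: INTERVAL_TREE_DEFINE() above generates the __mmu_int_rb_insert(),
 * __mmu_int_rb_remove(), __mmu_int_rb_iter_first() and
 * __mmu_int_rb_iter_next() helpers used below.  Intervals are stored as
 * inclusive [start, last] byte ranges rounded out to page boundaries by
 * mmu_node_start()/mmu_node_last().  For illustration only (assuming 4 KiB
 * pages): a node with addr = 0x1234 and len = 0x100 covers the interval
 * [0x1000, 0x1fff].
 */
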
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *handlr;
	int ret;

	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
	if (!handlr)
		return -ENOMEM;

	handlr->root = RB_ROOT;
	handlr->ops = ops;
	handlr->ops_arg = ops_arg;
	INIT_HLIST_NODE(&handlr->mn.hlist);
	spin_lock_init(&handlr->lock);
	handlr->mn.ops = &mn_opts;
	handlr->mm = mm;
	INIT_WORK(&handlr->del_work, handle_remove);
	INIT_LIST_HEAD(&handlr->del_list);
	handlr->wq = wq;

	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
	if (ret) {
		kfree(handlr);
		return ret;
	}

	*handler = handlr;
	return 0;
}

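/*
 * Sketch of a typical registration, for illustration only; the ops table,
 * callback names and "priv" structure below are hypothetical and not part
 * of this file:
 *
 *	static struct mmu_rb_ops example_ops = {
 *		.filter		= example_filter,
 *		.insert		= example_insert,
 *		.evict		= example_evict,
 *		.remove		= example_remove,
 *		.invalidate	= example_invalidate,
 *	};
 *
 *	ret = hfi1_mmu_rb_register(priv, current->mm, &example_ops,
 *				   priv->wq, &priv->handler);
 *	if (ret)
 *		return ret;
 */
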
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase(node, &handler->root);
		list_add(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&handler->lock, flags);
	hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
		  mnode->len);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret)
		__mmu_int_rb_remove(mnode, &handler->root);
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

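/*
 * The filter callback used by __mmu_rb_search() is optional.  A minimal
 * sketch of one, assuming the user wants exact-match semantics; the callback
 * name and bool return type are illustrative, not taken from this file:
 *
 *	static bool example_filter(struct mmu_rb_node *node,
 *				   unsigned long addr, unsigned long len)
 *	{
 *		return (node->addr == addr) && (node->len == len);
 *	}
 */
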
struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
					unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node)
		__mmu_int_rb_remove(node, &handler->root);
	spin_unlock_irqrestore(&handler->lock, flags);

	return node;
}

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node, *next;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	for (node = rb_first(&handler->root); node; node = next) {
		next = rb_next(node);
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			list_add(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode,
				     handler->mm);
	}
}

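/*
 * Sketch of an evict callback as used by hfi1_mmu_rb_evict() above; the
 * callback body and the evict_arg layout are hypothetical, for illustration
 * only.  A nonzero return asks for the node to be unlinked and removed, and
 * setting *stop ends the scan early:
 *
 *	struct example_evict_data {
 *		unsigned long cleared;
 *		unsigned long target;
 *	};
 *
 *	static int example_evict(void *ops_arg, struct mmu_rb_node *node,
 *				 void *evict_arg, bool *stop)
 *	{
 *		struct example_evict_data *data = evict_arg;
 *
 *		data->cleared += node->len;
 *		if (data->cleared >= data->target)
 *			*stop = true;
 *		return 1;
 *	}
 */
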
/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier, which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	/* Validity of handler and node pointers has been checked by caller. */
	hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
		  node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node, NULL);
}

static inline void mmu_notifier_page(struct mmu_notifier *mn,
				     struct mm_struct *mm, unsigned long addr)
{
	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}

static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
					    struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	mmu_notifier_mem_invalidate(mn, mm, start, end);
}

static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
		hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
			  node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			list_add(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);
}

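/*
 * Sketch of an invalidate callback as called from
 * mmu_notifier_mem_invalidate() above (hypothetical, for illustration only).
 * A nonzero return causes the node to be unlinked under the handler lock and
 * queued on del_list for the workqueue handler below; this sketch simply
 * drops every node touched by an invalidation:
 *
 *	static int example_invalidate(void *ops_arg, struct mmu_rb_node *node)
 *	{
 *		return 1;
 *	}
 */
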
/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from the handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node, handler->mm);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_sem is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}