xref: /openbmc/linux/drivers/infiniband/hw/hfi1/mmu_rb.h (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1145eba1aSCai Huoqing /* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
2f48ad614SDennis Dalessandro /*
33d2a9d64SDennis Dalessandro  * Copyright(c) 2020 Cornelis Networks, Inc.
4f48ad614SDennis Dalessandro  * Copyright(c) 2016 Intel Corporation.
5f48ad614SDennis Dalessandro  */
6145eba1aSCai Huoqing 
7f48ad614SDennis Dalessandro #ifndef _HFI1_MMU_RB_H
8f48ad614SDennis Dalessandro #define _HFI1_MMU_RB_H
9f48ad614SDennis Dalessandro 
10f48ad614SDennis Dalessandro #include "hfi.h"
11f48ad614SDennis Dalessandro 
struct mmu_rb_node {
	/* Start of the virtual address range covered by this node. */
	unsigned long addr;
	/* Length of the range in bytes. */
	unsigned long len;
	/*
	 * Last address of the range (inclusive). The name follows the
	 * interval-tree convention (INTERVAL_TREE_DEFINE) — presumably
	 * maintained by the tree code, not by users; confirm in mmu_rb.c.
	 */
	unsigned long __last;
	/* Linkage into the handler's RB tree (mmu_rb_handler::root). */
	struct rb_node node;
	/* Back-pointer to the owning handler. */
	struct mmu_rb_handler *handler;
	/* Linkage for the handler's lru_list / del_list. */
	struct list_head list;
	/* Reference count; final put goes through hfi1_mmu_rb_release(). */
	struct kref refcount;
};
21b85ced91SDean Luick 
/* filter and evict must not sleep. Only remove is allowed to sleep. */
struct mmu_rb_ops {
	/*
	 * Decide whether @node matches the (@addr, @len) range during a
	 * lookup. Atomic context: must not sleep.
	 */
	bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
		       unsigned long len);
	/*
	 * Tear down @mnode after it has been removed from the tree.
	 * @ops_arg is the opaque pointer given at registration. May sleep.
	 */
	void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
	/*
	 * Called per candidate node during hfi1_mmu_rb_evict(). @evict_arg
	 * is the caller's cookie; setting *@stop presumably terminates the
	 * eviction scan early (confirm against mmu_rb.c). Must not sleep.
	 */
	int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
		     void *evict_arg, bool *stop);
};
30e0b09ac5SDean Luick 
struct mmu_rb_handler {
	/*
	 * struct mmu_notifier is 56 bytes, and spinlock_t is 4 bytes, so
	 * they fit together in one cache line.  mn is relatively rarely
	 * accessed, so co-locating the spinlock with it achieves much of
	 * the cacheline contention reduction of giving the spinlock its own
	 * cacheline without the overhead of doing so.
	 */
	struct mmu_notifier mn;
	spinlock_t lock;        /* protect the RB tree */

	/* Begin on a new cacheline boundary here */
	struct rb_root_cached root ____cacheline_aligned_in_smp;
	/* Opaque argument handed back to every ops callback. */
	void *ops_arg;
	/* Caller-supplied callback table (see struct mmu_rb_ops). */
	struct mmu_rb_ops *ops;
	/* Node list — name suggests LRU order for eviction; verify in mmu_rb.c. */
	struct list_head lru_list;
	/* Work item for deferred node removal. */
	struct work_struct del_work;
	/* Nodes queued for deferred removal by del_work. */
	struct list_head del_list;
	/* Workqueue (from hfi1_mmu_rb_register()) that runs del_work. */
	struct workqueue_struct *wq;
	/* Pointer to the original allocation — presumably freed at unregister. */
	void *free_ptr;
};
523d2a9d64SDennis Dalessandro 
/*
 * Allocate and set up a handler, returning it in *handler; @ops_arg is
 * passed back to all @ops callbacks and @wq services deferred removals.
 * NOTE(review): presumably returns 0 on success / negative errno —
 * confirm in mmu_rb.c.
 */
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
/* Tear down @handler and release its resources. */
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
/* Insert @mnode into @handler's tree. */
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode);
/* kref release callback for mmu_rb_node::refcount. */
void hfi1_mmu_rb_release(struct kref *refcount);

/* Walk the tree evicting nodes via ops->evict; @evict_arg is its cookie. */
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
/*
 * Look up a node for the (@addr, @len) range using ops->filter.
 * NOTE(review): presumably returns NULL when nothing matches, and likely
 * takes a reference on the returned node — confirm in mmu_rb.c.
 */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr,
					  unsigned long len);
6600cbce5cSPatrick Kelsey 
6700cbce5cSPatrick Kelsey #endif /* _HFI1_MMU_RB_H */
68f48ad614SDennis Dalessandro