xref: /openbmc/linux/include/rdma/ib_umem.h (revision 84b102f5)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright (c) 2007 Cisco Systems.  All rights reserved.
4  */
5 
6 #ifndef IB_UMEM_H
7 #define IB_UMEM_H
8 
9 #include <linux/list.h>
10 #include <linux/scatterlist.h>
11 #include <linux/workqueue.h>
12 #include <rdma/ib_verbs.h>
13 
14 struct ib_ucontext;
15 struct ib_umem_odp;
16 
/*
 * Tracks a region of user memory pinned for DMA by an RDMA device.
 * Field layout is shared with the rest of the RDMA core — do not reorder.
 */
struct ib_umem {
	struct ib_device       *ibdev;     /* device this umem was created for */
	struct mm_struct       *owning_mm; /* presumably the mm of the registering process — confirm in umem.c */
	u64 iova;		/* DMA IOVA; basis for block counting in ib_umem_num_dma_blocks() */
	size_t			length;  /* length of the region in bytes */
	unsigned long		address; /* user VA start; low bits give the in-page offset (ib_umem_offset()) */
	u32 writable : 1;	/* NOTE(review): presumably set from write access flags — confirm */
	u32 is_odp : 1;		/* set when this umem is part of an ib_umem_odp (on-demand paging) — confirm */
	struct work_struct	work;
	struct sg_table sg_head; /* scatter/gather table; sgl's DMA address is used by the helpers below */
	int             nmap;	 /* number of mapped SG entries, fed to __rdma_block_iter_start() */
	unsigned int    sg_nents;
};
30 
31 /* Returns the offset of the umem start relative to the first page. */
32 static inline int ib_umem_offset(struct ib_umem *umem)
33 {
34 	return umem->address & ~PAGE_MASK;
35 }
36 
37 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
38 					       unsigned long pgsz)
39 {
40 	return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) &
41 	       (pgsz - 1);
42 }
43 
44 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
45 					    unsigned long pgsz)
46 {
47 	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
48 			 ALIGN_DOWN(umem->iova, pgsz))) /
49 	       pgsz;
50 }
51 
52 static inline size_t ib_umem_num_pages(struct ib_umem *umem)
53 {
54 	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
55 }
56 
57 static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
58 						struct ib_umem *umem,
59 						unsigned long pgsz)
60 {
61 	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
62 }
63 
/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: struct ib_block_iter used as the loop cursor
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
78 
79 #ifdef CONFIG_INFINIBAND_USER_MEM
80 
/*
 * Create a umem for the user address range [addr, addr + size) with the
 * given access flags. Returns ERR_PTR() on failure (the
 * !CONFIG_INFINIBAND_USER_MEM stub below returns ERR_PTR(-EINVAL)).
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
/* Release a umem obtained from ib_umem_get(). */
void ib_umem_release(struct ib_umem *umem);
/* Copy @length bytes at @offset from the umem into @dst; returns 0 or -errno
 * (stub returns -EINVAL) — see umem.c for exact semantics. */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
/* Pick the best HW page size from @pgsz_bitmap for the umem mapped at IOVA
 * @virt; returns 0 when none fits (see ib_umem_find_best_pgoff() below). */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
89 /**
90  * ib_umem_find_best_pgoff - Find best HW page size
91  *
92  * @umem: umem struct
93  * @pgsz_bitmap bitmap of HW supported page sizes
94  * @pgoff_bitmask: Mask of bits that can be represented with an offset
95  *
96  * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
97  * an IOVA it accepts a bitmask specifying what address bits can be represented
98  * with a page offset.
99  *
100  * For instance if the HW has multiple page sizes, requires 64 byte alignemnt,
101  * and can support aligned offsets up to 4032 then pgoff_bitmask would be
102  * "111111000000".
103  *
104  * If the pgoff_bitmask requires either alignment in the low bit or an
105  * unavailable page size for the high bits, this function returns 0.
106  */
107 static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
108 						    unsigned long pgsz_bitmap,
109 						    u64 pgoff_bitmask)
110 {
111 	struct scatterlist *sg = umem->sg_head.sgl;
112 	dma_addr_t dma_addr;
113 
114 	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
115 	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
116 				      dma_addr & pgoff_bitmask);
117 }
118 
119 #else /* CONFIG_INFINIBAND_USER_MEM */
120 
121 #include <linux/err.h>
122 
123 static inline struct ib_umem *ib_umem_get(struct ib_device *device,
124 					  unsigned long addr, size_t size,
125 					  int access)
126 {
127 	return ERR_PTR(-EINVAL);
128 }
static inline void ib_umem_release(struct ib_umem *umem)
{
	/* Nothing to release when user memory support is compiled out. */
}
/*
 * Stub: with user memory support compiled out no umem can exist, so there
 * is nothing to copy from. Always fails with -EINVAL, matching the error
 * the ib_umem_get() stub reports.
 */
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	/* No page size is ever usable without user memory support. */
	return 0UL;
}
140 static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
141 						    unsigned long pgsz_bitmap,
142 						    u64 pgoff_bitmask)
143 {
144 	return 0;
145 }
146 
147 #endif /* CONFIG_INFINIBAND_USER_MEM */
148 
149 #endif /* IB_UMEM_H */
150