// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

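/**
 * ib_umem_dmabuf_map_pages - map the pages of a dma-buf umem for DMA
 * @umem_dmabuf: umem backed by a dma-buf attachment
 *
 * Map the attachment, trim the returned sg list in place to the umem's
 * [address, address + length) window, and wait for the exporter's fences
 * so the page contents are valid. The caller must hold the dma-buf's
 * reservation lock.
 *
 * Return: 0 on success or a negative errno.
 */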
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
	for_each_sgtable_dma_sg(sgt, sg, i) {
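		/* count every entry that overlaps [start, end) */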
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
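		/* the entry containing start becomes the first sg; clip its head */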
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
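		/* the entry containing end becomes the last sg; trim its tail */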
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	/*
	 * dma_resv_wait_timeout() returns the positive remaining timeout on
	 * success; don't truncate it into the int return value.
	 */
	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
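
/*
 * Illustrative sketch (not part of this file): a driver that supports
 * invalidation pairs these helpers in its move_notify callback, which the
 * exporter invokes with the reservation lock already held. The callback
 * name and the MR-zapping step are hypothetical placeholders.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 *
 *		// zap the HW translations that reference the old pages
 *		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *		// remap later, e.g. from the page-fault path, with
 *		// dma_resv_lock() held around ib_umem_dmabuf_map_pages()
 *	}
 */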
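/**
 * ib_umem_dmabuf_unmap_pages - unmap the pages of a dma-buf umem
 * @umem_dmabuf: umem backed by a dma-buf attachment
 *
 * Undo the first/last entry adjustments made by ib_umem_dmabuf_map_pages()
 * and unmap the attachment. A no-op if the umem is not currently mapped.
 * The caller must hold the dma-buf's reservation lock.
 */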
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

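/**
 * ib_umem_dmabuf_get - create an ib_umem from a dma-buf file descriptor
 * @device: IB device the umem will be used with
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the memory region
 * @ops: importer attach ops; move_notify is mandatory
 *
 * Take a reference on the dma-buf and create a dynamic attachment for it.
 * The pages are not mapped here; use ib_umem_dmabuf_map_pages() for that.
 */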
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
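
/*
 * Illustrative sketch (hypothetical names): a driver that can handle
 * invalidation supplies its own attach ops and callback:
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, length, fd, access,
 *					 &my_attach_ops);
 *	if (IS_ERR(umem_dmabuf))
 *		return ERR_CAST(umem_dmabuf);
 */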

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

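/**
 * ib_umem_dmabuf_get_pinned - create a pinned ib_umem from a dma-buf
 * @device: IB device the umem will be used with
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the memory region
 *
 * For devices that cannot handle invalidation: pin the dma-buf so the
 * exporter may not move it, then map the pages. move_notify is never
 * expected to fire on this attachment.
 */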
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
					 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
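
/*
 * Illustrative sketch (hypothetical driver code): a driver without
 * on-demand-paging support would typically call the pinned variant from
 * its reg_user_mr_dmabuf path:
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, length,
 *						fd, access);
 *	if (IS_ERR(umem_dmabuf))
 *		return ERR_CAST(umem_dmabuf);
 *	// the sg list is now mapped and ready for the HW to use
 */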
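/**
 * ib_umem_dmabuf_release - release a dma-buf umem
 * @umem_dmabuf: umem backed by a dma-buf attachment
 *
 * Unmap (and unpin, for pinned umems) the pages, detach from the dma-buf,
 * and drop the dma-buf reference taken in ib_umem_dmabuf_get().
 */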
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned)
		dma_buf_unpin(umem_dmabuf->attach);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}