// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>

#include "uverbs.h"

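/*
 * ib_umem_dmabuf_map_pages - map the pages of a dma-buf umem
 * @umem_dmabuf: the umem to map
 *
 * The caller must hold the reservation lock of the dma-buf. On the
 * first call the attachment is mapped and the sg list is trimmed to
 * the umem boundaries; subsequent calls only wait on the exclusive
 * fence, so that any buffer move by the exporter has finished before
 * the pages are used.
 *
 * Returns 0 on success or a negative error code.
 */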
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct dma_fence *fence;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
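	/*
	 * Walk the mapped list: count the entries that overlap the
	 * page-aligned [start, end) window, offset into the first
	 * overlapping entry and trim the tail of the last one, so that
	 * the list covers exactly the pages backing the umem.
	 */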
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

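	/* publish the trimmed list through the umem */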
	umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sg_head.nents = nmap;
	umem_dmabuf->umem.nmap = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up-to-date. Wait for the exporter to finish
	 * the migration.
	 */
	fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
	if (fence)
		return dma_fence_wait(fence, false);

	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

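/*
 * ib_umem_dmabuf_unmap_pages - unmap the pages of a dma-buf umem
 * @umem_dmabuf: the umem to unmap
 *
 * The caller must hold the reservation lock of the dma-buf. The first
 * and last entries of the sg list are restored to their original
 * values before the mapping is handed back to the exporter.
 */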
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

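	/* hand the mapping back to the exporter */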
	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

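/*
 * ib_umem_dmabuf_get - create an ib_umem backed by a dma-buf
 * @device: IB device the umem belongs to
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the new umem
 * @ops: importer callbacks; move_notify is mandatory
 *
 * No pages are mapped here; that is deferred to
 * ib_umem_dmabuf_map_pages(). Returns the new umem on success or an
 * ERR_PTR on failure.
 */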
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

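	/*
	 * A dynamic importer is required: the exporter may move the
	 * buffer at any time and move_notify is how we learn about it.
	 */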
	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

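	/*
	 * Attach as a dynamic importer: nothing is pinned here, the
	 * exporter stays free to migrate the buffer and will call
	 * ops->move_notify when it does.
	 */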
	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
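
/*
 * Typical importer usage, as a sketch only (the callback and ops names
 * below are illustrative, not taken from an in-tree driver):
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 *
 *		my_invalidate_hw_mapping(umem_dmabuf);
 *		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(device, offset, length, fd,
 *					 access, &my_attach_ops);
 *	...
 *	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
 *	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 */
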
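/*
 * ib_umem_dmabuf_release - detach from the dma-buf and free the umem
 * @umem_dmabuf: the umem to release
 *
 * Any mapping must have been torn down with
 * ib_umem_dmabuf_unmap_pages() first; this runs from
 * ib_umem_release() for dma-buf umems.
 */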
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}