// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})

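/* An MTT entry is 8 bytes and the device counts translations in octwords
 * (16 bytes), i.e. two MTT entries per octword, so get_octo_len() rounds the
 * page count up to the nearest pair. Worked example with illustrative values:
 * len = 5 MiB and page_shift = 21 (2 MiB pages) give npages = 3 and an
 * octword length of (3 + 1) / 2 = 2. Likewise for the macro above,
 * MLX5_DIV_ROUND_UP_POW2(4097, 12) evaluates to 2.
 */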
static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

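/* Flatten the DMA-mapped scatterlist into the MTT array: each DMA segment is
 * split into fixed-size chunks of 1 << mr->log_size bytes and each chunk's
 * DMA address becomes one big-endian MTT entry, up to mr->nsg entries.
 */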
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int nsg = mr->nsg;
	u64 dma_addr;
	u64 dma_len;
	int j = 0;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
		     nsg && dma_len;
		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
			mtt[j++] = cpu_to_be64(dma_addr);
	}
}

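/* Build and execute a CREATE_MKEY command for one direct region: an MTT-mode
 * mkey starting at mr->offset, covering mr->end - mr->start bytes with a
 * mr->log_size page size, and with read/write access derived from the vhost
 * permissions. The MTT entries are appended to the command by populate_mtts().
 */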
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
	kvfree(in);
	if (err) {
		mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
		return err;
	}

	return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}

static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}

#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

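/* The indirect key must cover the whole range spanned by the direct MRs.
 * Derive that range from the ordered list: it starts at the first direct
 * MR's start address and ends at the last one's end address.
 */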
static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}

static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

static u32 klm_bcount(u64 size)
{
	return (u32)size;
}

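/* Populate the KLM array of the indirect mkey. Each direct MR contributes one
 * KLM entry that references its own mkey; whenever there is a gap between the
 * end of the previous entry (preve) and the next direct MR, the gap is
 * covered by one or more entries referencing the null mkey, each at most
 * MAX_KLM_SIZE bytes long.
 */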
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE);

			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(bcount));
			preve += bcount;

			goto again;
		}
	}
}

static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}

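/* Each KLM entry is 16 bytes (one octword) and the array is padded to a
 * multiple of four entries by klm_byte_size(). The indirect mkey created
 * below is a KLM-mode key whose address range comes from indir_start_addr()
 * and indir_len() and whose entries are filled in by fill_indir().
 */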
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey);
}

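/* Pick one entity (page) size for the whole direct MR: take the GCD of the
 * lengths of all maps intersecting [mr->start, mr->end) and round it down to
 * a power of two via ilog2(). Illustrative example: map lengths of 12 KiB and
 * 8 KiB give gcd = 4 KiB, so log_entity_size = 12 and the region is described
 * with 4 KiB MTT entries. A second pass over the maps then fills a
 * scatterlist with one page per entity, DMA-maps it and creates the mkey.
 */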
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa, offset;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->vdev.dma_dev;

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		offset = mr->start > map->start ? mr->start - map->start : 0;
		pa = map->addr + offset;
		paend = map->addr + offset + maplen(map, mr);
		for (; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
			if (!sg)
				goto done;
		}
	}
done:
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}

static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = mvdev->vdev.dma_dev;

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}

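/* Split a contiguous range with uniform permissions into direct MRs of at
 * most MAX_KLM_SIZE (1 << 30) bytes each, since a single KLM entry carries a
 * 32-bit byte count. Successfully mapped regions are collected on a local
 * list and only spliced onto mr->head once the whole chain succeeds.
 */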
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	list_for_each_entry_safe(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
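/* Illustrative example (hypothetical addresses): maps [0x0, 0x7fff] and
 * [0x8000, 0xffff] with identical permissions are merged and handed to
 * add_direct_chain() as one 64 KiB chunk; if the next map only starts at
 * 0x20000, the hole is accounted for in num_klms so that fill_indir() can
 * later cover it with null-mkey entries.
 */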
static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->user_mr = true;
	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

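/* When no iotlb is provided, the device is assumed to address memory
 * directly, so create a single PA-mode mkey with length64 set, granting
 * read/write access to the whole address space without any translation
 * entries.
 */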
static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	if (!err)
		mr->user_mr = false;

	kfree(in);
	return err;
}

static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}

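/* Keep a private copy of the translation for the control VQ, which the driver
 * accesses in software: either duplicate every map from src into
 * mvdev->cvq.iotlb, or, when src is NULL, install a single 1:1 RW mapping of
 * the entire range.
 */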
static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	u64 start = 0, last = ULLONG_MAX;
	int err;

	if (!src) {
		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
		return err;
	}

	for (map = vhost_iotlb_itree_first(src, start, last); map;
		map = vhost_iotlb_itree_next(map, start, last)) {
		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
					    map->addr, map->perm);
		if (err)
			return err;
	}
	return 0;
}

static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
	vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}

static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	destroy_indirect_key(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
}

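/* The destroy/create helpers below are scoped by address space: each one
 * first checks whether the given asid is the one currently bound to its
 * virtqueue group (CVQ or data VQs) and becomes a no-op otherwise.
 */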
static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
		return;

	prune_iotlb(mvdev);
}

static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;

	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
		return;

	if (!mr->initialized)
		return;

	if (mr->user_mr)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	mr->initialized = false;
}

void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;

	mutex_lock(&mr->mkey_mtx);

	_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
	_mlx5_vdpa_destroy_cvq_mr(mvdev, asid);

	mutex_unlock(&mr->mkey_mtx);
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
	mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
	mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
}

static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
				    struct vhost_iotlb *iotlb,
				    unsigned int asid)
{
	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
		return 0;

	return dup_iotlb(mvdev, iotlb);
}

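/* Create the data-path MR for the given ASID: with an iotlb a user MR
 * (direct MRs plus an indirect key) is built, otherwise a plain DMA MR is
 * used. The mr->initialized flag makes the call idempotent.
 */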
static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
				    struct vhost_iotlb *iotlb,
				    unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;

	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
		return 0;

	if (mr->initialized)
		return 0;

	if (iotlb)
		err = create_user_mr(mvdev, iotlb);
	else
		err = create_dma_mr(mvdev, mr);

	if (err)
		return err;

	mr->initialized = true;

	return 0;
}

static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb, unsigned int asid)
{
	int err;

	err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
	if (err)
		return err;

	err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
	if (err)
		goto out_err;

	return 0;

out_err:
	_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);

	return err;
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			unsigned int asid)
{
	int err;

	mutex_lock(&mvdev->mr.mkey_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	mutex_unlock(&mvdev->mr.mkey_mtx);
	return err;
}

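/* Called on a map update. If an MR already exists, only *change_map is set so
 * that the caller can tear down and rebuild the mapping; otherwise the MR is
 * created here under mkey_mtx.
 */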
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;

	*change_map = false;
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
		*change_map = true;
	}
	if (!*change_map)
		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	mutex_unlock(&mr->mkey_mtx);

	return err;
}