// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})

static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int nsg = mr->nsg;
	u64 dma_addr;
	u64 dma_len;
	int j = 0;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
		     nsg && dma_len;
		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
			mtt[j++] = cpu_to_be64(dma_addr);
	}
}

static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
	kvfree(in);
	if (err) {
		mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
		return err;
	}

	return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
}

static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}

#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}
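
/* Total length spanned by the indirect mkey: from the start of the first
 * direct MR to the end of the last one. The list is kept in ascending
 * address order; holes between direct MRs are covered with the null mkey
 * in fill_indir().
 */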
static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

static u32 klm_bcount(u64 size)
{
	return (u32)size;
}

static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr.key);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
			preve = dmr->start;
			goto again;
		}
	}
}

static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}

static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}

static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
{
	return &mvdev->mdev->pdev->dev;
}
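
/* Build and DMA-map a scatterlist covering [mr->start, mr->end) from the
 * iotlb maps that intersect it, then create a direct MTT mkey over the
 * mapped range. The MTT entry size is derived from the GCD of the
 * intersecting map lengths.
 */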
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	u64 start = 0;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = get_dma_device(mvdev);

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
		start += size;
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		paend = map->addr + maplen(map, mr);
		for (pa = map->addr; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
			if (!sg)
				goto done;
		}
	}
done:
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}

static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = get_dma_device(mvdev);

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}
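
/* Register a contiguous range with uniform permissions, splitting it into
 * chunks of at most MAX_KLM_SIZE so that each resulting direct MR can be
 * referenced by a single KLM entry of the indirect mkey.
 */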
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;
	int i = 0;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
		i++;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	list_for_each_entry_safe(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, merging
 * adjacent maps that carry the same permissions, and create direct memory
 * keys that provide the device access to memory. The direct mkeys are then
 * referred to by the indirect memory key that provides access to the entire
 * address space given by the iotlb.
 */
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	if (mr->initialized)
		return 0;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations.
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->initialized = true;
	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;

	mutex_lock(&mr->mkey_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);
	return err;
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	mutex_lock(&mr->mkey_mtx);
	if (!mr->initialized)
		goto out;

	destroy_indirect_key(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	memset(mr, 0, sizeof(*mr));
	mr->initialized = false;
out:
	mutex_unlock(&mr->mkey_mtx);
}

static bool map_empty(struct vhost_iotlb *iotlb)
{
	return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}

int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;

	*change_map = false;
	if (map_empty(iotlb)) {
		mlx5_vdpa_destroy_mr(mvdev);
		return 0;
	}
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
		*change_map = true;
	}
	if (!*change_map)
		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);

	return err;
}