// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})

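/*
 * Illustrative note: get_octo_len() below returns the translation table size
 * in octwords (16-byte units). Each MTT entry written by populate_mtts() is
 * an 8-byte address, so one octword holds two entries and the page count is
 * rounded up to a multiple of two. For example, assuming a 3 MiB range with
 * page_shift = 12 (4 KiB pages): npages = 768 and (768 + 1) / 2 = 384
 * octwords.
 */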
static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int nsg = mr->nsg;
	u64 dma_addr;
	u64 dma_len;
	int j = 0;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
		     nsg && dma_len;
		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
			mtt[j++] = cpu_to_be64(dma_addr);
	}
}

static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
	kvfree(in);
	if (err) {
		mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
		return err;
	}

	return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
}

static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}

#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}

static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

static u32 klm_bcount(u64 size)
{
	return (u32)size;
}

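/*
 * Illustrative example for fill_indir() below (addresses are hypothetical):
 * with two direct MRs covering [0x00000000, 0x40000000) and
 * [0x50000000, 0x60000000), the first iteration emits a KLM for the first MR
 * and sets preve to 0x40000000. The next iteration then sees a hole, emits a
 * KLM pointing at the null mkey with bcount
 * 0x50000000 - 0x40000000 = 0x10000000, and only afterwards (via the goto)
 * emits the KLM for the second MR.
 */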
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr.key);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
			preve = dmr->start;
			goto again;
		}
	}
}

static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}

static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}

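/*
 * Illustrative note for map_direct_mr() below: the entity size is the
 * greatest common divisor of the lengths of all iotlb maps intersecting the
 * MR range, so every map splits into a whole number of equally sized
 * scatterlist chunks. For example (hypothetical sizes), maps of 12 KiB and
 * 8 KiB give a gcd of 4 KiB, hence log_entity_size = 12 and the MTT is built
 * from 4 KiB entries.
 */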
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	u64 start = 0;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->mdev->device;

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
		start += size;
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		paend = map->addr + maplen(map, mr);
		for (pa = map->addr; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
			if (!sg)
				goto done;
		}
	}
done:
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}

static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = mvdev->mdev->device;

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}

static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;
	int i = 0;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
		i++;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	list_for_each_entry_safe(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	if (mr->initialized)
		return 0;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->initialized = true;
	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;

	mutex_lock(&mr->mkey_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);
	return err;
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	mutex_lock(&mr->mkey_mtx);
	if (!mr->initialized)
		goto out;

	destroy_indirect_key(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	memset(mr, 0, sizeof(*mr));
	mr->initialized = false;
out:
	mutex_unlock(&mr->mkey_mtx);
}

static bool map_empty(struct vhost_iotlb *iotlb)
{
	return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}

int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;

	*change_map = false;
	if (map_empty(iotlb)) {
		mlx5_vdpa_destroy_mr(mvdev);
		return 0;
	}
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
		*change_map = true;
	}
	if (!*change_map)
		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	mutex_unlock(&mr->mkey_mtx);

	return err;
}
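/*
 * Usage sketch (illustrative only, not part of this file): a caller such as
 * the driver's vdpa set_map callback is expected to consult
 * mlx5_vdpa_handle_set_map() first and rebuild the memory key only when an
 * existing map is being replaced. The helper name below is hypothetical and
 * omits any virtqueue suspend/resume handling the driver may also need.
 *
 *	static int example_set_map(struct mlx5_vdpa_dev *mvdev,
 *				   struct vhost_iotlb *iotlb)
 *	{
 *		bool change_map;
 *		int err;
 *
 *		err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
 *		if (err)
 *			return err;
 *
 *		if (change_map) {
 *			mlx5_vdpa_destroy_mr(mvdev);
 *			err = mlx5_vdpa_create_mr(mvdev, iotlb);
 *		}
 *		return err;
 *	}
 */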