// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include "dfl-afu.h"

static void put_all_pages(struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		if (pages[i])
			put_page(pages[i]);
}

void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

	afu->dma_regions = RB_ROOT;
}

/**
 * afu_dma_adjust_locked_vm - adjust locked memory
 * @dev: port device
 * @npages: number of pages
 * @incr: increase or decrease locked memory
 *
 * Increase or decrease the locked memory size with npages input.
 *
 * Return 0 on success.
 * Return -ENOMEM if the locked memory size would exceed the limit and the
 * task lacks CAP_IPC_LOCK.
 */
static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
{
	unsigned long locked, lock_limit;
	int ret = 0;

	/* the task is exiting. */
	if (!current->mm)
		return 0;

	down_write(&current->mm->mmap_sem);

	if (incr) {
		locked = current->mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > current->mm->locked_vm))
			npages = current->mm->locked_vm;
		current->mm->locked_vm -= npages;
	}

	dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
		incr ? '+' : '-', npages << PAGE_SHIFT,
		current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
		ret ? "- exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
			     struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;
	int ret, pinned;

	ret = afu_dma_adjust_locked_vm(dev, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		/* nothing was pinned, just free the page array */
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* partial pin, release the pinned pages before freeing */
		ret = -EFAULT;
		goto put_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", pinned);

	return 0;

put_pages:
	put_all_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	afu_dma_adjust_locked_vm(dev, npages, false);
	return ret;
}
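/*
 * Worked example (illustrative only, not part of the driver): the
 * accounting above charges locked_vm before any page is pinned. Assuming
 * a 4 KiB PAGE_SIZE and RLIMIT_MEMLOCK of 64 KiB, lock_limit in
 * afu_dma_adjust_locked_vm() is 64 KiB >> PAGE_SHIFT = 16 pages. A task
 * without CAP_IPC_LOCK that maps an 8-page (32 KiB) region succeeds while
 * locked_vm is still at most 8 pages: a first region takes locked_vm to 8,
 * a second identical one to exactly 16 (still allowed), and a third fails
 * with -ENOMEM before get_user_pages_fast() is ever called.
 */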
/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of given dfl_afu_dma_region.
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
				struct dfl_afu_dma_region *region)
{
	long npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;

	put_all_pages(region->pages, npages);
	kfree(region->pages);
	afu_dma_adjust_locked_vm(dev, npages, false);

	dev_dbg(dev, "%ld pages unpinned\n", npages);
}

/**
 * afu_dma_check_continuous_pages - check if pages are physically contiguous
 * @region: dma memory region
 *
 * Return true if the pages of given dma memory region have contiguous
 * physical addresses, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	int i;

	for (i = 0; i < npages - 1; i++)
		if (page_to_pfn(region->pages[i]) + 1 !=
		    page_to_pfn(region->pages[i + 1]))
			return false;

	return true;
}

/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with given dma region.
 * A @size of 0 matches only a region that starts exactly at @iova.
 * Return true if the memory area is fully contained in the region, otherwise
 * false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
				  u64 iova, u64 size)
{
	if (!size && region->iova != iova)
		return false;

	return (region->iova <= iova) &&
		(region->length + region->iova >= iova + size);
}

/**
 * afu_dma_region_add - add given dma region to rbtree
 * @pdata: feature device platform data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
			      struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node **new, *parent = NULL;

	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
		(unsigned long long)region->iova);

	new = &afu->dma_regions.rb_node;

	while (*new) {
		struct dfl_afu_dma_region *this;

		this = container_of(*new, struct dfl_afu_dma_region, node);

		parent = *new;

		/* reject a region fully contained in an existing one */
		if (dma_region_check_iova(this, region->iova, region->length))
			return -EEXIST;

		if (region->iova < this->iova)
			new = &((*new)->rb_left);
		else if (region->iova > this->iova)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, &afu->dma_regions);

	return 0;
}
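/*
 * Worked example (illustrative): suppose the tree holds one region with
 * iova 0x1000 and length 0x2000, i.e. [0x1000, 0x3000). Adding a region
 * with iova 0x1800 and length 0x800 fails with -EEXIST, because
 * dma_region_check_iova() sees it fully contained in the existing one
 * (0x1000 <= 0x1800 and 0x3000 >= 0x2000). Adding a region with iova
 * 0x4000 passes the check, descends to the right of 0x1000 and is
 * inserted. Note that only containment and equal start addresses are
 * rejected here; a partially overlapping region, e.g. iova 0x2800 with
 * length 0x1000, would still be inserted.
 */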
/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @pdata: feature device platform data
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
				  struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu;

	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
		(unsigned long long)region->iova);

	afu = dfl_fpga_pdata_get_private(pdata);
	rb_erase(&region->node, &afu->dma_regions);
}

/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @pdata: feature device platform data
 *
 * Needs to be called with pdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = rb_first(&afu->dma_regions);
	struct dfl_afu_dma_region *region;

	while (node) {
		region = container_of(node, struct dfl_afu_dma_region, node);

		dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
			(unsigned long long)region->iova);

		rb_erase(node, &afu->dma_regions);

		if (region->iova)
			dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
				       region->iova, region->length,
				       DMA_BIDIRECTIONAL);

		if (region->pages)
			afu_dma_unpin_pages(pdata, region);

		node = rb_next(node);
		kfree(region);
	}
}

/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @pdata: feature device platform data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova+size)
 * Returns NULL if nothing matches.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = afu->dma_regions.rb_node;
	struct device *dev = &pdata->dev->dev;

	while (node) {
		struct dfl_afu_dma_region *region;

		region = container_of(node, struct dfl_afu_dma_region, node);

		if (dma_region_check_iova(region, iova, size)) {
			dev_dbg(dev, "find region (iova = %llx)\n",
				(unsigned long long)region->iova);
			return region;
		}

		if (iova < region->iova)
			node = node->rb_left;
		else if (iova > region->iova)
			node = node->rb_right;
		else
			/* the iova region is not fully covered. */
			break;
	}

	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
		(unsigned long long)iova, (unsigned long long)size);

	return NULL;
}

/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}
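/*
 * Worked example (illustrative): with a mapped region [0x1000, 0x3000),
 * afu_dma_region_find(pdata, 0x1000, 0) and
 * afu_dma_region_find(pdata, 0x1800, 0x800) both return the region, while
 * afu_dma_region_find(pdata, 0x1800, 0) returns NULL: with @size == 0,
 * dma_region_check_iova() only matches a region whose start address is
 * exactly @iova, which is what afu_dma_unmap_region() below relies on.
 */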
/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check inputs, only accept a page-aligned user memory region with
	 * a valid, non-zero length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)user_addr,
		       length))
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept physically contiguous pages, return an error otherwise */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/* The pages are contiguous, so map them with a single dma mapping */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	*iova = region->iova;

	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}

/**
 * afu_dma_unmap_region - unmap dma memory region
 * @pdata: feature device platform data
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
{
	struct dfl_afu_dma_region *region;

	mutex_lock(&pdata->lock);
	region = afu_dma_region_find_iova(pdata, iova);
	if (!region) {
		mutex_unlock(&pdata->lock);
		return -EINVAL;
	}

	if (region->in_use) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	afu_dma_region_remove(pdata, region);
	mutex_unlock(&pdata->lock);

	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
	afu_dma_unpin_pages(pdata, region);
	kfree(region);

	return 0;
}
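/*
 * Userspace usage sketch (illustrative; the struct layouts and ioctl names
 * are assumed to come from the DFL uAPI header <linux/fpga-dfl.h>, not from
 * this file). afu_dma_map_region() and afu_dma_unmap_region() back the
 * DFL_FPGA_PORT_DMA_MAP/UNMAP ioctls on an opened port device:
 *
 *	struct dfl_fpga_port_dma_map map = { .argsz = sizeof(map) };
 *	struct dfl_fpga_port_dma_unmap unmap = { .argsz = sizeof(unmap) };
 *	void *buf;
 *
 *	// page-aligned address and page-multiple length are required,
 *	// see the input checks in afu_dma_map_region()
 *	buf = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	map.user_addr = (__u64)(unsigned long)buf;
 *	map.length = 2 * 4096;
 *	if (ioctl(port_fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0) {
 *		// map.iova now holds the device-visible dma address
 *		unmap.iova = map.iova;
 *		ioctl(port_fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 *
 * Note the map can still fail with -EINVAL even for valid input, since the
 * driver only accepts buffers whose pinned pages happen to be physically
 * contiguous (see afu_dma_check_continuous_pages() above).
 */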