// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

#include "dfl-afu.h"

static void put_all_pages(struct page **pages, int npages)
{
        int i;

        for (i = 0; i < npages; i++)
                if (pages[i])
                        put_page(pages[i]);
}

void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

        afu->dma_regions = RB_ROOT;
}

/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
                             struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        struct device *dev = &pdata->dev->dev;
        int ret, pinned;

        ret = account_locked_vm(current->mm, npages, true);
        if (ret)
                return ret;

        region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!region->pages) {
                ret = -ENOMEM;
                goto unlock_vm;
        }

        pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
                                     region->pages);
        if (pinned < 0) {
                /* nothing was pinned; just free the page array */
                ret = pinned;
                goto free_pages;
        } else if (pinned != npages) {
                /* partial pin; release the pinned pages before freeing */
                ret = -EFAULT;
                goto put_pages;
        }

        dev_dbg(dev, "%d pages pinned\n", pinned);

        return 0;

put_pages:
        put_all_pages(region->pages, pinned);
free_pages:
        kfree(region->pages);
unlock_vm:
        account_locked_vm(current->mm, npages, false);
        return ret;
}

/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of given dfl_afu_dma_region.
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
                                struct dfl_afu_dma_region *region)
{
        long npages = region->length >> PAGE_SHIFT;
        struct device *dev = &pdata->dev->dev;

        put_all_pages(region->pages, npages);
        kfree(region->pages);
        account_locked_vm(current->mm, npages, false);

        dev_dbg(dev, "%ld pages unpinned\n", npages);
}

/**
 * afu_dma_check_continuous_pages - check if pages are physically contiguous
 * @region: dma memory region
 *
 * Return true if pages of given dma memory region have contiguous physical
 * addresses, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
        int npages = region->length >> PAGE_SHIFT;
        int i;

        for (i = 0; i < npages - 1; i++)
                if (page_to_pfn(region->pages[i]) + 1 !=
                    page_to_pfn(region->pages[i + 1]))
                        return false;

        return true;
}
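
/*
 * Worked example (editorial note, not driver logic): a 3-page region whose
 * pages map to PFNs { 0x100, 0x101, 0x102 } passes the check above, since
 * each PFN directly follows its predecessor, while { 0x100, 0x101, 0x103 }
 * fails it. A single-page region passes trivially because the loop body
 * never runs. This property is what later lets afu_dma_map_region() map the
 * whole region with a single dma_map_page() call on region->pages[0].
 */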

/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with given dma region.
 * Return true if memory area is fully contained in the region, otherwise false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
                                  u64 iova, u64 size)
{
        if (!size && region->iova != iova)
                return false;

        return (region->iova <= iova) &&
                (region->length + region->iova >= iova + size);
}

/**
 * afu_dma_region_add - add given dma region to rbtree
 * @pdata: feature device platform data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
                              struct dfl_afu_dma_region *region)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node **new, *parent = NULL;

        dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
                (unsigned long long)region->iova);

        new = &afu->dma_regions.rb_node;

        while (*new) {
                struct dfl_afu_dma_region *this;

                this = container_of(*new, struct dfl_afu_dma_region, node);

                parent = *new;

                if (dma_region_check_iova(this, region->iova, region->length))
                        return -EEXIST;

                if (region->iova < this->iova)
                        new = &((*new)->rb_left);
                else if (region->iova > this->iova)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&region->node, parent, new);
        rb_insert_color(&region->node, &afu->dma_regions);

        return 0;
}

/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @pdata: feature device platform data
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
                                  struct dfl_afu_dma_region *region)
{
        struct dfl_afu *afu;

        dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                (unsigned long long)region->iova);

        afu = dfl_fpga_pdata_get_private(pdata);
        rb_erase(&region->node, &afu->dma_regions);
}

/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @pdata: feature device platform data
 *
 * Needs to be called with pdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node *node = rb_first(&afu->dma_regions);
        struct dfl_afu_dma_region *region;

        while (node) {
                region = container_of(node, struct dfl_afu_dma_region, node);

                dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
                        (unsigned long long)region->iova);

                rb_erase(node, &afu->dma_regions);

                if (region->iova)
                        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                                       region->iova, region->length,
                                       DMA_BIDIRECTIONAL);

                if (region->pages)
                        afu_dma_unpin_pages(pdata, region);

                node = rb_next(node);
                kfree(region);
        }
}

/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @pdata: feature device platform data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova+size)
 * If nothing is matched, returns NULL.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
        struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
        struct rb_node *node = afu->dma_regions.rb_node;
        struct device *dev = &pdata->dev->dev;

        while (node) {
                struct dfl_afu_dma_region *region;

                region = container_of(node, struct dfl_afu_dma_region, node);

                if (dma_region_check_iova(region, iova, size)) {
                        dev_dbg(dev, "find region (iova = %llx)\n",
                                (unsigned long long)region->iova);
                        return region;
                }

                if (iova < region->iova)
                        node = node->rb_left;
                else if (iova > region->iova)
                        node = node->rb_right;
                else
                        /* the iova region is not fully covered. */
                        break;
        }

        dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
                (unsigned long long)iova, (unsigned long long)size);

        return NULL;
}
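
/*
 * Hypothetical lookup sketch (editorial illustration, not used by the
 * driver): both calls assume pdata->lock is already held, as required by
 * afu_dma_region_find(). With @size == 0 only a region starting exactly at
 * @iova matches; with a non-zero @size the region must fully contain
 * [@iova, @iova + @size).
 */
static struct dfl_afu_dma_region * __maybe_unused
example_region_lookup(struct dfl_feature_platform_data *pdata, u64 iova)
{
        struct dfl_afu_dma_region *region;

        /* size == 0: match only a region that starts exactly at @iova */
        region = afu_dma_region_find(pdata, iova, 0);
        if (region)
                return region;

        /* non-zero size: match a region fully containing one page at @iova */
        return afu_dma_region_find(pdata, iova, PAGE_SIZE);
}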

/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
        return afu_dma_region_find(pdata, iova, 0);
}

/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
                       u64 user_addr, u64 length, u64 *iova)
{
        struct dfl_afu_dma_region *region;
        int ret;

        /*
         * Check inputs: only accept a page-aligned user memory region with
         * valid length.
         */
        if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
                return -EINVAL;

        /* Check overflow */
        if (user_addr + length < user_addr)
                return -EINVAL;

        if (!access_ok((void __user *)(unsigned long)user_addr,
                       length))
                return -EINVAL;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        region->user_addr = user_addr;
        region->length = length;

        /* Pin the user memory region */
        ret = afu_dma_pin_pages(pdata, region);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to pin memory region\n");
                goto free_region;
        }

        /* Only accept physically contiguous pages, otherwise return an error */
        if (!afu_dma_check_continuous_pages(region)) {
                dev_err(&pdata->dev->dev, "pages are not contiguous\n");
                ret = -EINVAL;
                goto unpin_pages;
        }

        /* As pages are contiguous, map the whole region in one go */
        region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
                                    region->pages[0], 0,
                                    region->length,
                                    DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
                dev_err(&pdata->dev->dev, "failed to map for dma\n");
                ret = -EFAULT;
                goto unpin_pages;
        }

        *iova = region->iova;

        mutex_lock(&pdata->lock);
        ret = afu_dma_region_add(pdata, region);
        mutex_unlock(&pdata->lock);
        if (ret) {
                dev_err(&pdata->dev->dev, "failed to add dma region\n");
                goto unmap_dma;
        }

        return 0;

unmap_dma:
        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
        afu_dma_unpin_pages(pdata, region);
free_region:
        kfree(region);
        return ret;
}
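
/*
 * In-kernel usage sketch (editorial illustration, not part of the driver):
 * map one page-aligned user buffer for DMA and unmap it again. @uaddr and
 * @len are assumed to be pre-validated values from a caller such as an
 * ioctl handler.
 */
static int __maybe_unused
example_map_then_unmap(struct dfl_feature_platform_data *pdata,
                       u64 uaddr, u64 len)
{
        u64 iova;
        int ret;

        ret = afu_dma_map_region(pdata, uaddr, len, &iova);
        if (ret)
                return ret;

        /* ... program the AFU with @iova and run the accelerator ... */

        return afu_dma_unmap_region(pdata, iova);
}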

/**
 * afu_dma_unmap_region - unmap dma memory region
 * @pdata: feature device platform data
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
{
        struct dfl_afu_dma_region *region;

        mutex_lock(&pdata->lock);
        region = afu_dma_region_find_iova(pdata, iova);
        if (!region) {
                mutex_unlock(&pdata->lock);
                return -EINVAL;
        }

        if (region->in_use) {
                mutex_unlock(&pdata->lock);
                return -EBUSY;
        }

        afu_dma_region_remove(pdata, region);
        mutex_unlock(&pdata->lock);

        dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
                       region->iova, region->length, DMA_BIDIRECTIONAL);
        afu_dma_unpin_pages(pdata, region);
        kfree(region);

        return 0;
}
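
/*
 * Userspace view (editorial note, hedged): these two entry points are the
 * kernel side of the DFL port DMA map/unmap ioctls, assuming the uapi
 * definitions from <linux/fpga-dfl.h>. A sketch of the expected flow:
 *
 *	struct dfl_fpga_port_dma_map map = {
 *		.argsz = sizeof(map),
 *		.user_addr = (__u64)(uintptr_t)buf,	// page-aligned buffer
 *		.length = len,				// multiple of PAGE_SIZE
 *	};
 *
 *	if (!ioctl(port_fd, DFL_FPGA_PORT_DMA_MAP, &map)) {
 *		// ... hand map.iova to the AFU ...
 *		struct dfl_fpga_port_dma_unmap unmap = {
 *			.argsz = sizeof(unmap),
 *			.iova = map.iova,
 *		};
 *		ioctl(port_fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 */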