xref: /openbmc/linux/drivers/fpga/dfl-afu-dma-region.c (revision 4b4193256c8d3bc3a5397b5cd9494c2ad386317d)
1fa8dda1eSWu Hao // SPDX-License-Identifier: GPL-2.0
2fa8dda1eSWu Hao /*
3fa8dda1eSWu Hao  * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
4fa8dda1eSWu Hao  *
5fa8dda1eSWu Hao  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6fa8dda1eSWu Hao  *
7fa8dda1eSWu Hao  * Authors:
8fa8dda1eSWu Hao  *   Wu Hao <hao.wu@intel.com>
9fa8dda1eSWu Hao  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
10fa8dda1eSWu Hao  */
11fa8dda1eSWu Hao 
12fa8dda1eSWu Hao #include <linux/dma-mapping.h>
13fa8dda1eSWu Hao #include <linux/sched/signal.h>
14fa8dda1eSWu Hao #include <linux/uaccess.h>
1579eb597cSDaniel Jordan #include <linux/mm.h>
16fa8dda1eSWu Hao 
17fa8dda1eSWu Hao #include "dfl-afu.h"
18fa8dda1eSWu Hao 
afu_dma_region_init(struct dfl_feature_platform_data * pdata)19fa8dda1eSWu Hao void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
20fa8dda1eSWu Hao {
21fa8dda1eSWu Hao 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
22fa8dda1eSWu Hao 
23fa8dda1eSWu Hao 	afu->dma_regions = RB_ROOT;
24fa8dda1eSWu Hao }
25fa8dda1eSWu Hao 
/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region. On success the pinned
 * page array is stored in region->pages; on failure everything acquired
 * here (locked-vm accounting, page array, partial pins) is rolled back.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
			     struct dfl_afu_dma_region *region)
{
	/*
	 * NOTE(review): npages is a signed int, so a region->length above
	 * ~2^43 bytes would truncate/overflow here — presumably the caller
	 * only passes sane lengths; confirm against the ioctl path.
	 */
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;
	int ret, pinned;

	/* Charge the pages against the task's RLIMIT_MEMLOCK first. */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		/* Nothing was pinned; only free the array and the accounting. */
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* Partial pin: release the pages that did get pinned. */
		ret = -EFAULT;
		goto unpin_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", pinned);

	return 0;

unpin_pages:
	unpin_user_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
73fa8dda1eSWu Hao 
74fa8dda1eSWu Hao /**
75fa8dda1eSWu Hao  * afu_dma_unpin_pages - unpin pages of given dma memory region
76fa8dda1eSWu Hao  * @pdata: feature device platform data
77fa8dda1eSWu Hao  * @region: dma memory region to be unpinned
78fa8dda1eSWu Hao  *
79fa8dda1eSWu Hao  * Unpin all the pages of given dfl_afu_dma_region.
80fa8dda1eSWu Hao  * Return 0 for success or negative error code.
81fa8dda1eSWu Hao  */
afu_dma_unpin_pages(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)82fa8dda1eSWu Hao static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
83fa8dda1eSWu Hao 				struct dfl_afu_dma_region *region)
84fa8dda1eSWu Hao {
85fa8dda1eSWu Hao 	long npages = region->length >> PAGE_SHIFT;
86fa8dda1eSWu Hao 	struct device *dev = &pdata->dev->dev;
87fa8dda1eSWu Hao 
88*55dc9b87SJohn Hubbard 	unpin_user_pages(region->pages, npages);
89fa8dda1eSWu Hao 	kfree(region->pages);
9079eb597cSDaniel Jordan 	account_locked_vm(current->mm, npages, false);
91fa8dda1eSWu Hao 
92fa8dda1eSWu Hao 	dev_dbg(dev, "%ld pages unpinned\n", npages);
93fa8dda1eSWu Hao }
94fa8dda1eSWu Hao 
95fa8dda1eSWu Hao /**
96fa8dda1eSWu Hao  * afu_dma_check_continuous_pages - check if pages are continuous
97fa8dda1eSWu Hao  * @region: dma memory region
98fa8dda1eSWu Hao  *
99fa8dda1eSWu Hao  * Return true if pages of given dma memory region have continuous physical
100fa8dda1eSWu Hao  * address, otherwise return false.
101fa8dda1eSWu Hao  */
afu_dma_check_continuous_pages(struct dfl_afu_dma_region * region)102fa8dda1eSWu Hao static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
103fa8dda1eSWu Hao {
104fa8dda1eSWu Hao 	int npages = region->length >> PAGE_SHIFT;
105fa8dda1eSWu Hao 	int i;
106fa8dda1eSWu Hao 
107fa8dda1eSWu Hao 	for (i = 0; i < npages - 1; i++)
108fa8dda1eSWu Hao 		if (page_to_pfn(region->pages[i]) + 1 !=
109fa8dda1eSWu Hao 				page_to_pfn(region->pages[i + 1]))
110fa8dda1eSWu Hao 			return false;
111fa8dda1eSWu Hao 
112fa8dda1eSWu Hao 	return true;
113fa8dda1eSWu Hao }
114fa8dda1eSWu Hao 
115fa8dda1eSWu Hao /**
116fa8dda1eSWu Hao  * dma_region_check_iova - check if memory area is fully contained in the region
117fa8dda1eSWu Hao  * @region: dma memory region
118fa8dda1eSWu Hao  * @iova: address of the dma memory area
119fa8dda1eSWu Hao  * @size: size of the dma memory area
120fa8dda1eSWu Hao  *
121fa8dda1eSWu Hao  * Compare the dma memory area defined by @iova and @size with given dma region.
122fa8dda1eSWu Hao  * Return true if memory area is fully contained in the region, otherwise false.
123fa8dda1eSWu Hao  */
dma_region_check_iova(struct dfl_afu_dma_region * region,u64 iova,u64 size)124fa8dda1eSWu Hao static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
125fa8dda1eSWu Hao 				  u64 iova, u64 size)
126fa8dda1eSWu Hao {
127fa8dda1eSWu Hao 	if (!size && region->iova != iova)
128fa8dda1eSWu Hao 		return false;
129fa8dda1eSWu Hao 
130fa8dda1eSWu Hao 	return (region->iova <= iova) &&
131fa8dda1eSWu Hao 		(region->length + region->iova >= iova + size);
132fa8dda1eSWu Hao }
133fa8dda1eSWu Hao 
134fa8dda1eSWu Hao /**
135fa8dda1eSWu Hao  * afu_dma_region_add - add given dma region to rbtree
136fa8dda1eSWu Hao  * @pdata: feature device platform data
137fa8dda1eSWu Hao  * @region: dma region to be added
138fa8dda1eSWu Hao  *
139fa8dda1eSWu Hao  * Return 0 for success, -EEXIST if dma region has already been added.
140fa8dda1eSWu Hao  *
141fa8dda1eSWu Hao  * Needs to be called with pdata->lock heold.
142fa8dda1eSWu Hao  */
afu_dma_region_add(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)143fa8dda1eSWu Hao static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
144fa8dda1eSWu Hao 			      struct dfl_afu_dma_region *region)
145fa8dda1eSWu Hao {
146fa8dda1eSWu Hao 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
147fa8dda1eSWu Hao 	struct rb_node **new, *parent = NULL;
148fa8dda1eSWu Hao 
149fa8dda1eSWu Hao 	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
150fa8dda1eSWu Hao 		(unsigned long long)region->iova);
151fa8dda1eSWu Hao 
152fa8dda1eSWu Hao 	new = &afu->dma_regions.rb_node;
153fa8dda1eSWu Hao 
154fa8dda1eSWu Hao 	while (*new) {
155fa8dda1eSWu Hao 		struct dfl_afu_dma_region *this;
156fa8dda1eSWu Hao 
157fa8dda1eSWu Hao 		this = container_of(*new, struct dfl_afu_dma_region, node);
158fa8dda1eSWu Hao 
159fa8dda1eSWu Hao 		parent = *new;
160fa8dda1eSWu Hao 
161fa8dda1eSWu Hao 		if (dma_region_check_iova(this, region->iova, region->length))
162fa8dda1eSWu Hao 			return -EEXIST;
163fa8dda1eSWu Hao 
164fa8dda1eSWu Hao 		if (region->iova < this->iova)
165fa8dda1eSWu Hao 			new = &((*new)->rb_left);
166fa8dda1eSWu Hao 		else if (region->iova > this->iova)
167fa8dda1eSWu Hao 			new = &((*new)->rb_right);
168fa8dda1eSWu Hao 		else
169fa8dda1eSWu Hao 			return -EEXIST;
170fa8dda1eSWu Hao 	}
171fa8dda1eSWu Hao 
172fa8dda1eSWu Hao 	rb_link_node(&region->node, parent, new);
173fa8dda1eSWu Hao 	rb_insert_color(&region->node, &afu->dma_regions);
174fa8dda1eSWu Hao 
175fa8dda1eSWu Hao 	return 0;
176fa8dda1eSWu Hao }
177fa8dda1eSWu Hao 
178fa8dda1eSWu Hao /**
179fa8dda1eSWu Hao  * afu_dma_region_remove - remove given dma region from rbtree
180fa8dda1eSWu Hao  * @pdata: feature device platform data
181fa8dda1eSWu Hao  * @region: dma region to be removed
182fa8dda1eSWu Hao  *
183fa8dda1eSWu Hao  * Needs to be called with pdata->lock heold.
184fa8dda1eSWu Hao  */
afu_dma_region_remove(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)185fa8dda1eSWu Hao static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
186fa8dda1eSWu Hao 				  struct dfl_afu_dma_region *region)
187fa8dda1eSWu Hao {
188fa8dda1eSWu Hao 	struct dfl_afu *afu;
189fa8dda1eSWu Hao 
190fa8dda1eSWu Hao 	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
191fa8dda1eSWu Hao 		(unsigned long long)region->iova);
192fa8dda1eSWu Hao 
193fa8dda1eSWu Hao 	afu = dfl_fpga_pdata_get_private(pdata);
194fa8dda1eSWu Hao 	rb_erase(&region->node, &afu->dma_regions);
195fa8dda1eSWu Hao }
196fa8dda1eSWu Hao 
197fa8dda1eSWu Hao /**
198fa8dda1eSWu Hao  * afu_dma_region_destroy - destroy all regions in rbtree
199fa8dda1eSWu Hao  * @pdata: feature device platform data
200fa8dda1eSWu Hao  *
201fa8dda1eSWu Hao  * Needs to be called with pdata->lock heold.
202fa8dda1eSWu Hao  */
afu_dma_region_destroy(struct dfl_feature_platform_data * pdata)203fa8dda1eSWu Hao void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
204fa8dda1eSWu Hao {
205fa8dda1eSWu Hao 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
206fa8dda1eSWu Hao 	struct rb_node *node = rb_first(&afu->dma_regions);
207fa8dda1eSWu Hao 	struct dfl_afu_dma_region *region;
208fa8dda1eSWu Hao 
209fa8dda1eSWu Hao 	while (node) {
210fa8dda1eSWu Hao 		region = container_of(node, struct dfl_afu_dma_region, node);
211fa8dda1eSWu Hao 
212fa8dda1eSWu Hao 		dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
213fa8dda1eSWu Hao 			(unsigned long long)region->iova);
214fa8dda1eSWu Hao 
215fa8dda1eSWu Hao 		rb_erase(node, &afu->dma_regions);
216fa8dda1eSWu Hao 
217fa8dda1eSWu Hao 		if (region->iova)
218fa8dda1eSWu Hao 			dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
219fa8dda1eSWu Hao 				       region->iova, region->length,
220fa8dda1eSWu Hao 				       DMA_BIDIRECTIONAL);
221fa8dda1eSWu Hao 
222fa8dda1eSWu Hao 		if (region->pages)
223fa8dda1eSWu Hao 			afu_dma_unpin_pages(pdata, region);
224fa8dda1eSWu Hao 
225fa8dda1eSWu Hao 		node = rb_next(node);
226fa8dda1eSWu Hao 		kfree(region);
227fa8dda1eSWu Hao 	}
228fa8dda1eSWu Hao }
229fa8dda1eSWu Hao 
230fa8dda1eSWu Hao /**
231fa8dda1eSWu Hao  * afu_dma_region_find - find the dma region from rbtree based on iova and size
232fa8dda1eSWu Hao  * @pdata: feature device platform data
233fa8dda1eSWu Hao  * @iova: address of the dma memory area
234fa8dda1eSWu Hao  * @size: size of the dma memory area
235fa8dda1eSWu Hao  *
236fa8dda1eSWu Hao  * It finds the dma region from the rbtree based on @iova and @size:
237fa8dda1eSWu Hao  * - if @size == 0, it finds the dma region which starts from @iova
238fa8dda1eSWu Hao  * - otherwise, it finds the dma region which fully contains
239fa8dda1eSWu Hao  *   [@iova, @iova+size)
240fa8dda1eSWu Hao  * If nothing is matched returns NULL.
241fa8dda1eSWu Hao  *
242fa8dda1eSWu Hao  * Needs to be called with pdata->lock held.
243fa8dda1eSWu Hao  */
244fa8dda1eSWu Hao struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data * pdata,u64 iova,u64 size)245fa8dda1eSWu Hao afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
246fa8dda1eSWu Hao {
247fa8dda1eSWu Hao 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
248fa8dda1eSWu Hao 	struct rb_node *node = afu->dma_regions.rb_node;
249fa8dda1eSWu Hao 	struct device *dev = &pdata->dev->dev;
250fa8dda1eSWu Hao 
251fa8dda1eSWu Hao 	while (node) {
252fa8dda1eSWu Hao 		struct dfl_afu_dma_region *region;
253fa8dda1eSWu Hao 
254fa8dda1eSWu Hao 		region = container_of(node, struct dfl_afu_dma_region, node);
255fa8dda1eSWu Hao 
256fa8dda1eSWu Hao 		if (dma_region_check_iova(region, iova, size)) {
257fa8dda1eSWu Hao 			dev_dbg(dev, "find region (iova = %llx)\n",
258fa8dda1eSWu Hao 				(unsigned long long)region->iova);
259fa8dda1eSWu Hao 			return region;
260fa8dda1eSWu Hao 		}
261fa8dda1eSWu Hao 
262fa8dda1eSWu Hao 		if (iova < region->iova)
263fa8dda1eSWu Hao 			node = node->rb_left;
264fa8dda1eSWu Hao 		else if (iova > region->iova)
265fa8dda1eSWu Hao 			node = node->rb_right;
266fa8dda1eSWu Hao 		else
267fa8dda1eSWu Hao 			/* the iova region is not fully covered. */
268fa8dda1eSWu Hao 			break;
269fa8dda1eSWu Hao 	}
270fa8dda1eSWu Hao 
271fa8dda1eSWu Hao 	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
272fa8dda1eSWu Hao 		(unsigned long long)iova, (unsigned long long)size);
273fa8dda1eSWu Hao 
274fa8dda1eSWu Hao 	return NULL;
275fa8dda1eSWu Hao }
276fa8dda1eSWu Hao 
/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Convenience wrapper around afu_dma_region_find() with size 0, i.e. it
 * only matches a region whose start address equals @iova exactly.
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}
289fa8dda1eSWu Hao 
/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 *
 * Pipeline: validate input -> pin user pages -> require physically
 * continuous pages -> DMA-map as one contiguous chunk -> register in the
 * rbtree. Each failure unwinds exactly the steps already taken.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check Inputs, only accept page-aligned user memory region with
	 * valid length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept continuous pages, return error else */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/* As pages are continuous then start to do DMA mapping */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	/*
	 * NOTE(review): *iova is written before the region is registered, so
	 * it stays set even if afu_dma_region_add() fails below — callers
	 * must only trust it on a zero return.
	 */
	*iova = region->iova;

	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}
371fa8dda1eSWu Hao 
372fa8dda1eSWu Hao /**
373fa8dda1eSWu Hao  * afu_dma_unmap_region - unmap dma memory region
374fa8dda1eSWu Hao  * @pdata: feature device platform data
375fa8dda1eSWu Hao  * @iova: dma address of the region
376fa8dda1eSWu Hao  *
377fa8dda1eSWu Hao  * Unmap dma memory region based on @iova.
378fa8dda1eSWu Hao  * Return 0 for success, otherwise error code.
379fa8dda1eSWu Hao  */
afu_dma_unmap_region(struct dfl_feature_platform_data * pdata,u64 iova)380fa8dda1eSWu Hao int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
381fa8dda1eSWu Hao {
382fa8dda1eSWu Hao 	struct dfl_afu_dma_region *region;
383fa8dda1eSWu Hao 
384fa8dda1eSWu Hao 	mutex_lock(&pdata->lock);
385fa8dda1eSWu Hao 	region = afu_dma_region_find_iova(pdata, iova);
386fa8dda1eSWu Hao 	if (!region) {
387fa8dda1eSWu Hao 		mutex_unlock(&pdata->lock);
388fa8dda1eSWu Hao 		return -EINVAL;
389fa8dda1eSWu Hao 	}
390fa8dda1eSWu Hao 
391fa8dda1eSWu Hao 	if (region->in_use) {
392fa8dda1eSWu Hao 		mutex_unlock(&pdata->lock);
393fa8dda1eSWu Hao 		return -EBUSY;
394fa8dda1eSWu Hao 	}
395fa8dda1eSWu Hao 
396fa8dda1eSWu Hao 	afu_dma_region_remove(pdata, region);
397fa8dda1eSWu Hao 	mutex_unlock(&pdata->lock);
398fa8dda1eSWu Hao 
399fa8dda1eSWu Hao 	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
400fa8dda1eSWu Hao 		       region->iova, region->length, DMA_BIDIRECTIONAL);
401fa8dda1eSWu Hao 	afu_dma_unpin_pages(pdata, region);
402fa8dda1eSWu Hao 	kfree(region);
403fa8dda1eSWu Hao 
404fa8dda1eSWu Hao 	return 0;
405fa8dda1eSWu Hao }
406