/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

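/*
 * Number of scatterlist entries that fit in the remainder of a
 * PAGE_SIZE allocation once the usnic_uiom_chunk header is accounted
 * for.  The divisor computes the size of a single page_list[] entry
 * without needing a chunk instance.
 */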
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

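/*
 * Deferred locked_vm accounting: runs on usnic_uiom_wq when a
 * registration is released on a path that cannot take mmap_sem
 * directly (see usnic_uiom_reg_release() below).
 */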
static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

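/*
 * IOMMU fault handler: logs the faulting device, domain, IOVA and
 * flags for diagnostics.  Returns -ENOSYS to indicate the fault was
 * not handled here.
 */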
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

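/*
 * Drop the references on every page pinned into chunk_list, marking
 * the pages dirty first if requested, and free the chunks.
 */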
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

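/*
 * Pin the user pages backing [addr, addr + size): check the
 * RLIMIT_MEMLOCK limit, pin the pages in batches with
 * get_user_pages(), and record them as scatterlist chunks on
 * chunk_list.  On failure, any pages pinned so far are released.
 */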
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	dma_addr_t pa;
	DEFINE_DMA_ATTRS(attrs);

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					1, !writable, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

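/*
 * Remove the IOMMU translations for each interval on the list.  The
 * unmap is done one page at a time (see the RH 970401 workaround
 * note below) instead of with a single larger iommu_unmap().
 */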
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx\n", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

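/*
 * Tear down a registration under pd->lock: remove its page intervals
 * from the interval tree, unmap them from the IOMMU domain, and
 * release the pinned pages, dirtying them only if the mapping was
 * writable.
 */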
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty && writable);
	spin_unlock(&pd->lock);
}

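/*
 * Program the IOMMU for each interval.  The pinned-page chunks and
 * the sorted intervals are walked in lockstep, and physically
 * contiguous pages are coalesced so each contiguous run is installed
 * with a single iommu_map() call.  On error, everything mapped so
 * far is unmapped again.
 */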
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

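/*
 * Register a user memory region with a protection domain: pin the
 * backing pages, compute which page intervals are not already
 * mapped, map those in the IOMMU, and record the full interval in
 * pd->rb_root.  Returns the new registration or an ERR_PTR.
 */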
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap
	 * and then remap the entry after fixing the permission
	 * because that would open a small window where hw DMA may
	 * page fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

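/*
 * Release a registration: unmap and unpin its pages, then subtract
 * them from the owning mm's locked_vm.  See the comment below for
 * why the accounting may have to be deferred to a workqueue.
 */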
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the locked_vm accounting to usnic_uiom_wq.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

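/*
 * Allocate a protection domain: an IOMMU domain on the PCI bus, the
 * (initially empty) list of attached devices, and a fault handler
 * for diagnostics.
 */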
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	/* iommu_domain_alloc() returns NULL (not an ERR_PTR) on failure */
	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain\n");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

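/*
 * Attach a device to the pd's IOMMU domain.  The domain must support
 * cache-coherent DMA (IOMMU_CAP_CACHE_COHERENCY), since all mappings
 * are created with IOMMU_CACHE set.
 */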
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

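/*
 * Detach a device from the pd's IOMMU domain and remove it from the
 * pd's device list.
 */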
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	kfree(uiom_dev);
	iommu_detach_device(pd->domain, dev);
}

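/*
 * Return a NULL-terminated array of the devices currently attached
 * to the pd, snapshotted under pd->lock.  The caller frees it with
 * usnic_uiom_free_dev_list().
 */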
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

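/*
 * Driver-wide setup: an IOMMU must be present on the PCI bus, and a
 * workqueue is created for the deferred locked_vm accounting done in
 * usnic_uiom_reg_account().
 */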
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}