/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"

struct remap_pfn {
	struct mm_struct *mm;		/* target mm for the new PTEs */
	unsigned long pfn;		/* PTEs written so far, for unwinding */
	pgprot_t prot;			/* protection bits for the new PTEs */

	struct sgt_iter sgt;		/* cursor over the scatterlist */
	resource_size_t iobase;		/* DMA offset, or -1 to map by pfn */
};

/* An iobase of -1 means "no DMA address": map by page pfn instead. */
#define use_dma(io) ((io) != -1)

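/*
 * sgt_pfn() picks the page frame for the current position of the sg
 * cursor. Worked example with hypothetical values, assuming 4KiB pages
 * (PAGE_SHIFT == 12): for a DMA-mapped chunk at r->sgt.dma = 0x80000000
 * with r->sgt.curr = 0x3000 and iobase = 0, the result is
 * 0x80003000 >> 12 = 0x80003. With iobase == -1 the same chunk is
 * instead mapped by struct page, and the result is r->sgt.pfn + 3.
 */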
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

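/*
 * remap_sg() is the apply_to_page_range() callback: it is invoked once
 * per PTE across the requested range, writes one special PTE per call,
 * and advances the sg cursor a page at a time. A non-zero return stops
 * the walk early; r->pfn then records how many PTEs must be unwound.
 */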
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

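/*
 * The caller's mmap path is expected to have pre-flagged the vma as a
 * raw PFN mapping that can neither expand nor appear in a core dump;
 * that prevalidation is what allows remap_io_sg() to skip the
 * track_pfn() bookkeeping.
 */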
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Offset added to each sg entry's stored DMA address, or -1
 *          to map by pfn instead
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* Unwind the partial mapping: zap the r.pfn PTEs written. */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
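
/*
 * Usage sketch (hypothetical caller, not part of this file): a fault
 * handler in an mmap path, which already holds the mm semaphore, might
 * populate a whole vma from an object's backing scatterlist. The names
 * `obj`, `use_aperture` and `aperture_base` are illustrative only:
 *
 *	err = remap_io_sg(vma, vma->vm_start,
 *			  vma->vm_end - vma->vm_start,
 *			  obj->mm.pages->sgl,
 *			  use_aperture ? aperture_base : -1);
 *
 * Passing -1 as @iobase maps each page by its pfn; any other value maps
 * the DMA address of each sg entry, offset by @iobase.
 */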