xref: /openbmc/linux/arch/arm/xen/p2m.c (revision b34e08d5)
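
/*
 * Xen p2m (physical-to-machine) tracking for ARM guests: a pair of
 * red-black trees, protected by a single rwlock, translates between
 * guest page frame numbers (pfn) and machine frame numbers (mfn).
 */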
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

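/*
 * One entry describes a contiguous run of nr_pages frames, mapping
 * pfn..pfn+nr_pages-1 to mfn..mfn+nr_pages-1. Each entry is linked into
 * both trees: rbnode_phys keys on pfn in phys_to_mach, rbnode_mach keys
 * on mfn in mach_to_phys.
 */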
struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_mach;
	struct rb_node rbnode_phys;
};

static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
static struct rb_root mach_to_phys = RB_ROOT;

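/*
 * Insert @new into the pfn-keyed phys_to_mach tree. Fails with -EINVAL
 * if an entry with the same pfn or mfn is encountered on the search
 * path. Caller must hold p2m_lock for writing.
 */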
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%lx -> mfn=%lx: pfn=%lx -> mfn=%lx already exists\n",
			__func__, new->pfn, new->mfn, entry->pfn, entry->mfn);
out:
	return rc;
}

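/*
 * Translate a guest frame number to the machine frame backing it.
 * Returns INVALID_P2M_ENTRY if no mapping covers @pfn.
 */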
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n = phys_to_mach.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->mfn + (pfn - entry->pfn);
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

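/*
 * Insert @new into the mfn-keyed mach_to_phys tree; the mirror image of
 * xen_add_phys_to_mach_entry(). Caller must hold p2m_lock for writing.
 */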
static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &mach_to_phys.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->mfn < entry->mfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_mach, parent, link);
	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%lx -> mfn=%lx: pfn=%lx -> mfn=%lx already exists\n",
			__func__, new->pfn, new->mfn, entry->pfn, entry->mfn);
out:
	return rc;
}

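/*
 * Translate a machine frame number back to the guest frame mapped to
 * it. Returns INVALID_P2M_ENTRY if no mapping covers @mfn.
 */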
unsigned long __mfn_to_pfn(unsigned long mfn)
{
	struct rb_node *n = mach_to_phys.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
		if (entry->mfn <= mfn &&
				entry->mfn + entry->nr_pages > mfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->pfn + (mfn - entry->mfn);
		}
		if (mfn < entry->mfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);

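/*
 * Record p2m entries for a batch of pages that have just been mapped
 * via GNTTABOP_map_grant_ref; slots whose status reports an error are
 * skipped.
 */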
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (map_ops[i].status)
			continue;
		set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

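/*
 * Invalidate the p2m entries for a batch of grant pages being unmapped,
 * resetting each to INVALID_P2M_ENTRY.
 */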
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

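/*
 * Create a mapping of @nr_pages contiguous frames starting at @pfn, or
 * tear down whichever range covers @pfn when @mfn is INVALID_P2M_ENTRY.
 * Allocates with GFP_NOWAIT since this can be called from atomic
 * context. Returns true on success.
 */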
bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n = phys_to_mach.rb_node;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
	if (!p2m_entry) {
		pr_warn("cannot allocate xen_p2m_entry\n");
		return false;
	}
	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	rc = xen_add_mach_to_phys_entry(p2m_entry);
	if (rc < 0) {
		/* Undo the phys_to_mach insertion before freeing the entry. */
		rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

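/* Single-page convenience wrapper around __set_phys_to_machine_multi(). */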
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

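/* Initialize the rwlock protecting both p2m trees. */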
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);