xref: /openbmc/linux/arch/arm/xen/p2m.c (revision 09de5cd2)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

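/*
 * A single pfn -> mfn translation range: nr_pages guest frames starting
 * at pfn are backed by machine frames starting at mfn.  Entries live in
 * the phys_to_mach rbtree, keyed by pfn.
 */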
struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_phys;
};

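/* Protects insertions into, removals from and lookups in phys_to_mach. */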
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);

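/*
 * Insert a new translation entry into the phys_to_mach tree.  Must be
 * called with p2m_lock held for writing.  Returns 0 on success and
 * -EINVAL if an entry with the same pfn already exists.
 */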
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

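/*
 * Look up the machine frame backing @pfn.  Walks the phys_to_mach tree
 * under the read lock and returns INVALID_P2M_ENTRY if no range covers
 * the pfn.
 */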
unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	/* Writers may rebalance the tree, so read the root under the lock. */
	n = phys_to_mach.rb_node;
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			unsigned long mfn = entry->mfn + (pfn - entry->pfn);
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return mfn;
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

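/*
 * Record p2m entries for a batch of freshly mapped grant references.  If
 * recording an entry fails, the slot is flagged with GNTST_general_error
 * and the grant is immediately unmapped again.
 */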
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct gnttab_unmap_grant_ref unmap;
		int rc;

		if (map_ops[i].status)
			continue;
		if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
			continue;

		/*
		 * Signal an error for this slot. This in turn requires
		 * immediate unmapping.
		 */
		map_ops[i].status = GNTST_general_error;
		unmap.host_addr = map_ops[i].host_addr;
		unmap.handle = map_ops[i].handle;
		map_ops[i].handle = INVALID_GRANT_HANDLE;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
		else
			unmap.dev_bus_addr = 0;

		/*
		 * Pre-populate the status field, to be recognizable in
		 * the log message below.
		 */
		unmap.status = 1;

		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					       &unmap, 1);
		if (rc || unmap.status != GNTST_okay)
			pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
				    rc, unmap.status);
	}

	return 0;
}

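/*
 * Drop the p2m entries for a batch of grant pages that have been
 * unmapped, marking each pfn as INVALID_P2M_ENTRY again.
 */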
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
				    INVALID_P2M_ENTRY);
	}

	return 0;
}

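/*
 * Establish (or, when mfn is INVALID_P2M_ENTRY, remove) a pfn -> mfn
 * mapping covering nr_pages pages.  Returns false if a new entry cannot
 * be allocated or clashes with an existing range.
 */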
bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		/* Read the root under the lock; other writers may rebalance the tree. */
		n = phys_to_mach.rb_node;
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
	if (!p2m_entry)
		return false;

	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	rc = xen_add_phys_to_mach_entry(p2m_entry);
	if (rc < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		kfree(p2m_entry);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

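/* Single-page wrapper around __set_phys_to_machine_multi(). */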
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

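/* Set up the lock protecting phys_to_mach; registered as an arch initcall. */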
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);