#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_mach;
	struct rb_node rbnode_phys;
};

static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
static struct rb_root mach_to_phys = RB_ROOT;

static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n = phys_to_mach.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->mfn + (pfn - entry->pfn);
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &mach_to_phys.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->mfn < entry->mfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_mach, parent, link);
	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

unsigned long __mfn_to_pfn(unsigned long mfn)
{
	struct rb_node *n = mach_to_phys.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
		if (entry->mfn <= mfn &&
				entry->mfn + entry->nr_pages > mfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->pfn + (mfn - entry->mfn);
		}
		if (mfn < entry->mfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);

bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n = phys_to_mach.rb_node;

	/* An mfn of INVALID_P2M_ENTRY removes any existing mapping for pfn. */
	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
	if (!p2m_entry) {
		pr_warn("cannot allocate xen_p2m_entry\n");
		return false;
	}
	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0 ||
	    (rc = xen_add_mach_to_phys_entry(p2m_entry)) < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);