#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pfn_pte(pfn, info->prot);

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
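/*
 * Illustrative sketch only, kept out of the build: how a caller such as the
 * privcmd driver might map a single foreign page into a userspace VMA with
 * xen_remap_domain_mfn_range(). The helper name and its arguments are
 * hypothetical; note that the implementation above rejects nr > 1.
 */
#if 0
static int example_map_one_foreign_page(struct vm_area_struct *vma,
					xen_pfn_t mfn, domid_t domid,
					struct page **pages)
{
	/* Map exactly one foreign frame at the start of the VMA. */
	return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, 1,
					  vma->vm_page_prot, domid, pages);
}
#endif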
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = NULL;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
		version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
core_initcall(xen_guest_init);
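/*
 * Illustrative sketch only, kept out of the build: the comment in
 * xen_guest_init() notes that secondary CPUs must register their own
 * vcpu_info via VCPUOP_register_vcpu_info as they come online. Assuming a
 * HYPERVISOR_vcpu_op hypercall wrapper and <xen/interface/vcpu.h> are
 * available on this architecture, that registration could look like the
 * following; the function name and the per-cpu backing storage are
 * hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

static void example_register_vcpu_info(unsigned int cpu)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup = &per_cpu(xen_vcpu_info, cpu);

	/* Tell Xen where this CPU's vcpu_info lives: page frame + offset. */
	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	/* Only switch the xen_vcpu pointer if the hypervisor accepted it. */
	if (HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info) == 0)
		per_cpu(xen_vcpu, cpu) = vcpup;
}
#endif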
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	enable_percpu_irq(xen_events_irq, 0);

	return 0;
}
postcore_initcall(xen_init_events);

/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(privcmd_call);
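/*
 * For reference, xen_guest_init() above probes for a device tree node of
 * roughly the following shape (see the binding in
 * Documentation/devicetree/bindings/arm/xen.txt); the version suffix,
 * region address and interrupt specifier here are examples only:
 *
 *	hypervisor {
 *		compatible = "xen,xen-4.2", "xen,xen";
 *		reg = <0xb0000000 0x20000>;
 *		interrupts = <1 15 0xf08>;
 *	};
 *
 * "reg" supplies the grant table region (index GRANT_TABLE_PHYSADDR) and
 * "interrupts" the event channel upcall IRQ that xen_init_events() requests
 * as a per-cpu IRQ.
 */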