#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
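/*
 * Hypothetical usage sketch only (in-tree, this map/unmap pair is driven
 * by drivers/xen/privcmd.c): map one foreign frame over a ballooned-out
 * page, then tear the mapping down again. "vma", "mfn" and "domid" are
 * assumed to come from the caller.
 *
 *	struct page *page;
 *
 *	if (alloc_xenballooned_pages(1, &page, false))
 *		return -ENOMEM;
 *	if (xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, 1,
 *				       vma->vm_page_prot, domid, &page))
 *		goto err;
 *	...
 *	xen_unmap_domain_mfn_range(vma, 1, &page);
 *	free_xenballooned_pages(1, &page);
 */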
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
	put_cpu();
}

static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
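/*
 * A minimal sketch of the node xen_guest_init() below looks for, with
 * illustrative values (the version suffix, grant table region and
 * interrupt specifier are platform specific):
 *
 *	hypervisor {
 *		compatible = "xen,xen-4.3", "xen,xen";
 *		reg = <0xb0000000 0x20000>;
 *		interrupts = <1 15 0xf08>;
 *	};
 */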
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = NULL;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	/*
	 * Make sure board-specific code will not set up ops for
	 * cpuidle and cpufreq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	return 0;
}
core_initcall(xen_guest_init);

static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
late_initcall(xen_pm_init);

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
postcore_initcall(xen_init_events);
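/*
 * Illustrative only: with the stubs exported below, a module can issue
 * hypercalls directly, e.g. decode the running hypervisor's version
 * (XENVER_version comes from xen/interface/version.h):
 *
 *	int v = HYPERVISOR_xen_version(XENVER_version, NULL);
 *	pr_info("running on Xen %d.%d\n", v >> 16, v & 0xffff);
 */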
/* The hypercall stubs below are implemented in hypervisor.S. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(privcmd_call);