#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* Not implemented on ARM yet. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long mfn, int nr,
			       pgprot_t prot, unsigned domid)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen device tree format.
 */
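/*
 * For illustration only: a "hypervisor" node of the shape this code
 * expects might look roughly like the following (the ABI version
 * suffix, the grant table region and the interrupt specifier are
 * placeholder values, not taken from a real platform):
 *
 *	hypervisor {
 *		compatible = "xen,xen-4.2", "xen,xen";
 *		reg = <0xb0000000 0x20000>;
 *		interrupts = <1 15 0xf08>;
 *	};
 *
 * xen_guest_init() below derives the Xen version from the "compatible"
 * suffix, the grant table frame from "reg" and the event channel
 * upcall interrupt from "interrupts".
 */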
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = NULL;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
		version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}

	/* Ask the hypervisor to map its shared_info page over the page we
	 * just allocated. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
core_initcall(xen_guest_init);

/* Per-cpu interrupt handler for the Xen event channel upcall. */
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	enable_percpu_irq(xen_events_irq, 0);

	return 0;
}
postcore_initcall(xen_init_events);

/* XXX: only until balloon is properly working */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	*pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
			     get_order(nr_pages));
	if (*pages == NULL)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);

void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	/* These pages come from alloc_pages() above, so release them as
	 * full pages. */
	__free_pages(*pages, get_order(nr_pages));
	*pages = NULL;
}
EXPORT_SYMBOL_GPL(free_xenballooned_pages);

/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(privcmd_call);