xref: /openbmc/linux/arch/x86/xen/setup.c (revision 79a93295)
1 /*
2  * Machine specific setup for xen
3  *
4  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
5  */
6 
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pm.h>
11 #include <linux/memblock.h>
12 #include <linux/cpuidle.h>
13 #include <linux/cpufreq.h>
14 
15 #include <asm/elf.h>
16 #include <asm/vdso.h>
17 #include <asm/e820.h>
18 #include <asm/setup.h>
19 #include <asm/acpi.h>
20 #include <asm/numa.h>
21 #include <asm/xen/hypervisor.h>
22 #include <asm/xen/hypercall.h>
23 
24 #include <xen/xen.h>
25 #include <xen/page.h>
26 #include <xen/interface/callback.h>
27 #include <xen/interface/memory.h>
28 #include <xen/interface/physdev.h>
29 #include <xen/features.h>
30 #include <xen/hvc-console.h>
31 #include "xen-ops.h"
32 #include "vdso.h"
33 #include "mmu.h"
34 
35 #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
36 
37 /* Amount of extra memory space we add to the e820 ranges */
38 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
39 
40 /* Number of pages released from the initial allocation. */
41 unsigned long xen_released_pages;
42 
43 /* E820 map used during memory setup. */
44 static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
45 static u32 xen_e820_map_entries __initdata;
46 
47 /*
48  * Buffer used to remap identity mapped pages. We only need the virtual space.
49  * The physical page behind this address is remapped as needed to different
50  * buffer pages.
51  */
52 #define REMAP_SIZE	(P2M_PER_PAGE - 3)
53 static struct {
54 	unsigned long	next_area_mfn;
55 	unsigned long	target_pfn;
56 	unsigned long	size;
57 	unsigned long	mfns[REMAP_SIZE];
58 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
59 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
60 
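/*
 * Illustration (not part of the original file): the three bookkeeping
 * members plus REMAP_SIZE mfn slots add up to exactly P2M_PER_PAGE
 * unsigned longs, i.e. one page. A build-time check expressing that
 * invariant could look like:
 *
 *	BUILD_BUG_ON(sizeof(xen_remap_buf) != PAGE_SIZE);
 */
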
61 /*
62  * The maximum amount of extra memory compared to the base size.  The
63  * main scaling factor is the size of struct page.  At extreme ratios
64  * of base:extra, all the base memory can be filled with page
65  * structures for the extra memory, leaving no space for anything
66  * else.
67  *
68  * 10x seems like a reasonable balance between scaling flexibility and
69  * leaving a practically usable system.
70  */
71 #define EXTRA_MEM_RATIO		(10)
72 
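/*
 * Worked example (illustrative figures, not from the original file):
 * with a 4 GiB base allocation the ratio above caps extra memory at
 * 40 GiB. Backing 40 GiB with 4 KiB pages takes about 10.5 million
 * struct pages; at 64 bytes each that is roughly 640 MiB of page
 * structures, about a sixth of the base memory, so the system stays
 * usable.
 */
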
73 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
74 
75 static void __init xen_parse_512gb(void)
76 {
77 	bool val = false;
78 	char *arg;
79 
80 	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
81 	if (!arg)
82 		return;
83 
84 	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
85 	if (!arg)
86 		val = true;
87 	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
88 		return;
89 
90 	xen_512gb_limit = val;
91 }
92 
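/*
 * Command line semantics implemented above (summary for illustration;
 * strtobool() accepts the usual 0/1/y/n spellings):
 *
 *	(option absent)		keep the CONFIG_XEN_512GB default
 *	"xen_512gb_limit"	enable the limit
 *	"xen_512gb_limit=1"	enable the limit
 *	"xen_512gb_limit=0"	disable the limit
 *	"xen_512gb_limit=?"	unparseable, keep the default
 */
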
93 static void __init xen_add_extra_mem(unsigned long start_pfn,
94 				     unsigned long n_pfns)
95 {
96 	int i;
97 
98 	/*
99 	 * No need to check for zero size; that should happen rarely and would
100 	 * only write a new entry which is regarded as unused due to its zero size.
101 	 */
102 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
103 		/* Add new region. */
104 		if (xen_extra_mem[i].n_pfns == 0) {
105 			xen_extra_mem[i].start_pfn = start_pfn;
106 			xen_extra_mem[i].n_pfns = n_pfns;
107 			break;
108 		}
109 		/* Append to existing region. */
110 		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
111 		    start_pfn) {
112 			xen_extra_mem[i].n_pfns += n_pfns;
113 			break;
114 		}
115 	}
116 	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
117 		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
118 
119 	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
120 }
121 
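/*
 * Illustration (hypothetical pfn values): a call adjacent to an existing
 * region is appended to it instead of consuming a second array slot:
 *
 *	xen_add_extra_mem(0x100000, 0x1000);	slot 0: [0x100000, 0x101000)
 *	xen_add_extra_mem(0x101000, 0x2000);	slot 0: [0x100000, 0x103000)
 */
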
122 static void __init xen_del_extra_mem(unsigned long start_pfn,
123 				     unsigned long n_pfns)
124 {
125 	int i;
126 	unsigned long start_r, size_r;
127 
128 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
129 		start_r = xen_extra_mem[i].start_pfn;
130 		size_r = xen_extra_mem[i].n_pfns;
131 
132 		/* Start of region. */
133 		if (start_r == start_pfn) {
134 			BUG_ON(n_pfns > size_r);
135 			xen_extra_mem[i].start_pfn += n_pfns;
136 			xen_extra_mem[i].n_pfns -= n_pfns;
137 			break;
138 		}
139 		/* End of region. */
140 		if (start_r + size_r == start_pfn + n_pfns) {
141 			BUG_ON(n_pfns > size_r);
142 			xen_extra_mem[i].n_pfns -= n_pfns;
143 			break;
144 		}
145 		/* Middle of region. */
146 		if (start_pfn > start_r && start_pfn < start_r + size_r) {
147 			BUG_ON(start_pfn + n_pfns > start_r + size_r);
148 			xen_extra_mem[i].n_pfns = start_pfn - start_r;
149 			/* Calling memblock_reserve() again is okay. */
150 			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
151 					  (start_pfn + n_pfns));
152 			break;
153 		}
154 	}
155 	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
156 }
157 
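/*
 * Illustration of the "middle of region" case (hypothetical values):
 * deleting [0x101000, 0x102000) from the region [0x100000, 0x103000)
 * shrinks the entry to [0x100000, 0x101000) and re-adds the tail
 * [0x102000, 0x103000) via xen_add_extra_mem(), which re-reserves it in
 * memblock before the deleted range itself is freed.
 */
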
158 /*
159  * Called during boot before the p2m list can take entries beyond the
160  * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
161  * invalid.
162  */
163 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
164 {
165 	int i;
166 
167 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
168 		if (pfn >= xen_extra_mem[i].start_pfn &&
169 		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
170 			return INVALID_P2M_ENTRY;
171 	}
172 
173 	return IDENTITY_FRAME(pfn);
174 }
175 
176 /*
177  * Mark all pfns of extra mem as invalid in p2m list.
178  */
179 void __init xen_inv_extra_mem(void)
180 {
181 	unsigned long pfn, pfn_s, pfn_e;
182 	int i;
183 
184 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
185 		if (!xen_extra_mem[i].n_pfns)
186 			continue;
187 		pfn_s = xen_extra_mem[i].start_pfn;
188 		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
189 		for (pfn = pfn_s; pfn < pfn_e; pfn++)
190 			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
191 	}
192 }
193 
194 /*
195  * Finds the next RAM pfn available in the E820 map after min_pfn.
196  * This function updates min_pfn with the pfn found and returns
197  * the size of that range or zero if not found.
198  */
199 static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
200 {
201 	const struct e820entry *entry = xen_e820_map;
202 	unsigned int i;
203 	unsigned long done = 0;
204 
205 	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
206 		unsigned long s_pfn;
207 		unsigned long e_pfn;
208 
209 		if (entry->type != E820_RAM)
210 			continue;
211 
212 		e_pfn = PFN_DOWN(entry->addr + entry->size);
213 
214 		/* We only care about entries ending above *min_pfn */
215 		if (e_pfn <= *min_pfn)
216 			continue;
217 
218 		s_pfn = PFN_UP(entry->addr);
219 
220 		/* If min_pfn falls within the E820 entry, we want to start
221 		 * at the min_pfn PFN.
222 		 */
223 		if (s_pfn <= *min_pfn) {
224 			done = e_pfn - *min_pfn;
225 		} else {
226 			done = e_pfn - s_pfn;
227 			*min_pfn = s_pfn;
228 		}
229 		break;
230 	}
231 
232 	return done;
233 }
234 
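/*
 * Worked example (hypothetical map): with RAM entries covering
 * [0x1000, 0x9f000) and [4 GiB, 8 GiB) and *min_pfn == 0x1000, the low
 * entry ends at or below *min_pfn and is skipped; the 4 GiB entry starts
 * above it, so *min_pfn is updated to 0x100000 and 0x100000 pages are
 * returned.
 */
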
235 static int __init xen_free_mfn(unsigned long mfn)
236 {
237 	struct xen_memory_reservation reservation = {
238 		.address_bits = 0,
239 		.extent_order = 0,
240 		.domid        = DOMID_SELF
241 	};
242 
243 	set_xen_guest_handle(reservation.extent_start, &mfn);
244 	reservation.nr_extents = 1;
245 
246 	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
247 }
248 
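/*
 * Note (illustration): XENMEM_decrease_reservation returns the number of
 * extents actually released, so a return value of 1 means the single mfn
 * passed in was handed back to the hypervisor; the caller below treats
 * anything else as a failure.
 */
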
249 /*
250  * This releases a chunk of memory and then does the identity map. It's used
251  * as a fallback if the remapping fails.
252  */
253 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
254 			unsigned long end_pfn, unsigned long nr_pages)
255 {
256 	unsigned long pfn, end;
257 	int ret;
258 
259 	WARN_ON(start_pfn > end_pfn);
260 
261 	/* Release pages first. */
262 	end = min(end_pfn, nr_pages);
263 	for (pfn = start_pfn; pfn < end; pfn++) {
264 		unsigned long mfn = pfn_to_mfn(pfn);
265 
266 		/* Make sure pfn exists to start with */
267 		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
268 			continue;
269 
270 		ret = xen_free_mfn(mfn);
271 		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
272 
273 		if (ret == 1) {
274 			xen_released_pages++;
275 			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
276 				break;
277 		} else
278 			break;
279 	}
280 
281 	set_phys_range_identity(start_pfn, end_pfn);
282 }
283 
284 /*
285  * Helper function to update the p2m and m2p tables and kernel mapping.
286  */
287 static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
288 {
289 	struct mmu_update update = {
290 		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
291 		.val = pfn
292 	};
293 
294 	/* Update p2m */
295 	if (!set_phys_to_machine(pfn, mfn)) {
296 		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
297 		     pfn, mfn);
298 		BUG();
299 	}
300 
301 	/* Update m2p */
302 	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
303 		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
304 		     mfn, pfn);
305 		BUG();
306 	}
307 
308 	/* Update kernel mapping, but not for highmem. */
309 	if (pfn >= PFN_UP(__pa(high_memory - 1)))
310 		return;
311 
312 	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
313 					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
314 		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
315 		      mfn, pfn);
316 		BUG();
317 	}
318 }
319 
320 /*
321  * This function updates the p2m and m2p tables with an identity map from
322  * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
323  * original allocation at remap_pfn. The information needed for remapping is
324  * saved in the memory itself to avoid the need for allocating buffers. The
325  * complete remap information is contained in a list of MFNs each containing
326  * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
327  * This enables us to preserve the original mfn sequence while doing the
328  * remapping at a time when the memory management is capable of allocating
329  * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
330  * its callers.
331  */
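/*
 * Sketch of the resulting list (illustration, assuming two chunks were
 * saved; the first page of each chunk doubles as the list node, newest
 * chunk first):
 *
 *	xen_remap_mfn -> [ next_area_mfn | target_pfn | size | mfns[] ]
 *	                        |
 *	                        v
 *	                 [ INVALID_P2M_ENTRY | target_pfn | size | mfns[] ]
 */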
332 static void __init xen_do_set_identity_and_remap_chunk(
333         unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
334 {
335 	unsigned long buf = (unsigned long)&xen_remap_buf;
336 	unsigned long mfn_save, mfn;
337 	unsigned long ident_pfn_iter, remap_pfn_iter;
338 	unsigned long ident_end_pfn = start_pfn + size;
339 	unsigned long left = size;
340 	unsigned int i, chunk;
341 
342 	WARN_ON(size == 0);
343 
344 	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
345 
346 	mfn_save = virt_to_mfn(buf);
347 
348 	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
349 	     ident_pfn_iter < ident_end_pfn;
350 	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
351 		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
352 
353 		/* Map first pfn to xen_remap_buf */
354 		mfn = pfn_to_mfn(ident_pfn_iter);
355 		set_pte_mfn(buf, mfn, PAGE_KERNEL);
356 
357 		/* Save mapping information in page */
358 		xen_remap_buf.next_area_mfn = xen_remap_mfn;
359 		xen_remap_buf.target_pfn = remap_pfn_iter;
360 		xen_remap_buf.size = chunk;
361 		for (i = 0; i < chunk; i++)
362 			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
363 
364 		/* Put remap buf into list. */
365 		xen_remap_mfn = mfn;
366 
367 		/* Set identity map */
368 		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
369 
370 		left -= chunk;
371 	}
372 
373 	/* Restore old xen_remap_buf mapping */
374 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
375 }
376 
377 /*
378  * This function takes a contiguous pfn range that needs to be identity mapped
379  * and:
380  *
381  *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
382  *  2) Calls the do_ function to actually do the mapping/remapping work.
383  *
384  * The goal is to not allocate additional memory but to remap the existing
385  * pages. In the case of an error the underlying memory is simply released back
386  * to Xen and not remapped.
387  */
388 static unsigned long __init xen_set_identity_and_remap_chunk(
389 	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
390 	unsigned long remap_pfn)
391 {
392 	unsigned long pfn;
393 	unsigned long i = 0;
394 	unsigned long n = end_pfn - start_pfn;
395 
396 	if (remap_pfn == 0)
397 		remap_pfn = nr_pages;
398 
399 	while (i < n) {
400 		unsigned long cur_pfn = start_pfn + i;
401 		unsigned long left = n - i;
402 		unsigned long size = left;
403 		unsigned long remap_range_size;
404 
405 		/* Do not remap pages beyond the current allocation */
406 		if (cur_pfn >= nr_pages) {
407 			/* Identity map remaining pages */
408 			set_phys_range_identity(cur_pfn, cur_pfn + size);
409 			break;
410 		}
411 		if (cur_pfn + size > nr_pages)
412 			size = nr_pages - cur_pfn;
413 
414 		remap_range_size = xen_find_pfn_range(&remap_pfn);
415 		if (!remap_range_size) {
416 			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
417 			xen_set_identity_and_release_chunk(cur_pfn,
418 						cur_pfn + left, nr_pages);
419 			break;
420 		}
421 		/* Adjust size to fit in current e820 RAM region */
422 		if (size > remap_range_size)
423 			size = remap_range_size;
424 
425 		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
426 
427 		/* Update variables to reflect new mappings. */
428 		i += size;
429 		remap_pfn += size;
430 	}
431 
432 	/*
433 	 * If the PFNs are currently mapped, the VA mapping also needs
434 	 * to be updated to be 1:1.
435 	 */
436 	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
437 		(void)HYPERVISOR_update_va_mapping(
438 			(unsigned long)__va(pfn << PAGE_SHIFT),
439 			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
440 
441 	return remap_pfn;
442 }
443 
444 static unsigned long __init xen_count_remap_pages(
445 	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
446 	unsigned long remap_pages)
447 {
448 	if (start_pfn >= nr_pages)
449 		return remap_pages;
450 
451 	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
452 }
453 
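/*
 * Worked example (hypothetical values): for a non-RAM range
 * [start_pfn, end_pfn) = [0x80000, 0x90000) with nr_pages == 0x88000,
 * only [0x80000, 0x88000) has RAM of the initial allocation behind it,
 * so 0x8000 pages are added to the running total.
 */
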
454 static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
455 	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
456 			      unsigned long nr_pages, unsigned long last_val))
457 {
458 	phys_addr_t start = 0;
459 	unsigned long ret_val = 0;
460 	const struct e820entry *entry = xen_e820_map;
461 	int i;
462 
463 	/*
464 	 * Combine non-RAM regions and gaps until a RAM region (or the
465 	 * end of the map) is reached, then call the provided function
466 	 * to perform its duty on the non-RAM region.
467 	 *
468 	 * The combined non-RAM regions are rounded to a whole number
469 	 * of pages so any partial pages are accessible via the 1:1
470 	 * mapping.  This is needed for some BIOSes that put (for
471 	 * example) the DMI tables in a reserved region that begins on
472 	 * a non-page boundary.
473 	 */
474 	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
475 		phys_addr_t end = entry->addr + entry->size;
476 		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
477 			unsigned long start_pfn = PFN_DOWN(start);
478 			unsigned long end_pfn = PFN_UP(end);
479 
480 			if (entry->type == E820_RAM)
481 				end_pfn = PFN_UP(entry->addr);
482 
483 			if (start_pfn < end_pfn)
484 				ret_val = func(start_pfn, end_pfn, nr_pages,
485 					       ret_val);
486 			start = end;
487 		}
488 	}
489 
490 	return ret_val;
491 }
492 
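/*
 * Illustration (hypothetical map): given the entries
 *
 *	[0x0     , 0x9fc00 ) RAM
 *	[0x9fc00 , 0xf0000 ) reserved
 *	[0x100000, ...     ) RAM
 *
 * the reserved entry and the gap up to the next RAM entry are handed to
 * func() as the single pfn range [0x9f, 0x100), rounded outwards so the
 * partial page at 0x9f000 remains accessible via the 1:1 mapping.
 */
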
493 /*
494  * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
495  * The remap information (which mfn is remapped to which pfn) is contained
496  * in the memory to be remapped itself, in a linked list anchored at
497  * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
498  * order, while the resulting mapping is independent of that order.
499  */
500 void __init xen_remap_memory(void)
501 {
502 	unsigned long buf = (unsigned long)&xen_remap_buf;
503 	unsigned long mfn_save, mfn, pfn;
504 	unsigned long remapped = 0;
505 	unsigned int i;
506 	unsigned long pfn_s = ~0UL;
507 	unsigned long len = 0;
508 
509 	mfn_save = virt_to_mfn(buf);
510 
511 	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
512 		/* Map the remap information */
513 		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
514 
515 		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
516 
517 		pfn = xen_remap_buf.target_pfn;
518 		for (i = 0; i < xen_remap_buf.size; i++) {
519 			mfn = xen_remap_buf.mfns[i];
520 			xen_update_mem_tables(pfn, mfn);
521 			remapped++;
522 			pfn++;
523 		}
524 		if (pfn_s == ~0UL || pfn == pfn_s) {
525 			pfn_s = xen_remap_buf.target_pfn;
526 			len += xen_remap_buf.size;
527 		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
528 			len += xen_remap_buf.size;
529 		} else {
530 			xen_del_extra_mem(pfn_s, len);
531 			pfn_s = xen_remap_buf.target_pfn;
532 			len = xen_remap_buf.size;
533 		}
534 
535 		mfn = xen_remap_mfn;
536 		xen_remap_mfn = xen_remap_buf.next_area_mfn;
537 	}
538 
539 	if (pfn_s != ~0UL && len)
540 		xen_del_extra_mem(pfn_s, len);
541 
542 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
543 
544 	pr_info("Remapped %ld page(s)\n", remapped);
545 }
546 
547 static unsigned long __init xen_get_pages_limit(void)
548 {
549 	unsigned long limit;
550 
551 #ifdef CONFIG_X86_32
552 	limit = GB(64) / PAGE_SIZE;
553 #else
554 	limit = MAXMEM / PAGE_SIZE;
555 	if (!xen_initial_domain() && xen_512gb_limit)
556 		limit = GB(512) / PAGE_SIZE;
557 #endif
558 	return limit;
559 }
560 
561 static unsigned long __init xen_get_max_pages(void)
562 {
563 	unsigned long max_pages, limit;
564 	domid_t domid = DOMID_SELF;
565 	long ret;
566 
567 	limit = xen_get_pages_limit();
568 	max_pages = limit;
569 
570 	/*
571 	 * For the initial domain we use the maximum reservation as
572 	 * the maximum page.
573 	 *
574 	 * For guest domains the current maximum reservation reflects
575 	 * the current maximum rather than the static maximum. In this
576 	 * case the e820 map provided to us will cover the static
577 	 * maximum region.
578 	 */
579 	if (xen_initial_domain()) {
580 		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
581 		if (ret > 0)
582 			max_pages = ret;
583 	}
584 
585 	return min(max_pages, limit);
586 }
587 
588 static void __init xen_align_and_add_e820_region(phys_addr_t start,
589 						 phys_addr_t size, int type)
590 {
591 	phys_addr_t end = start + size;
592 
593 	/* Align RAM regions to page boundaries. */
594 	if (type == E820_RAM) {
595 		start = PAGE_ALIGN(start);
596 		end &= ~((phys_addr_t)PAGE_SIZE - 1);
597 	}
598 
599 	e820_add_region(start, end - start, type);
600 }
601 
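/*
 * Worked example (hypothetical region): a RAM entry covering
 * [0x1800, 0x5800) is narrowed inwards to the page-aligned range
 * [0x2000, 0x5000) before being added, while non-RAM entries are passed
 * through unchanged.
 */
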
602 static void __init xen_ignore_unusable(void)
603 {
604 	struct e820entry *entry = xen_e820_map;
605 	unsigned int i;
606 
607 	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
608 		if (entry->type == E820_UNUSABLE)
609 			entry->type = E820_RAM;
610 	}
611 }
612 
613 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
614 {
615 	struct e820entry *entry;
616 	unsigned mapcnt;
617 	phys_addr_t end;
618 
619 	if (!size)
620 		return false;
621 
622 	end = start + size;
623 	entry = xen_e820_map;
624 
625 	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
626 		if (entry->type == E820_RAM && entry->addr <= start &&
627 		    (entry->addr + entry->size) >= end)
628 			return false;
629 
630 		entry++;
631 	}
632 
633 	return true;
634 }
635 
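/*
 * Usage note (illustration, hypothetical addresses): the range counts as
 * reserved unless a single RAM entry covers it completely:
 *
 *	xen_is_e820_reserved(0x100000, 0x1000)	false, inside one RAM entry
 *	xen_is_e820_reserved(0x9e000, 0x4000)	true, straddles RAM/reserved
 */
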
636 /*
637  * Find a free area in physical memory which is not yet reserved and is
638  * compliant with the E820 map.
639  * Used to relocate pre-allocated areas like the initrd or the p2m list
640  * which conflict with the E820 map to be used.
641  * In case no area is found, return 0. Otherwise return the physical address
642  * of the area, which is already reserved for convenience.
643  */
644 phys_addr_t __init xen_find_free_area(phys_addr_t size)
645 {
646 	unsigned mapcnt;
647 	phys_addr_t addr, start;
648 	struct e820entry *entry = xen_e820_map;
649 
650 	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
651 		if (entry->type != E820_RAM || entry->size < size)
652 			continue;
653 		start = entry->addr;
654 		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
655 			if (!memblock_is_reserved(addr))
656 				continue;
657 			start = addr + PAGE_SIZE;
658 			if (start + size > entry->addr + entry->size)
659 				break;
660 		}
661 		if (addr >= start + size) {
662 			memblock_reserve(start, size);
663 			return start;
664 		}
665 	}
666 
667 	return 0;
668 }
669 
670 /*
671  * Like memcpy, but with physical addresses for dest and src.
672  */
673 static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
674 				   phys_addr_t n)
675 {
676 	phys_addr_t dest_off, src_off, dest_len, src_len, len;
677 	void *from, *to;
678 
679 	while (n) {
680 		dest_off = dest & ~PAGE_MASK;
681 		src_off = src & ~PAGE_MASK;
682 		dest_len = n;
683 		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
684 			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
685 		src_len = n;
686 		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
687 			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
688 		len = min(dest_len, src_len);
689 		to = early_memremap(dest - dest_off, dest_len + dest_off);
690 		from = early_memremap(src - src_off, src_len + src_off);
691 		memcpy(to, from, len);
692 		early_memunmap(to, dest_len + dest_off);
693 		early_memunmap(from, src_len + src_off);
694 		n -= len;
695 		dest += len;
696 		src += len;
697 	}
698 }
699 
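/*
 * Note (illustration): each early_memremap() window spans at most
 * NR_FIX_BTMAPS pages, so the loop above copies in chunks of
 * min(dest_len, src_len) bytes, re-deriving the window offsets from
 * dest and src until n is exhausted.
 */
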
700 /*
701  * Reserve Xen mfn_list.
702  */
703 static void __init xen_reserve_xen_mfnlist(void)
704 {
705 	phys_addr_t start, size;
706 
707 	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
708 		start = __pa(xen_start_info->mfn_list);
709 		size = PFN_ALIGN(xen_start_info->nr_pages *
710 				 sizeof(unsigned long));
711 	} else {
712 		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
713 		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
714 	}
715 
716 	if (!xen_is_e820_reserved(start, size)) {
717 		memblock_reserve(start, size);
718 		return;
719 	}
720 
721 #ifdef CONFIG_X86_32
722 	/*
723 	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
724 	 * is not supported, so just give up.
725 	 */
726 	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
727 	BUG();
728 #else
729 	xen_relocate_p2m();
730 #endif
731 }
732 
733 /**
734  * machine_specific_memory_setup - Hook for machine specific memory setup.
735  */
736 char * __init xen_memory_setup(void)
737 {
738 	unsigned long max_pfn, pfn_s, n_pfns;
739 	phys_addr_t mem_end, addr, size, chunk_size;
740 	u32 type;
741 	int rc;
742 	struct xen_memory_map memmap;
743 	unsigned long max_pages;
744 	unsigned long extra_pages = 0;
745 	int i;
746 	int op;
747 
748 	xen_parse_512gb();
749 	max_pfn = xen_get_pages_limit();
750 	max_pfn = min(max_pfn, xen_start_info->nr_pages);
751 	mem_end = PFN_PHYS(max_pfn);
752 
753 	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
754 	set_xen_guest_handle(memmap.buffer, xen_e820_map);
755 
756 	op = xen_initial_domain() ?
757 		XENMEM_machine_memory_map :
758 		XENMEM_memory_map;
759 	rc = HYPERVISOR_memory_op(op, &memmap);
760 	if (rc == -ENOSYS) {
761 		BUG_ON(xen_initial_domain());
762 		memmap.nr_entries = 1;
763 		xen_e820_map[0].addr = 0ULL;
764 		xen_e820_map[0].size = mem_end;
765 		/* 8MB slack (to balance backend allocations). */
766 		xen_e820_map[0].size += 8ULL << 20;
767 		xen_e820_map[0].type = E820_RAM;
768 		rc = 0;
769 	}
770 	BUG_ON(rc);
771 	BUG_ON(memmap.nr_entries == 0);
772 	xen_e820_map_entries = memmap.nr_entries;
773 
774 	/*
775 	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
776 	 * regions, so if we're using the machine memory map leave the
777 	 * region as RAM as it is in the pseudo-physical map.
778 	 *
779 	 * UNUSABLE regions in domUs are not handled and will need
780 	 * a patch in the future.
781 	 */
782 	if (xen_initial_domain())
783 		xen_ignore_unusable();
784 
785 	/* Make sure the Xen-supplied memory map is well-ordered. */
786 	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
787 			  &xen_e820_map_entries);
788 
789 	max_pages = xen_get_max_pages();
790 
791 	/* How many extra pages do we need due to remapping? */
792 	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
793 
794 	if (max_pages > max_pfn)
795 		extra_pages += max_pages - max_pfn;
796 
797 	/*
798 	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO
799 	 * times the base size.  On non-highmem systems, the base
800 	 * size is the full initial memory allocation; on highmem it
801 	 * is limited to the max size of lowmem, so that it doesn't
802 	 * get completely filled.
803 	 *
804 	 * Make sure we have no memory above max_pages, as this area
805 	 * isn't handled by the p2m management.
806 	 *
807 	 * In principle there could be a problem in lowmem systems if
808 	 * the initial memory is also very large with respect to
809 	 * lowmem, but we won't try to deal with that here.
810 	 */
811 	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
812 			   extra_pages, max_pages - max_pfn);
813 	i = 0;
814 	addr = xen_e820_map[0].addr;
815 	size = xen_e820_map[0].size;
816 	while (i < xen_e820_map_entries) {
817 		bool discard = false;
818 
819 		chunk_size = size;
820 		type = xen_e820_map[i].type;
821 
822 		if (type == E820_RAM) {
823 			if (addr < mem_end) {
824 				chunk_size = min(size, mem_end - addr);
825 			} else if (extra_pages) {
826 				chunk_size = min(size, PFN_PHYS(extra_pages));
827 				pfn_s = PFN_UP(addr);
828 				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
829 				extra_pages -= n_pfns;
830 				xen_add_extra_mem(pfn_s, n_pfns);
831 				xen_max_p2m_pfn = pfn_s + n_pfns;
832 			} else
833 				discard = true;
834 		}
835 
836 		if (!discard)
837 			xen_align_and_add_e820_region(addr, chunk_size, type);
838 
839 		addr += chunk_size;
840 		size -= chunk_size;
841 		if (size == 0) {
842 			i++;
843 			if (i < xen_e820_map_entries) {
844 				addr = xen_e820_map[i].addr;
845 				size = xen_e820_map[i].size;
846 			}
847 		}
848 	}
849 
850 	/*
851 	 * Set the rest as identity mapped, in case PCI BARs are
852 	 * located here.
853 	 */
854 	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
855 
856 	/*
857 	 * In domU, the ISA region is normal, usable memory, but we
858 	 * reserve ISA memory anyway because too many things poke
859 	 * about in there.
860 	 */
861 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
862 			E820_RESERVED);
863 
864 	sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
865 
866 	/*
867 	 * Check whether the kernel itself conflicts with the target E820 map.
868 	 * Failing now is better than running into weird problems later due
869 	 * to relocating (and even reusing) pages with kernel text or data.
870 	 */
871 	if (xen_is_e820_reserved(__pa_symbol(_text),
872 			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
873 		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
874 		BUG();
875 	}
876 
877 	/*
878 	 * Check for a conflict of the hypervisor supplied page tables with
879 	 * the target E820 map.
880 	 */
881 	xen_pt_check_e820();
882 
883 	xen_reserve_xen_mfnlist();
884 
885 	/* Check for a conflict of the initrd with the target E820 map. */
886 	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
887 				 boot_params.hdr.ramdisk_size)) {
888 		phys_addr_t new_area, start, size;
889 
890 		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
891 		if (!new_area) {
892 			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
893 			BUG();
894 		}
895 
896 		start = boot_params.hdr.ramdisk_image;
897 		size = boot_params.hdr.ramdisk_size;
898 		xen_phys_memcpy(new_area, start, size);
899 		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
900 			start, start + size, new_area, new_area + size);
901 		memblock_free(start, size);
902 		boot_params.hdr.ramdisk_image = new_area;
903 		boot_params.ext_ramdisk_image = new_area >> 32;
904 	}
905 
906 	/*
907 	 * Set identity map on non-RAM pages and prepare remapping the
908 	 * underlying RAM.
909 	 */
910 	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
911 
912 	pr_info("Released %ld page(s)\n", xen_released_pages);
913 
914 	return "Xen";
915 }
916 
917 /*
918  * Machine specific memory setup for auto-translated guests.
919  */
920 char * __init xen_auto_xlated_memory_setup(void)
921 {
922 	struct xen_memory_map memmap;
923 	int i;
924 	int rc;
925 
926 	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
927 	set_xen_guest_handle(memmap.buffer, xen_e820_map);
928 
929 	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
930 	if (rc < 0)
931 		panic("No memory map (%d)\n", rc);
932 
933 	xen_e820_map_entries = memmap.nr_entries;
934 
935 	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
936 			  &xen_e820_map_entries);
937 
938 	for (i = 0; i < xen_e820_map_entries; i++)
939 		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
940 				xen_e820_map[i].type);
941 
942 	/* Remove p2m info, it is not needed. */
943 	xen_start_info->mfn_list = 0;
944 	xen_start_info->first_p2m_pfn = 0;
945 	xen_start_info->nr_p2m_frames = 0;
946 
947 	return "Xen";
948 }
949 
950 /*
951  * Set the bit indicating "nosegneg" library variants should be used.
952  * We only need to bother in pure 32-bit mode; compat 32-bit processes
953  * can have un-truncated segments, so wrapping around is allowed.
954  */
955 static void __init fiddle_vdso(void)
956 {
957 #ifdef CONFIG_X86_32
958 	u32 *mask = vdso_image_32.data +
959 		vdso_image_32.sym_VDSO32_NOTE_MASK;
960 	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
961 #endif
962 }
963 
964 static int register_callback(unsigned type, const void *func)
965 {
966 	struct callback_register callback = {
967 		.type = type,
968 		.address = XEN_CALLBACK(__KERNEL_CS, func),
969 		.flags = CALLBACKF_mask_events,
970 	};
971 
972 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
973 }
974 
975 void xen_enable_sysenter(void)
976 {
977 	int ret;
978 	unsigned sysenter_feature;
979 
980 #ifdef CONFIG_X86_32
981 	sysenter_feature = X86_FEATURE_SEP;
982 #else
983 	sysenter_feature = X86_FEATURE_SYSENTER32;
984 #endif
985 
986 	if (!boot_cpu_has(sysenter_feature))
987 		return;
988 
989 	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
990 	if (ret != 0)
991 		setup_clear_cpu_cap(sysenter_feature);
992 }
993 
994 void xen_enable_syscall(void)
995 {
996 #ifdef CONFIG_X86_64
997 	int ret;
998 
999 	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
1000 	if (ret != 0) {
1001 		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
1002 		/* Pretty fatal; 64-bit userspace has no other
1003 		   mechanism for syscalls. */
1004 	}
1005 
1006 	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
1007 		ret = register_callback(CALLBACKTYPE_syscall32,
1008 					xen_syscall32_target);
1009 		if (ret != 0)
1010 			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
1011 	}
1012 #endif /* CONFIG_X86_64 */
1013 }
1014 
1015 void __init xen_pvmmu_arch_setup(void)
1016 {
1017 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
1018 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
1019 
1020 	HYPERVISOR_vm_assist(VMASST_CMD_enable,
1021 			     VMASST_TYPE_pae_extended_cr3);
1022 
1023 	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
1024 	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
1025 		BUG();
1026 
1027 	xen_enable_sysenter();
1028 	xen_enable_syscall();
1029 }
1030 
1031 /* This function is not called for HVM domains */
1032 void __init xen_arch_setup(void)
1033 {
1034 	xen_panic_handler_init();
1035 	if (!xen_feature(XENFEAT_auto_translated_physmap))
1036 		xen_pvmmu_arch_setup();
1037 
1038 #ifdef CONFIG_ACPI
1039 	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
1040 		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
1041 		disable_acpi();
1042 	}
1043 #endif
1044 
1045 	memcpy(boot_command_line, xen_start_info->cmd_line,
1046 	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
1047 	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
1048 
1049 	/* Set up idle, making sure it calls safe_halt() pvop */
1050 	disable_cpuidle();
1051 	disable_cpufreq();
1052 	WARN_ON(xen_set_default_idle());
1053 	fiddle_vdso();
1054 #ifdef CONFIG_NUMA
1055 	numa_off = 1;
1056 #endif
1057 }
1058