/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * The hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
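/*
 * Worked example (assuming a 4 KiB base page size, i.e. PAGE_SHIFT == 12):
 * (1 << 18) >> 12 == 64 pages, i.e. 256 KiB of alignment.  With 64 KiB
 * pages (PAGE_SHIFT == 16) the same expression works out to 4 pages.
 */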
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
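/*
 * Example: booting with "kvm_cma_resv_ratio=10" on the kernel command
 * line reserves 10% of memory instead of the default 5%.
 */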

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
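
/*
 * Illustrative caller sketch (not verbatim upstream code): the HPT
 * allocator tries the CMA pool first and remembers where the memory
 * came from so it can be freed the same way, roughly:
 *
 *	page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
 *	if (page)
 *		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	...
 *	kvm_release_hpt(virt_to_page(hpt), 1ul << (order - PAGE_SHIFT));
 */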

/**
 * kvm_cma_reserve() - reserve area for kvm hash page table
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
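
/*
 * Example with the default kvm_cma_resv_ratio of 5: on a machine with
 * 16 GiB of memory the loop above counts ~4 M pages (with 4 KiB pages),
 * so selected_size comes out to roughly 819 MiB, which CMA then reserves
 * with 256 KiB alignment and 256 KiB chunk granularity.
 */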

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

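/*
 * Illustrative use (not verbatim upstream code): the secondary CPU
 * online path can refuse to bring up a thread of a core while HV
 * guests exist, along the lines of:
 *
 *	if (kvm_hv_mode_active() && cpu_thread_in_core(cpu) != 0)
 *		return -EBUSY;
 *
 * Bracketing the counter updates with get/put_online_cpus() below keeps
 * such checks consistent with concurrent CPU hotplug.
 */
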
void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

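/*
 * hcall numbers are multiples of 4, so dividing by 4 gives the index
 * into hcall_real_table, which has one entry per hcall and is non-zero
 * where a real-mode handler is implemented.  For example, H_ENTER
 * (0x04 in PAPR) maps to index 1.
 */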
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);