xref: /openbmc/linux/fs/erofs/pcpubuf.c (revision 2a598d0b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Gao Xiang <xiang@kernel.org>
 *
 * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
 * per-CPU virtual memory (in pages) in advance to hold inplace I/O data
 * when inplace decompression fails (for example, due to an unmet inplace
 * margin).  See the usage sketch after erofs_put_pcpubuf() below.
 */
#include "internal.h"

struct erofs_pcpubuf {
	raw_spinlock_t lock;
	void *ptr;
	struct page **pages;
	unsigned int nrpages;
};

static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);

void *erofs_get_pcpubuf(unsigned int requiredpages)
	__acquires(pcb->lock)
{
	struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);

	raw_spin_lock(&pcb->lock);
	/* check if the per-CPU buffer is too small */
	if (requiredpages > pcb->nrpages) {
		raw_spin_unlock(&pcb->lock);
		put_cpu_var(erofs_pcb);
		/* (for sparse checker) pretend pcb->lock is still taken */
		__acquire(pcb->lock);
		return NULL;
	}
	return pcb->ptr;
}

void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
{
	struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());

	DBG_BUGON(pcb->ptr != ptr);
	raw_spin_unlock(&pcb->lock);
	put_cpu_var(erofs_pcb);
}
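
/*
 * Illustrative usage sketch (not part of the original file): the caller
 * pattern that the two helpers above expect.  On success the buffer is
 * returned with preemption disabled and pcb->lock held, so the caller must
 * not sleep until erofs_put_pcpubuf(); on failure both are already released
 * and the NULL return must not be "put".  "src" and "nrpages_needed" are
 * hypothetical names used only for this example.
 */
static int __maybe_unused erofs_pcpubuf_usage_sketch(const void *src,
						     unsigned int nrpages_needed)
{
	void *dst;

	dst = erofs_get_pcpubuf(nrpages_needed);
	if (!dst)	/* per-CPU buffer too small for this request */
		return -ENOMEM;

	/* the reserved pages are virtually consecutive, so one copy suffices */
	memcpy(dst, src, nrpages_needed * PAGE_SIZE);

	/* ... decompress from dst here ... */

	erofs_put_pcpubuf(dst);
	return 0;
}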

/* the next step: support hotplug of per-CPU page buffers */
int erofs_pcpubuf_growsize(unsigned int nrpages)
{
	static DEFINE_MUTEX(pcb_resize_mutex);
	static unsigned int pcb_nrpages;
	struct page *pagepool = NULL;
	int delta, cpu, ret, i;

	mutex_lock(&pcb_resize_mutex);
	delta = nrpages - pcb_nrpages;
	ret = 0;
	/*
	 * Avoid shrinking pcpubuf: there is no telling how many mounted
	 * filesystems still rely on the current size.
	 */
	if (delta <= 0)
		goto out;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
		struct page **pages, **oldpages;
		void *ptr, *old_ptr;

		pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			ret = -ENOMEM;
			break;
		}

		for (i = 0; i < nrpages; ++i) {
			pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
			if (!pages[i]) {
				ret = -ENOMEM;
				oldpages = pages;
				goto free_pagearray;
			}
		}
		ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
		if (!ptr) {
			ret = -ENOMEM;
			oldpages = pages;
			goto free_pagearray;
		}
		raw_spin_lock(&pcb->lock);
		old_ptr = pcb->ptr;
		pcb->ptr = ptr;
		oldpages = pcb->pages;
		pcb->pages = pages;
		i = pcb->nrpages;
		pcb->nrpages = nrpages;
		raw_spin_unlock(&pcb->lock);

		/* no previous buffer on this CPU, nothing to recycle */
		if (!oldpages) {
			DBG_BUGON(old_ptr);
			continue;
		}

		if (old_ptr)
			vunmap(old_ptr);
		/*
		 * Recycle either the previous (smaller) buffer on success or
		 * the partially-built new one on the error paths above.
		 */
free_pagearray:
		while (i)
			erofs_pagepool_add(&pagepool, oldpages[--i]);
		kfree(oldpages);
		if (ret)
			break;
	}
	pcb_nrpages = nrpages;
	erofs_release_pages(&pagepool);
out:
	mutex_unlock(&pcb_resize_mutex);
	return ret;
}
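
/*
 * Illustrative sketch (not part of the original file): erofs_pcpubuf_growsize()
 * may be called repeatedly as users come and go, and it only ever grows the
 * buffers.  The page counts below are arbitrary example values.
 */
static int __maybe_unused erofs_pcpubuf_growsize_sketch(void)
{
	int err;

	/* first user needs up to 4 pages of inplace I/O per request */
	err = erofs_pcpubuf_growsize(4);
	if (err)
		return err;

	/*
	 * A later, smaller request is a no-op: the buffers are never shrunk
	 * because other users may still rely on the current size.
	 */
	return erofs_pcpubuf_growsize(2);
}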

void __init erofs_pcpubuf_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);

		raw_spin_lock_init(&pcb->lock);
	}
}

void erofs_pcpubuf_exit(void)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);

		if (pcb->ptr) {
			vunmap(pcb->ptr);
			pcb->ptr = NULL;
		}
		if (!pcb->pages)
			continue;

		for (i = 0; i < pcb->nrpages; ++i)
			if (pcb->pages[i])
				put_page(pcb->pages[i]);
		kfree(pcb->pages);
		pcb->pages = NULL;
	}
}
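
/*
 * Illustrative lifecycle sketch (not part of the original file), based only
 * on the functions above: initialize the spinlocks once, grow the buffers on
 * demand, and tear everything down at the end.  The page count is an
 * arbitrary example value.
 */
static int __init __maybe_unused erofs_pcpubuf_lifecycle_sketch(void)
{
	int err;

	/* set up the per-CPU spinlocks; no pages are allocated yet */
	erofs_pcpubuf_init();

	/* allocate and vmap the per-CPU buffers once a size is known */
	err = erofs_pcpubuf_growsize(1);
	if (err)
		return err;

	/* unmap and release every per-CPU buffer */
	erofs_pcpubuf_exit();
	return 0;
}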