xref: /openbmc/linux/arch/s390/mm/cmm.c (revision 64c70b1c)
/*
 *  arch/s390/mm/cmm.c
 *
 *  S390 version
 *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Collaborative memory management interface.
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>

static char *sender = "VMRMSVM";
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
		 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

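/*
 * Pages given back to the hypervisor are tracked in a linked list of
 * page-sized arrays: each cmm_page_array holds CMM_NR_PAGES page
 * addresses plus a link pointer and a fill index.
 */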
#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;

static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static struct task_struct *cmm_thread_ptr;
static wait_queue_head_t cmm_thread_wait;
static struct timer_list cmm_timer;

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

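/*
 * Take nr pages out of the guest's buddy allocator, mark each one as
 * unused for the hypervisor with diag10 (DIAGNOSE 0x10) and record its
 * address on *list. Returns the number of pages that could not be
 * allocated.
 */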
static long
cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;

	while (nr) {
		addr = __get_free_page(GFP_NOIO);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = *list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)
				__get_free_page(GFP_NOIO);
			if (!npa) {
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = *list;
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				*list = pa;
			} else
				free_page((unsigned long) npa);
		}
		diag10(addr);
		pa->pages[pa->index++] = addr;
		(*counter)++;
		spin_unlock(&cmm_lock);
		nr--;
	}
	return nr;
}

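/*
 * Give nr pages back to the guest: pop their addresses off *list and
 * return them to the buddy allocator. Returns the number of pages that
 * could not be freed because the list ran empty.
 */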
static long
cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	spin_lock(&cmm_lock);
	pa = *list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->pages[--pa->index];
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) *list);
			*list = pa;
		}
		free_page(addr);
		(*counter)--;
		nr--;
	}
	spin_unlock(&cmm_lock);
	return nr;
}

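/*
 * OOM notifier callback: hand back up to 256 loaned pages (timed pages
 * first) so the guest can make progress, adjust both targets to the new
 * counts and report the number of pages freed via *freed.
 */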
static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = 256;

	nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
	if (nr > 0)
		nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
	cmm_pages_target = cmm_pages;
	cmm_timed_pages_target = cmm_timed_pages;
	*freed += 256 - nr;
	return NOTIFY_OK;
}

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};

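/*
 * Background thread: sleep until one of the page counters no longer
 * matches its target, then allocate or free one page at a time towards
 * the targets. Rearms the release timer whenever timed pages are held.
 */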
static int
cmm_thread(void *dummy)
{
	int rc;

	while (1) {
		rc = wait_event_interruptible(cmm_thread_wait,
			(cmm_pages != cmm_pages_target ||
			 cmm_timed_pages != cmm_timed_pages_target ||
			 kthread_should_stop()));
		if (kthread_should_stop() || rc == -ERESTARTSYS) {
			cmm_pages_target = cmm_pages;
			cmm_timed_pages_target = cmm_timed_pages;
			break;
		}
		if (cmm_pages_target > cmm_pages) {
			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
				cmm_pages_target = cmm_pages;
		} else if (cmm_pages_target < cmm_pages) {
			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
		}
		if (cmm_timed_pages_target > cmm_timed_pages) {
			if (cmm_alloc_pages(1, &cmm_timed_pages,
					    &cmm_timed_page_list))
				cmm_timed_pages_target = cmm_timed_pages;
		} else if (cmm_timed_pages_target < cmm_timed_pages) {
			cmm_free_pages(1, &cmm_timed_pages,
				       &cmm_timed_page_list);
		}
		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
			cmm_set_timer();
	}
	return 0;
}

static void
cmm_kick_thread(void)
{
	wake_up(&cmm_thread_wait);
}

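/*
 * (Re)arm the timer that periodically returns timed pages, or cancel it
 * if there is nothing left to return or no timeout is configured.
 */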
static void
cmm_set_timer(void)
{
	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
		if (timer_pending(&cmm_timer))
			del_timer(&cmm_timer);
		return;
	}
	if (timer_pending(&cmm_timer)) {
		if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
			return;
	}
	cmm_timer.function = cmm_timer_fn;
	cmm_timer.data = 0;
	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
	add_timer(&cmm_timer);
}

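/*
 * Timer callback: lower the timed pages target by cmm_timeout_pages
 * (but not below zero), wake the thread to free the pages and rearm
 * the timer.
 */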
static void
cmm_timer_fn(unsigned long ignored)
{
	long nr;

	nr = cmm_timed_pages_target - cmm_timeout_pages;
	if (nr < 0)
		cmm_timed_pages_target = 0;
	else
		cmm_timed_pages_target = nr;
	cmm_kick_thread();
	cmm_set_timer();
}

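/*
 * Exported interface, used by the sysctl handlers and the SMSG parser
 * below: set or query the static and timed page targets and configure
 * the timed page release rate.
 */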
void
cmm_set_pages(long nr)
{
	cmm_pages_target = nr;
	cmm_kick_thread();
}

long
cmm_get_pages(void)
{
	return cmm_pages;
}

void
cmm_add_timed_pages(long nr)
{
	cmm_timed_pages_target += nr;
	cmm_kick_thread();
}

long
cmm_get_timed_pages(void)
{
	return cmm_timed_pages;
}

void
cmm_set_timeout(long nr, long seconds)
{
	cmm_timeout_pages = nr;
	cmm_timeout_seconds = seconds;
	cmm_set_timer();
}

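/*
 * Skip over spaces and tabs; returns nonzero if at least one blank
 * character was skipped.
 */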
static int
cmm_skip_blanks(char *cp, char **endp)
{
	char *str;

	for (str = cp; *str == ' ' || *str == '\t'; str++)
		;
	*endp = str;
	return str != cp;
}

#ifdef CONFIG_CMM_PROC

static struct ctl_table cmm_table[];

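/*
 * Handler for the vm.cmm_pages and vm.cmm_timed_pages sysctls: a write
 * parses a page count and updates the corresponding target, a read
 * prints the current number of pages held.
 */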
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[16], *p;
	long nr;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		if (ctl == &cmm_table[0])
			cmm_set_pages(nr);
		else
			cmm_add_timed_pages(nr);
	} else {
		if (ctl == &cmm_table[0])
			nr = cmm_get_pages();
		else
			nr = cmm_get_timed_pages();
		len = sprintf(buf, "%ld\n", nr);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

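/*
 * Handler for the vm.cmm_timeout sysctl: a write expects a
 * "<pages> <seconds>" pair, a read prints the currently configured
 * release rate.
 */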
static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[64], *p;
	long nr, seconds;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		cmm_skip_blanks(p, &p);
		seconds = simple_strtoul(p, &p, 0);
		cmm_set_timeout(nr, seconds);
	} else {
		len = sprintf(buf, "%ld %ld\n",
			      cmm_timeout_pages, cmm_timeout_seconds);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table cmm_table[] = {
	{
		.ctl_name	= VM_CMM_PAGES,
		.procname	= "cmm_pages",
		.mode		= 0644,
		.proc_handler	= &cmm_pages_handler,
	},
	{
		.ctl_name	= VM_CMM_TIMED_PAGES,
		.procname	= "cmm_timed_pages",
		.mode		= 0644,
		.proc_handler	= &cmm_pages_handler,
	},
	{
		.ctl_name	= VM_CMM_TIMEOUT,
		.procname	= "cmm_timeout",
		.mode		= 0644,
		.proc_handler	= &cmm_timeout_handler,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table cmm_dir_table[] = {
	{
		.ctl_name	= CTL_VM,
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cmm_table,
	},
	{ .ctl_name = 0 }
};
#endif

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
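/*
 * Parse SMSG messages of the form "CMM SHRINK <n>", "CMM RELEASE <n>"
 * and "CMM REUSE <n> <seconds>" from the configured sender guest and
 * translate them into the matching cmm_* calls.
 */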
static void
cmm_smsg_target(char *from, char *msg)
{
	long nr, seconds;

	if (strlen(sender) > 0 && strcmp(from, sender) != 0)
		return;
	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
		return;
	if (strncmp(msg, "SHRINK", 6) == 0) {
		if (!cmm_skip_blanks(msg + 6, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_pages(nr);
	} else if (strncmp(msg, "RELEASE", 7) == 0) {
		if (!cmm_skip_blanks(msg + 7, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_add_timed_pages(nr);
	} else if (strncmp(msg, "REUSE", 5) == 0) {
		if (!cmm_skip_blanks(msg + 5, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		if (!cmm_skip_blanks(msg, &msg))
			return;
		seconds = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_timeout(nr, seconds);
	}
}
#endif

static struct ctl_table_header *cmm_sysctl_header;

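/*
 * Module init: register the sysctl table, the SMSG callback and the OOM
 * notifier (depending on the configuration), then start the cmm thread.
 * Undoes all registrations if any step fails.
 */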
static int
cmm_init(void)
{
	int rc = -ENOMEM;

#ifdef CONFIG_CMM_PROC
	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
	if (!cmm_sysctl_header)
		goto out;
#endif
#ifdef CONFIG_CMM_IUCV
	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	init_waitqueue_head(&cmm_thread_wait);
	init_timer(&cmm_timer);
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
	if (!rc)
		goto out;
	/*
	 * kthread_run failed. Undo all the registrations from above.
	 */
	unregister_oom_notifier(&cmm_oom_nb);

out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
out:
	return rc;
}

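/*
 * Module exit: stop the thread, unregister the OOM notifier, give all
 * loaned pages back to the guest and remove the sysctl table and the
 * SMSG callback.
 */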
static void
cmm_exit(void)
{
	kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}

module_init(cmm_init);
module_exit(cmm_exit);

EXPORT_SYMBOL(cmm_set_pages);
EXPORT_SYMBOL(cmm_get_pages);
EXPORT_SYMBOL(cmm_add_timed_pages);
EXPORT_SYMBOL(cmm_get_timed_pages);
EXPORT_SYMBOL(cmm_set_timeout);

MODULE_LICENSE("GPL");