/*
 * arch/s390/mm/cmm.c
 *
 * S390 version
 *   Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Collaborative memory management interface.
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/suspend.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/diag.h>

static char *sender = "VMRMSVM";
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
		 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static int cmm_suspended;

static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static struct task_struct *cmm_thread_ptr;
static wait_queue_head_t cmm_thread_wait;
static struct timer_list cmm_timer;

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

/*
 * Allocate pages from the kernel, hand them to the hypervisor via
 * diagnose 0x10 and remember them on the given page list.
 */
static long
cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;

	while (nr) {
		addr = __get_free_page(GFP_NOIO);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = *list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)
				__get_free_page(GFP_NOIO);
			if (!npa) {
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = *list;
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				*list = pa;
			} else
				free_page((unsigned long) npa);
		}
		diag10(addr);
		pa->pages[pa->index++] = addr;
		(*counter)++;
		spin_unlock(&cmm_lock);
		nr--;
	}
	return nr;
}

/*
 * Take pages off the given page list and return them to the kernel
 * page allocator.
 */
static long
cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	spin_lock(&cmm_lock);
	pa = *list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->pages[--pa->index];
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) *list);
			*list = pa;
		}
		free_page(addr);
		(*counter)--;
		nr--;
	}
	spin_unlock(&cmm_lock);
	return nr;
}

static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = 256;

	nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
	if (nr > 0)
		nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
	cmm_pages_target = cmm_pages;
	cmm_timed_pages_target = cmm_timed_pages;
	*freed += 256 - nr;
	return NOTIFY_OK;
}

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};

/*
 * Background thread that adjusts the page pools, one page at a time,
 * until the counters match their targets.
 */
static int
cmm_thread(void *dummy)
{
	int rc;

	while (1) {
		rc = wait_event_interruptible(cmm_thread_wait,
			(!cmm_suspended && (cmm_pages != cmm_pages_target ||
			 cmm_timed_pages != cmm_timed_pages_target)) ||
			 kthread_should_stop());
		if (kthread_should_stop() || rc == -ERESTARTSYS) {
			cmm_pages_target = cmm_pages;
			cmm_timed_pages_target = cmm_timed_pages;
			break;
		}
		if (cmm_pages_target > cmm_pages) {
			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
				cmm_pages_target = cmm_pages;
		} else if (cmm_pages_target < cmm_pages) {
			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
		}
		if (cmm_timed_pages_target > cmm_timed_pages) {
			if (cmm_alloc_pages(1, &cmm_timed_pages,
					    &cmm_timed_page_list))
				cmm_timed_pages_target = cmm_timed_pages;
		} else if (cmm_timed_pages_target < cmm_timed_pages) {
			cmm_free_pages(1, &cmm_timed_pages,
				       &cmm_timed_page_list);
		}
		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
			cmm_set_timer();
	}
	return 0;
}

static void
cmm_kick_thread(void)
{
	wake_up(&cmm_thread_wait);
}

static void
cmm_set_timer(void)
{
	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
		if (timer_pending(&cmm_timer))
			del_timer(&cmm_timer);
		return;
	}
	if (timer_pending(&cmm_timer)) {
		if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
			return;
	}
	cmm_timer.function = cmm_timer_fn;
	cmm_timer.data = 0;
	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
	add_timer(&cmm_timer);
}

static void
cmm_timer_fn(unsigned long ignored)
{
	long nr;

	nr = cmm_timed_pages_target - cmm_timeout_pages;
	if (nr < 0)
		cmm_timed_pages_target = 0;
	else
		cmm_timed_pages_target = nr;
	cmm_kick_thread();
	cmm_set_timer();
}

void
cmm_set_pages(long nr)
{
	cmm_pages_target = nr;
	cmm_kick_thread();
}

long
cmm_get_pages(void)
{
	return cmm_pages;
}

void
cmm_add_timed_pages(long nr)
{
	cmm_timed_pages_target += nr;
	cmm_kick_thread();
}

long
cmm_get_timed_pages(void)
{
	return cmm_timed_pages;
}

void
cmm_set_timeout(long nr, long seconds)
{
	cmm_timeout_pages = nr;
	cmm_timeout_seconds = seconds;
	cmm_set_timer();
}

static int
cmm_skip_blanks(char *cp, char **endp)
{
	char *str;

	for (str = cp; *str == ' ' || *str == '\t'; str++);
	*endp = str;
	return str != cp;
}

#ifdef CONFIG_CMM_PROC

static struct ctl_table cmm_table[];

static int
cmm_pages_handler(ctl_table *ctl, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[16], *p;
	long nr;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		if (ctl == &cmm_table[0])
			cmm_set_pages(nr);
		else
			cmm_add_timed_pages(nr);
	} else {
		if (ctl == &cmm_table[0])
			nr = cmm_get_pages();
		else
			nr = cmm_get_timed_pages();
		len = sprintf(buf, "%ld\n", nr);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static int
cmm_timeout_handler(ctl_table *ctl, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[64], *p;
	long nr, seconds;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		cmm_skip_blanks(p, &p);
		seconds = simple_strtoul(p, &p, 0);
		cmm_set_timeout(nr, seconds);
	} else {
		len = sprintf(buf, "%ld %ld\n",
			      cmm_timeout_pages, cmm_timeout_seconds);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table cmm_table[] = {
	{
		.procname	= "cmm_pages",
		.mode		= 0644,
		.proc_handler	= cmm_pages_handler,
	},
	{
		.procname	= "cmm_timed_pages",
		.mode		= 0644,
		.proc_handler	= cmm_pages_handler,
	},
	{
		.procname	= "cmm_timeout",
		.mode		= 0644,
		.proc_handler	= cmm_timeout_handler,
	},
	{ }
};

static struct ctl_table cmm_dir_table[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cmm_table,
	},
	{ }
};
#endif

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
/*
 * Handle SMSG commands of the form
 *	CMM SHRINK <pages>
 *	CMM RELEASE <pages>
 *	CMM REUSE <pages> <seconds>
 * Messages are only accepted from the configured sender guest.
 */
static void
cmm_smsg_target(char *from, char *msg)
{
	long nr, seconds;

	if (strlen(sender) > 0 && strcmp(from, sender) != 0)
		return;
	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
		return;
	if (strncmp(msg, "SHRINK", 6) == 0) {
		if (!cmm_skip_blanks(msg + 6, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_pages(nr);
	} else if (strncmp(msg, "RELEASE", 7) == 0) {
		if (!cmm_skip_blanks(msg + 7, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_add_timed_pages(nr);
	} else if (strncmp(msg, "REUSE", 5) == 0) {
		if (!cmm_skip_blanks(msg + 5, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		if (!cmm_skip_blanks(msg, &msg))
			return;
		seconds = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_timeout(nr, seconds);
	}
}
#endif

static struct ctl_table_header *cmm_sysctl_header;

static int cmm_suspend(void)
{
	cmm_suspended = 1;
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
	return 0;
}

static int cmm_resume(void)
{
	cmm_suspended = 0;
	cmm_kick_thread();
	return 0;
}

static int cmm_power_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	switch (event) {
	case PM_POST_HIBERNATION:
		return cmm_resume();
	case PM_HIBERNATION_PREPARE:
		return cmm_suspend();
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block cmm_power_notifier = {
	.notifier_call = cmm_power_event,
};

static int
cmm_init(void)
{
	int rc = -ENOMEM;

#ifdef CONFIG_CMM_PROC
	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
	if (!cmm_sysctl_header)
		goto out_sysctl;
#endif
#ifdef CONFIG_CMM_IUCV
	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	rc = register_pm_notifier(&cmm_power_notifier);
	if (rc)
		goto out_pm;
	init_waitqueue_head(&cmm_thread_wait);
	init_timer(&cmm_timer);
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
	if (rc)
		goto out_kthread;
	return 0;

out_kthread:
	unregister_pm_notifier(&cmm_power_notifier);
out_pm:
	unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
#endif
	return rc;
}

static void
cmm_exit(void)
{
	kthread_stop(cmm_thread_ptr);
	unregister_pm_notifier(&cmm_power_notifier);
	unregister_oom_notifier(&cmm_oom_nb);
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}

module_init(cmm_init);
module_exit(cmm_exit);

EXPORT_SYMBOL(cmm_set_pages);
EXPORT_SYMBOL(cmm_get_pages);
EXPORT_SYMBOL(cmm_add_timed_pages);
EXPORT_SYMBOL(cmm_get_timed_pages);
EXPORT_SYMBOL(cmm_set_timeout);

MODULE_LICENSE("GPL");
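
/*
 * Usage sketch (illustration only, not part of the original file): besides
 * the /proc/sys/vm/cmm_pages, cmm_timed_pages and cmm_timeout files and the
 * SMSG commands handled above, the page pools can be driven from other
 * kernel code through the exported cmm_* functions.  The demo functions
 * below are hypothetical and kept inside #if 0 so they are never compiled.
 */
#if 0
static int __init cmm_demo_init(void)
{
	cmm_set_pages(256);		/* permanently give up 256 pages */
	cmm_add_timed_pages(1024);	/* temporarily give up 1024 more */
	cmm_set_timeout(64, 30);	/* reclaim 64 timed pages every 30s */
	pr_info("cmm: %ld static, %ld timed pages given up\n",
		cmm_get_pages(), cmm_get_timed_pages());
	return 0;
}

static void __exit cmm_demo_exit(void)
{
	cmm_set_timeout(0, 0);		/* stop the release timer */
	cmm_set_pages(0);		/* reclaim the static pool */
	cmm_add_timed_pages(-cmm_get_timed_pages());	/* and the timed pool */
}
#endif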