/*
 * Collaborative memory management interface.
 *
 * Copyright IBM Corp 2003,2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/diag.h>

#ifdef CONFIG_CMM_IUCV
static char *cmm_default_sender = "VMRMSVM";
#endif
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
		 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static int cmm_suspended;

static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);

static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
static DEFINE_TIMER(cmm_timer, NULL, 0, 0);

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

static long cmm_alloc_pages(long nr, long *counter,
			    struct cmm_page_array **list)
{
	struct cmm_page_array *pa, *npa;
	unsigned long addr;

	while (nr) {
		addr = __get_free_page(GFP_NOIO);
		if (!addr)
			break;
		spin_lock(&cmm_lock);
		pa = *list;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
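			/*
			 * The list page cannot be allocated while holding
			 * cmm_lock, since __get_free_page(GFP_NOIO) may
			 * sleep.  Drop the lock, allocate, then retake the
			 * lock and recheck the list head: it may have been
			 * changed in the meantime, e.g. by cmm_free_pages()
			 * running from the OOM notifier.
			 */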
			spin_unlock(&cmm_lock);
			npa = (struct cmm_page_array *)
				__get_free_page(GFP_NOIO);
			if (!npa) {
				free_page(addr);
				break;
			}
			spin_lock(&cmm_lock);
			pa = *list;
			if (!pa || pa->index >= CMM_NR_PAGES) {
				npa->next = pa;
				npa->index = 0;
				pa = npa;
				*list = pa;
			} else
				free_page((unsigned long) npa);
		}
		diag10(addr);
		pa->pages[pa->index++] = addr;
		(*counter)++;
		spin_unlock(&cmm_lock);
		nr--;
	}
	return nr;
}

static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long addr;

	spin_lock(&cmm_lock);
	pa = *list;
	while (nr) {
		if (!pa || pa->index <= 0)
			break;
		addr = pa->pages[--pa->index];
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) *list);
			*list = pa;
		}
		free_page(addr);
		(*counter)--;
		nr--;
	}
	spin_unlock(&cmm_lock);
	return nr;
}

static int cmm_oom_notify(struct notifier_block *self,
			  unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;
	long nr = 256;

	nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
	if (nr > 0)
		nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
	cmm_pages_target = cmm_pages;
	cmm_timed_pages_target = cmm_timed_pages;
	*freed += 256 - nr;
	return NOTIFY_OK;
}

static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify,
};

static int cmm_thread(void *dummy)
{
	int rc;

	while (1) {
		rc = wait_event_interruptible(cmm_thread_wait,
			(!cmm_suspended && (cmm_pages != cmm_pages_target ||
			 cmm_timed_pages != cmm_timed_pages_target)) ||
			 kthread_should_stop());
		if (kthread_should_stop() || rc == -ERESTARTSYS) {
			cmm_pages_target = cmm_pages;
			cmm_timed_pages_target = cmm_timed_pages;
			break;
		}
		if (cmm_pages_target > cmm_pages) {
			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
				cmm_pages_target = cmm_pages;
		} else if (cmm_pages_target < cmm_pages) {
			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
		}
		if (cmm_timed_pages_target > cmm_timed_pages) {
			if (cmm_alloc_pages(1, &cmm_timed_pages,
					    &cmm_timed_page_list))
				cmm_timed_pages_target = cmm_timed_pages;
		} else if (cmm_timed_pages_target < cmm_timed_pages) {
			cmm_free_pages(1, &cmm_timed_pages,
				       &cmm_timed_page_list);
		}
		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
			cmm_set_timer();
	}
	return 0;
}

static void cmm_kick_thread(void)
{
	wake_up(&cmm_thread_wait);
}

static void cmm_set_timer(void)
{
	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
		if (timer_pending(&cmm_timer))
			del_timer(&cmm_timer);
		return;
	}
	if (timer_pending(&cmm_timer)) {
		if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
			return;
	}
	cmm_timer.function = cmm_timer_fn;
	cmm_timer.data = 0;
	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
	add_timer(&cmm_timer);
}

static void cmm_timer_fn(unsigned long ignored)
{
	long nr;

	nr = cmm_timed_pages_target - cmm_timeout_pages;
	if (nr < 0)
		cmm_timed_pages_target = 0;
	else
		cmm_timed_pages_target = nr;
	cmm_kick_thread();
	cmm_set_timer();
}

static void cmm_set_pages(long nr)
{
	cmm_pages_target = nr;
	cmm_kick_thread();
}

static long cmm_get_pages(void)
{
	return cmm_pages;
}

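/*
 * Two pools of pages are taken out of guest use: a static pool (cmm_pages,
 * cmm_page_list) whose size is set directly via cmm_set_pages(), and a
 * timed pool (cmm_timed_pages, cmm_timed_page_list) that only grows via
 * cmm_add_timed_pages() and shrinks again whenever cmm_timer_fn() lowers
 * its target by cmm_timeout_pages every cmm_timeout_seconds.
 */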
static void cmm_add_timed_pages(long nr)
{
	cmm_timed_pages_target += nr;
	cmm_kick_thread();
}

static long cmm_get_timed_pages(void)
{
	return cmm_timed_pages;
}

static void cmm_set_timeout(long nr, long seconds)
{
	cmm_timeout_pages = nr;
	cmm_timeout_seconds = seconds;
	cmm_set_timer();
}

static int cmm_skip_blanks(char *cp, char **endp)
{
	char *str;

	for (str = cp; *str == ' ' || *str == '\t'; str++)
		;
	*endp = str;
	return str != cp;
}

static struct ctl_table cmm_table[];

static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	char buf[16], *p;
	long nr;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		if (ctl == &cmm_table[0])
			cmm_set_pages(nr);
		else
			cmm_add_timed_pages(nr);
	} else {
		if (ctl == &cmm_table[0])
			nr = cmm_get_pages();
		else
			nr = cmm_get_timed_pages();
		len = sprintf(buf, "%ld\n", nr);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

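/*
 * The cmm_timeout file takes two numbers, "<pages> <seconds>".  With the
 * sysctl proc interface this is typically /proc/sys/vm/cmm_timeout, e.g.
 *
 *	echo "100 30" > /proc/sys/vm/cmm_timeout
 *
 * which makes cmm_timer_fn() lower the timed-page target by 100 every
 * 30 seconds, so cmm_thread frees that many timed pages back to the
 * guest.  A read returns the current pair of values.
 */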
static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
			       size_t *lenp, loff_t *ppos)
{
	char buf[64], *p;
	long nr, seconds;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		nr = simple_strtoul(p, &p, 0);
		cmm_skip_blanks(p, &p);
		seconds = simple_strtoul(p, &p, 0);
		cmm_set_timeout(nr, seconds);
	} else {
		len = sprintf(buf, "%ld %ld\n",
			      cmm_timeout_pages, cmm_timeout_seconds);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table cmm_table[] = {
	{
		.procname = "cmm_pages",
		.mode = 0644,
		.proc_handler = cmm_pages_handler,
	},
	{
		.procname = "cmm_timed_pages",
		.mode = 0644,
		.proc_handler = cmm_pages_handler,
	},
	{
		.procname = "cmm_timeout",
		.mode = 0644,
		.proc_handler = cmm_timeout_handler,
	},
	{ }
};

static struct ctl_table cmm_dir_table[] = {
	{
		.procname = "vm",
		.maxlen = 0,
		.mode = 0555,
		.child = cmm_table,
	},
	{ }
};

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
{
	long nr, seconds;

	if (strlen(sender) > 0 && strcmp(from, sender) != 0)
		return;
	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
		return;
	if (strncmp(msg, "SHRINK", 6) == 0) {
		if (!cmm_skip_blanks(msg + 6, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_pages(nr);
	} else if (strncmp(msg, "RELEASE", 7) == 0) {
		if (!cmm_skip_blanks(msg + 7, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_add_timed_pages(nr);
	} else if (strncmp(msg, "REUSE", 5) == 0) {
		if (!cmm_skip_blanks(msg + 5, &msg))
			return;
		nr = simple_strtoul(msg, &msg, 0);
		if (!cmm_skip_blanks(msg, &msg))
			return;
		seconds = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_timeout(nr, seconds);
	}
}
#endif

static struct ctl_table_header *cmm_sysctl_header;

static int cmm_suspend(void)
{
	cmm_suspended = 1;
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
	return 0;
}

static int cmm_resume(void)
{
	cmm_suspended = 0;
	cmm_kick_thread();
	return 0;
}

static int cmm_power_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	switch (event) {
	case PM_POST_HIBERNATION:
		return cmm_resume();
	case PM_HIBERNATION_PREPARE:
		return cmm_suspend();
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block cmm_power_notifier = {
	.notifier_call = cmm_power_event,
};

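/*
 * Module setup: register the sysctl files, the optional IUCV SMSG
 * callback (CONFIG_CMM_IUCV), the OOM notifier and the power-management
 * notifier, and finally start "cmmthread".  The labels below unwind the
 * registrations in reverse order if any step fails.
 */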
static int __init cmm_init(void)
{
	int rc = -ENOMEM;

	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
	if (!cmm_sysctl_header)
		goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
	/* convert sender to uppercase characters */
	if (sender) {
		int len = strlen(sender);
		while (len--)
			sender[len] = toupper(sender[len]);
	} else {
		sender = cmm_default_sender;
	}

	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	rc = register_pm_notifier(&cmm_power_notifier);
	if (rc)
		goto out_pm;
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
	if (rc)
		goto out_kthread;
	return 0;

out_kthread:
	unregister_pm_notifier(&cmm_power_notifier);
out_pm:
	unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
	unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
	del_timer_sync(&cmm_timer);
	return rc;
}
module_init(cmm_init);

static void __exit cmm_exit(void)
{
	unregister_sysctl_table(cmm_sysctl_header);
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
	unregister_pm_notifier(&cmm_power_notifier);
	unregister_oom_notifier(&cmm_oom_nb);
	kthread_stop(cmm_thread_ptr);
	del_timer_sync(&cmm_timer);
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
module_exit(cmm_exit);

MODULE_LICENSE("GPL");