1 /* 2 * arch/s390/appldata/appldata_base.c 3 * 4 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1. 5 * Exports appldata_register_ops() and appldata_unregister_ops() for the 6 * data gathering modules. 7 * 8 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. 9 * 10 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 11 */ 12 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/errno.h> 17 #include <linux/interrupt.h> 18 #include <linux/proc_fs.h> 19 #include <linux/mm.h> 20 #include <linux/swap.h> 21 #include <linux/pagemap.h> 22 #include <linux/sysctl.h> 23 #include <linux/notifier.h> 24 #include <linux/cpu.h> 25 #include <linux/workqueue.h> 26 #include <asm/appldata.h> 27 #include <asm/timer.h> 28 #include <asm/uaccess.h> 29 #include <asm/io.h> 30 #include <asm/smp.h> 31 32 #include "appldata.h" 33 34 35 #define MY_PRINT_NAME "appldata" /* for debug messages, etc. */ 36 #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for 37 sampling interval in 38 milliseconds */ 39 40 #define TOD_MICRO 0x01000 /* nr. of TOD clock units 41 for 1 microsecond */ 42 /* 43 * /proc entries (sysctl) 44 */ 45 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; 46 static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, 47 void __user *buffer, size_t *lenp, loff_t *ppos); 48 static int appldata_interval_handler(ctl_table *ctl, int write, 49 struct file *filp, 50 void __user *buffer, 51 size_t *lenp, loff_t *ppos); 52 53 static struct ctl_table_header *appldata_sysctl_header; 54 static struct ctl_table appldata_table[] = { 55 { 56 .procname = "timer", 57 .mode = S_IRUGO | S_IWUSR, 58 .proc_handler = &appldata_timer_handler, 59 }, 60 { 61 .procname = "interval", 62 .mode = S_IRUGO | S_IWUSR, 63 .proc_handler = &appldata_interval_handler, 64 }, 65 { }, 66 }; 67 68 static struct ctl_table appldata_dir_table[] = { 69 { 70 .procname = appldata_proc_name, 71 .maxlen = 0, 72 .mode = S_IRUGO | S_IXUGO, 73 .child = appldata_table, 74 }, 75 { }, 76 }; 77 78 /* 79 * Timer 80 */ 81 static DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 82 static atomic_t appldata_expire_count = ATOMIC_INIT(0); 83 84 static DEFINE_SPINLOCK(appldata_timer_lock); 85 static int appldata_interval = APPLDATA_CPU_INTERVAL; 86 static int appldata_timer_active; 87 88 /* 89 * Work queue 90 */ 91 static struct workqueue_struct *appldata_wq; 92 static void appldata_work_fn(struct work_struct *work); 93 static DECLARE_WORK(appldata_work, appldata_work_fn); 94 95 96 /* 97 * Ops list 98 */ 99 static DEFINE_SPINLOCK(appldata_ops_lock); 100 static LIST_HEAD(appldata_ops_list); 101 102 103 /*************************** timer, work, DIAG *******************************/ 104 /* 105 * appldata_timer_function() 106 * 107 * schedule work and reschedule timer 108 */ 109 static void appldata_timer_function(unsigned long data) 110 { 111 P_DEBUG(" -= Timer =-\n"); 112 P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(), 113 atomic_read(&appldata_expire_count)); 114 if (atomic_dec_and_test(&appldata_expire_count)) { 115 atomic_set(&appldata_expire_count, num_online_cpus()); 116 queue_work(appldata_wq, (struct work_struct *) data); 117 } 118 } 119 120 /* 121 * appldata_work_fn() 122 * 123 * call data gathering function for each (active) module 124 */ 125 static void appldata_work_fn(struct work_struct *work) 126 { 127 struct list_head *lh; 128 struct appldata_ops *ops; 129 int i; 130 131 P_DEBUG(" -= Work Queue =-\n"); 132 i = 0; 133 spin_lock(&appldata_ops_lock); 134 list_for_each(lh, &appldata_ops_list) { 135 ops = list_entry(lh, struct appldata_ops, list); 136 P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n", 137 ++i, ops->active, ops->name); 138 if (ops->active == 1) { 139 ops->callback(ops->data); 140 } 141 } 142 spin_unlock(&appldata_ops_lock); 143 } 144 145 /* 146 * appldata_diag() 147 * 148 * prepare parameter list, issue DIAG 0xDC 149 */ 150 int appldata_diag(char record_nr, u16 function, unsigned long buffer, 151 u16 length, char *mod_lvl) 152 { 153 struct appldata_product_id id = { 154 .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, 155 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ 156 .prod_fn = 0xD5D3, /* "NL" */ 157 .version_nr = 0xF2F6, /* "26" */ 158 .release_nr = 0xF0F1, /* "01" */ 159 }; 160 161 id.record_nr = record_nr; 162 id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1]; 163 return appldata_asm(&id, function, (void *) buffer, length); 164 } 165 /************************ timer, work, DIAG <END> ****************************/ 166 167 168 /****************************** /proc stuff **********************************/ 169 170 /* 171 * appldata_mod_vtimer_wrap() 172 * 173 * wrapper function for mod_virt_timer(), because smp_call_function_single() 174 * accepts only one parameter. 175 */ 176 static void __appldata_mod_vtimer_wrap(void *p) { 177 struct { 178 struct vtimer_list *timer; 179 u64 expires; 180 } *args = p; 181 mod_virt_timer(args->timer, args->expires); 182 } 183 184 #define APPLDATA_ADD_TIMER 0 185 #define APPLDATA_DEL_TIMER 1 186 #define APPLDATA_MOD_TIMER 2 187 188 /* 189 * __appldata_vtimer_setup() 190 * 191 * Add, delete or modify virtual timers on all online cpus. 192 * The caller needs to get the appldata_timer_lock spinlock. 193 */ 194 static void 195 __appldata_vtimer_setup(int cmd) 196 { 197 u64 per_cpu_interval; 198 int i; 199 200 switch (cmd) { 201 case APPLDATA_ADD_TIMER: 202 if (appldata_timer_active) 203 break; 204 per_cpu_interval = (u64) (appldata_interval*1000 / 205 num_online_cpus()) * TOD_MICRO; 206 for_each_online_cpu(i) { 207 per_cpu(appldata_timer, i).expires = per_cpu_interval; 208 smp_call_function_single(i, add_virt_timer_periodic, 209 &per_cpu(appldata_timer, i), 210 0, 1); 211 } 212 appldata_timer_active = 1; 213 P_INFO("Monitoring timer started.\n"); 214 break; 215 case APPLDATA_DEL_TIMER: 216 for_each_online_cpu(i) 217 del_virt_timer(&per_cpu(appldata_timer, i)); 218 if (!appldata_timer_active) 219 break; 220 appldata_timer_active = 0; 221 atomic_set(&appldata_expire_count, num_online_cpus()); 222 P_INFO("Monitoring timer stopped.\n"); 223 break; 224 case APPLDATA_MOD_TIMER: 225 per_cpu_interval = (u64) (appldata_interval*1000 / 226 num_online_cpus()) * TOD_MICRO; 227 if (!appldata_timer_active) 228 break; 229 for_each_online_cpu(i) { 230 struct { 231 struct vtimer_list *timer; 232 u64 expires; 233 } args; 234 args.timer = &per_cpu(appldata_timer, i); 235 args.expires = per_cpu_interval; 236 smp_call_function_single(i, __appldata_mod_vtimer_wrap, 237 &args, 0, 1); 238 } 239 } 240 } 241 242 /* 243 * appldata_timer_handler() 244 * 245 * Start/Stop timer, show status of timer (0 = not active, 1 = active) 246 */ 247 static int 248 appldata_timer_handler(ctl_table *ctl, int write, struct file *filp, 249 void __user *buffer, size_t *lenp, loff_t *ppos) 250 { 251 int len; 252 char buf[2]; 253 254 if (!*lenp || *ppos) { 255 *lenp = 0; 256 return 0; 257 } 258 if (!write) { 259 len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); 260 if (len > *lenp) 261 len = *lenp; 262 if (copy_to_user(buffer, buf, len)) 263 return -EFAULT; 264 goto out; 265 } 266 len = *lenp; 267 if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) 268 return -EFAULT; 269 spin_lock(&appldata_timer_lock); 270 if (buf[0] == '1') 271 __appldata_vtimer_setup(APPLDATA_ADD_TIMER); 272 else if (buf[0] == '0') 273 __appldata_vtimer_setup(APPLDATA_DEL_TIMER); 274 spin_unlock(&appldata_timer_lock); 275 out: 276 *lenp = len; 277 *ppos += len; 278 return 0; 279 } 280 281 /* 282 * appldata_interval_handler() 283 * 284 * Set (CPU) timer interval for collection of data (in milliseconds), show 285 * current timer interval. 286 */ 287 static int 288 appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, 289 void __user *buffer, size_t *lenp, loff_t *ppos) 290 { 291 int len, interval; 292 char buf[16]; 293 294 if (!*lenp || *ppos) { 295 *lenp = 0; 296 return 0; 297 } 298 if (!write) { 299 len = sprintf(buf, "%i\n", appldata_interval); 300 if (len > *lenp) 301 len = *lenp; 302 if (copy_to_user(buffer, buf, len)) 303 return -EFAULT; 304 goto out; 305 } 306 len = *lenp; 307 if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) { 308 return -EFAULT; 309 } 310 interval = 0; 311 sscanf(buf, "%i", &interval); 312 if (interval <= 0) { 313 P_ERROR("Timer CPU interval has to be > 0!\n"); 314 return -EINVAL; 315 } 316 317 spin_lock(&appldata_timer_lock); 318 appldata_interval = interval; 319 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 320 spin_unlock(&appldata_timer_lock); 321 322 P_INFO("Monitoring CPU interval set to %u milliseconds.\n", 323 interval); 324 out: 325 *lenp = len; 326 *ppos += len; 327 return 0; 328 } 329 330 /* 331 * appldata_generic_handler() 332 * 333 * Generic start/stop monitoring and DIAG, show status of 334 * monitoring (0 = not in process, 1 = in process) 335 */ 336 static int 337 appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, 338 void __user *buffer, size_t *lenp, loff_t *ppos) 339 { 340 struct appldata_ops *ops = NULL, *tmp_ops; 341 int rc, len, found; 342 char buf[2]; 343 struct list_head *lh; 344 345 found = 0; 346 spin_lock(&appldata_ops_lock); 347 list_for_each(lh, &appldata_ops_list) { 348 tmp_ops = list_entry(lh, struct appldata_ops, list); 349 if (&tmp_ops->ctl_table[2] == ctl) { 350 found = 1; 351 } 352 } 353 if (!found) { 354 spin_unlock(&appldata_ops_lock); 355 return -ENODEV; 356 } 357 ops = ctl->data; 358 if (!try_module_get(ops->owner)) { // protect this function 359 spin_unlock(&appldata_ops_lock); 360 return -ENODEV; 361 } 362 spin_unlock(&appldata_ops_lock); 363 364 if (!*lenp || *ppos) { 365 *lenp = 0; 366 module_put(ops->owner); 367 return 0; 368 } 369 if (!write) { 370 len = sprintf(buf, ops->active ? "1\n" : "0\n"); 371 if (len > *lenp) 372 len = *lenp; 373 if (copy_to_user(buffer, buf, len)) { 374 module_put(ops->owner); 375 return -EFAULT; 376 } 377 goto out; 378 } 379 len = *lenp; 380 if (copy_from_user(buf, buffer, 381 len > sizeof(buf) ? sizeof(buf) : len)) { 382 module_put(ops->owner); 383 return -EFAULT; 384 } 385 386 spin_lock(&appldata_ops_lock); 387 if ((buf[0] == '1') && (ops->active == 0)) { 388 // protect work queue callback 389 if (!try_module_get(ops->owner)) { 390 spin_unlock(&appldata_ops_lock); 391 module_put(ops->owner); 392 return -ENODEV; 393 } 394 ops->callback(ops->data); // init record 395 rc = appldata_diag(ops->record_nr, 396 APPLDATA_START_INTERVAL_REC, 397 (unsigned long) ops->data, ops->size, 398 ops->mod_lvl); 399 if (rc != 0) { 400 P_ERROR("START DIAG 0xDC for %s failed, " 401 "return code: %d\n", ops->name, rc); 402 module_put(ops->owner); 403 } else { 404 P_INFO("Monitoring %s data enabled, " 405 "DIAG 0xDC started.\n", ops->name); 406 ops->active = 1; 407 } 408 } else if ((buf[0] == '0') && (ops->active == 1)) { 409 ops->active = 0; 410 rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, 411 (unsigned long) ops->data, ops->size, 412 ops->mod_lvl); 413 if (rc != 0) { 414 P_ERROR("STOP DIAG 0xDC for %s failed, " 415 "return code: %d\n", ops->name, rc); 416 } else { 417 P_INFO("Monitoring %s data disabled, " 418 "DIAG 0xDC stopped.\n", ops->name); 419 } 420 module_put(ops->owner); 421 } 422 spin_unlock(&appldata_ops_lock); 423 out: 424 *lenp = len; 425 *ppos += len; 426 module_put(ops->owner); 427 return 0; 428 } 429 430 /*************************** /proc stuff <END> *******************************/ 431 432 433 /************************* module-ops management *****************************/ 434 /* 435 * appldata_register_ops() 436 * 437 * update ops list, register /proc/sys entries 438 */ 439 int appldata_register_ops(struct appldata_ops *ops) 440 { 441 if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0)) 442 return -EINVAL; 443 444 ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); 445 if (!ops->ctl_table) 446 return -ENOMEM; 447 448 spin_lock(&appldata_ops_lock); 449 list_add(&ops->list, &appldata_ops_list); 450 spin_unlock(&appldata_ops_lock); 451 452 ops->ctl_table[0].procname = appldata_proc_name; 453 ops->ctl_table[0].maxlen = 0; 454 ops->ctl_table[0].mode = S_IRUGO | S_IXUGO; 455 ops->ctl_table[0].child = &ops->ctl_table[2]; 456 457 ops->ctl_table[2].procname = ops->name; 458 ops->ctl_table[2].mode = S_IRUGO | S_IWUSR; 459 ops->ctl_table[2].proc_handler = appldata_generic_handler; 460 ops->ctl_table[2].data = ops; 461 462 ops->sysctl_header = register_sysctl_table(ops->ctl_table); 463 if (!ops->sysctl_header) 464 goto out; 465 P_INFO("%s-ops registered!\n", ops->name); 466 return 0; 467 out: 468 spin_lock(&appldata_ops_lock); 469 list_del(&ops->list); 470 spin_unlock(&appldata_ops_lock); 471 kfree(ops->ctl_table); 472 return -ENOMEM; 473 } 474 475 /* 476 * appldata_unregister_ops() 477 * 478 * update ops list, unregister /proc entries, stop DIAG if necessary 479 */ 480 void appldata_unregister_ops(struct appldata_ops *ops) 481 { 482 spin_lock(&appldata_ops_lock); 483 list_del(&ops->list); 484 spin_unlock(&appldata_ops_lock); 485 unregister_sysctl_table(ops->sysctl_header); 486 kfree(ops->ctl_table); 487 P_INFO("%s-ops unregistered!\n", ops->name); 488 } 489 /********************** module-ops management <END> **************************/ 490 491 492 /******************************* init / exit *********************************/ 493 494 static void __cpuinit appldata_online_cpu(int cpu) 495 { 496 init_virt_timer(&per_cpu(appldata_timer, cpu)); 497 per_cpu(appldata_timer, cpu).function = appldata_timer_function; 498 per_cpu(appldata_timer, cpu).data = (unsigned long) 499 &appldata_work; 500 atomic_inc(&appldata_expire_count); 501 spin_lock(&appldata_timer_lock); 502 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 503 spin_unlock(&appldata_timer_lock); 504 } 505 506 static void __cpuinit appldata_offline_cpu(int cpu) 507 { 508 del_virt_timer(&per_cpu(appldata_timer, cpu)); 509 if (atomic_dec_and_test(&appldata_expire_count)) { 510 atomic_set(&appldata_expire_count, num_online_cpus()); 511 queue_work(appldata_wq, &appldata_work); 512 } 513 spin_lock(&appldata_timer_lock); 514 __appldata_vtimer_setup(APPLDATA_MOD_TIMER); 515 spin_unlock(&appldata_timer_lock); 516 } 517 518 static int __cpuinit appldata_cpu_notify(struct notifier_block *self, 519 unsigned long action, 520 void *hcpu) 521 { 522 switch (action) { 523 case CPU_ONLINE: 524 case CPU_ONLINE_FROZEN: 525 appldata_online_cpu((long) hcpu); 526 break; 527 case CPU_DEAD: 528 case CPU_DEAD_FROZEN: 529 appldata_offline_cpu((long) hcpu); 530 break; 531 default: 532 break; 533 } 534 return NOTIFY_OK; 535 } 536 537 static struct notifier_block __cpuinitdata appldata_nb = { 538 .notifier_call = appldata_cpu_notify, 539 }; 540 541 /* 542 * appldata_init() 543 * 544 * init timer, register /proc entries 545 */ 546 static int __init appldata_init(void) 547 { 548 int i; 549 550 P_DEBUG("sizeof(parameter_list) = %lu\n", 551 sizeof(struct appldata_parameter_list)); 552 553 appldata_wq = create_singlethread_workqueue("appldata"); 554 if (!appldata_wq) { 555 P_ERROR("Could not create work queue\n"); 556 return -ENOMEM; 557 } 558 559 for_each_online_cpu(i) 560 appldata_online_cpu(i); 561 562 /* Register cpu hotplug notifier */ 563 register_hotcpu_notifier(&appldata_nb); 564 565 appldata_sysctl_header = register_sysctl_table(appldata_dir_table); 566 567 P_DEBUG("Base interface initialized.\n"); 568 return 0; 569 } 570 571 __initcall(appldata_init); 572 573 /**************************** init / exit <END> ******************************/ 574 575 EXPORT_SYMBOL_GPL(appldata_register_ops); 576 EXPORT_SYMBOL_GPL(appldata_unregister_ops); 577 EXPORT_SYMBOL_GPL(appldata_diag); 578 579 EXPORT_SYMBOL_GPL(si_swapinfo); 580 EXPORT_SYMBOL_GPL(nr_threads); 581 EXPORT_SYMBOL_GPL(nr_running); 582 EXPORT_SYMBOL_GPL(nr_iowait); 583