// SPDX-License-Identifier: GPL-2.0
/*
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT	"appldata"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/sched/stat.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/appldata.h>
#include <asm/vtimer.h>
#include <asm/smp.h>

#include "appldata.h"


#define APPLDATA_CPU_INTERVAL	10000		/* default (CPU) time for
						   sampling interval in
						   milliseconds */

#define TOD_MICRO	0x01000			/* nr. of TOD clock units
						   for 1 microsecond */

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(struct ctl_table *ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_interval_handler,
	},
	{ },
};

/*
 * Timer
 */
static struct vtimer_list appldata_timer;

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);


/*
 * Ops list
 */
static DEFINE_MUTEX(appldata_ops_mutex);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data)
{
	queue_work(appldata_wq, (struct work_struct *) data);
}

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(struct work_struct *work)
{
	struct list_head *lh;
	struct appldata_ops *ops;

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
}

static struct appldata_product_id appldata_id = {
	.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
		       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
	.prod_fn    = 0xD5D3,			/* "NL" */
	.version_nr = 0xF2F6,			/* "26" */
	.release_nr = 0xF0F1,			/* "01" */
};

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
		  u16 length, char *mod_lvl)
{
	struct appldata_parameter_list *parm_list;
	struct appldata_product_id *id;
	int rc;

	parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
	id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL);
	rc = -ENOMEM;
	if (parm_list && id) {
		id->record_nr = record_nr;
		id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
		rc = appldata_asm(parm_list, id, function,
				  (void *) buffer, length);
	}
	kfree(id);
	kfree(parm_list);
	return rc;
}
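
/*
 * Usage sketch (illustrative only, mirroring appldata_generic_handler()
 * below): data collection for a registered ops structure is started and
 * stopped with
 *
 *	rc = appldata_diag(ops->record_nr, APPLDATA_START_INTERVAL_REC,
 *			   (unsigned long) ops->data, ops->size, ops->mod_lvl);
 *	...
 *	rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
 *			   (unsigned long) ops->data, ops->size, ops->mod_lvl);
 *
 * record_nr, the record buffer, its size and the modification level are all
 * supplied by the gathering module via its struct appldata_ops.
 */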
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void __appldata_vtimer_setup(int cmd)
{
	u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		appldata_timer.expires = timer_interval;
		add_virt_timer_periodic(&appldata_timer);
		appldata_timer_active = 1;
		break;
	case APPLDATA_DEL_TIMER:
		del_virt_timer(&appldata_timer);
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		break;
	case APPLDATA_MOD_TIMER:
		if (!appldata_timer_active)
			break;
		mod_virt_timer_periodic(&appldata_timer, timer_interval);
	}
}

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(struct ctl_table *ctl, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	int timer_active = appldata_timer_active;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &timer_active,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	spin_lock(&appldata_timer_lock);
	if (timer_active)
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
	return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(struct ctl_table *ctl, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int interval = appldata_interval;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &interval,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ONE,
	};

	rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
	return 0;
}
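
/*
 * Example (illustrative only): the interval is kept in milliseconds, so with
 * the default APPLDATA_CPU_INTERVAL of 10000 ms __appldata_vtimer_setup()
 * programs the virtual timer with 10000 * 1000 * TOD_MICRO, i.e. 10^7
 * microseconds expressed in TOD clock units. From user space the sampling is
 * typically driven via the sysctl files backed by the handlers above, e.g.:
 *
 *	echo 5000 > /proc/sys/appldata/interval		(sample every 5 s)
 *	echo 1 > /proc/sys/appldata/timer		(start the timer)
 */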

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(struct ctl_table *ctl, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	struct list_head *lh;
	int rc, found;
	int active;
	struct ctl_table ctl_entry = {
		.data		= &active,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	found = 0;
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[0] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	// protect this function
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	mutex_unlock(&appldata_ops_mutex);

	active = ops->active;
	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write) {
		module_put(ops->owner);
		return rc;
	}

	mutex_lock(&appldata_ops_mutex);
	if (active && (ops->active == 0)) {
		// protect work queue callback
		if (!try_module_get(ops->owner)) {
			mutex_unlock(&appldata_ops_mutex);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			pr_err("Starting the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
			module_put(ops->owner);
		} else
			ops->active = 1;
	} else if (!active && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0)
			pr_err("Stopping the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
		module_put(ops->owner);
	}
	mutex_unlock(&appldata_ops_mutex);
	module_put(ops->owner);
	return 0;
}

/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;

	/* The last entry must be an empty one */
	ops->ctl_table = kcalloc(2, sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;

	mutex_lock(&appldata_ops_mutex);
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);

	ops->ctl_table[0].procname = ops->name;
	ops->ctl_table[0].mode = S_IRUGO | S_IWUSR;
	ops->ctl_table[0].proc_handler = appldata_generic_handler;
	ops->ctl_table[0].data = ops;

	ops->sysctl_header = register_sysctl_sz(appldata_proc_name, ops->ctl_table, 1);
	if (!ops->sysctl_header)
		goto out;
	return 0;
out:
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	kfree(ops->ctl_table);
	return -ENOMEM;
}

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}
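
/*
 * Registration sketch (illustrative only, loosely following the existing
 * gathering modules such as appldata_mem): a module fills in the public part
 * of its struct appldata_ops and registers it, which creates
 * /proc/sys/appldata/<name> handled by appldata_generic_handler(). The
 * "example_*" names below are placeholders, not part of this file:
 *
 *	static struct appldata_ops example_ops = {
 *		.name      = "example",
 *		.record_nr = APPLDATA_RECORD_MEM_ID,
 *		.size      = sizeof(struct appldata_example_data),
 *		.callback  = &example_callback,
 *		.data      = &example_data,
 *		.owner     = THIS_MODULE,
 *		.mod_lvl   = {0xF0, 0xF0},	// EBCDIC "00"
 *	};
 *
 *	rc = appldata_register_ops(&example_ops);
 */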
/********************** module-ops management <END> **************************/


/******************************* init / exit *********************************/

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	init_virt_timer(&appldata_timer);
	appldata_timer.function = appldata_timer_function;
	appldata_timer.data = (unsigned long) &appldata_work;
	appldata_wq = alloc_ordered_workqueue("appldata", 0);
	if (!appldata_wq)
		return -ENOMEM;
	appldata_sysctl_header = register_sysctl(appldata_proc_name, appldata_table);
	return 0;
}

__initcall(appldata_init);

/**************************** init / exit <END> ******************************/

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
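
/*
 * Note: si_swapinfo, nr_threads, nr_running and nr_iowait are exported above
 * for use by the appldata gathering modules (e.g. appldata_mem and
 * appldata_os), which may be built as loadable modules.
 */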