/*
 *	watchdog_dev.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	This part of the generic code takes care of the following
 *	misc device: /dev/watchdog.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/jiffies.h>	/* For timeout functions */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kref.h>		/* For data references */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/workqueue.h>	/* For workqueue */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"

/*
 * struct watchdog_core_data - watchdog core internal data
 * @kref:		Reference count.
 * @cdev:		The watchdog's Character device.
 * @wdd:		Pointer to watchdog device.
 * @lock:		Lock for watchdog core.
 * @last_keepalive:	Time (in jiffies) of the most recent keepalive from
 *			userspace.
 * @last_hw_keepalive:	Time (in jiffies) of the most recent ping sent to the
 *			hardware.
 * @work:		Delayed work used to feed the hardware on behalf of
 *			the core.
 * @status:		Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct kref kref;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	unsigned long last_keepalive;
	unsigned long last_hw_keepalive;
	struct delayed_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};

/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct workqueue_struct *watchdog_wq;

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}

static long watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	unsigned long keepalive_interval;
	unsigned long last_heartbeat;
	unsigned long virt_timeout;
	unsigned int hw_heartbeat_ms;

	virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);

	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
	return min_t(long, last_heartbeat - jiffies, keepalive_interval);
}

static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		long t = watchdog_next_keepalive(wdd);

		if (t > 0)
			mod_delayed_work(watchdog_wq, &wd_data->work, t);
	} else {
		cancel_delayed_work(&wd_data->work);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
				msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
	int err;

	if (time_is_after_jiffies(earliest_keepalive)) {
		mod_delayed_work(watchdog_wq, &wd_data->work,
				 earliest_keepalive - jiffies);
		return 0;
	}

	wd_data->last_hw_keepalive = jiffies;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);	/* ping the watchdog */
	else
		err = wdd->ops->start(wdd);	/* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_ping: ping the watchdog.
 *	@wdd: the watchdog device to ping
 *
 *	The caller must hold wd_data->lock.
 *
 *	If the watchdog does not have its own ping operation then it needs
 *	to be restarted via the start operation. This wrapper function does
 *	exactly that.
 *	We only ping when the watchdog device is running.
 */

static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = jiffies;
	return __watchdog_ping(wdd);
}

static void watchdog_ping_work(struct work_struct *work)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;

	wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
			       work);

	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
		__watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);
}

/*
 *	watchdog_start: wrapper to start the watchdog.
 *	@wdd: the watchdog device to start
 *
 *	The caller must hold wd_data->lock.
 *
 *	Start the watchdog if it is not active and mark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 */

static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned long started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = jiffies;
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_stop: wrapper to stop the watchdog.
 *	@wdd: the watchdog device to stop
 *
 *	The caller must hold wd_data->lock.
 *
 *	Stop the watchdog if it is still active and unmark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 *	If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */

static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_get_status: wrapper to get the watchdog status
 *	@wdd: the watchdog device to get the status from
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the watchdog's status flags.
 */

static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 *	watchdog_set_timeout: set the watchdog timer timeout
 *	@wdd: the watchdog device to set the timeout for
 *	@timeout: timeout to set in seconds
 *
 *	The caller must hold wd_data->lock.
 */

static int watchdog_set_timeout(struct watchdog_device *wdd,
				unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_set_pretimeout: set the watchdog timer pretimeout
 *	@wdd: the watchdog device to set the pretimeout for
 *	@timeout: pretimeout to set in seconds
 */

static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}

/*
 *	watchdog_get_timeleft: wrapper to get the time left before a reboot
 *	@wdd: the watchdog device to get the remaining time from
 *	@timeleft: the time that's left
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the time before a watchdog will reboot (if not pinged).
 */

static int watchdog_get_timeleft(struct watchdog_device *wdd,
				 unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}

#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
			      int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif

/*
 *	watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
 *	@wdd: the watchdog device to do the ioctl on
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The caller must hold wd_data->lock.
 */

static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 *	watchdog_write: writes to the watchdog.
 *	@file: file from VFS
 *	@data: user address of data
 *	@len: length of data
 *	@ppos: pointer to the file offset
 *
 *	A write to a watchdog device is defined as a keepalive ping.
 *	Writing the magic 'V' sequence allows the next close to turn
 *	off the watchdog (if 'nowayout' is not set).
 */

static ssize_t watchdog_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}

/*
 *	watchdog_ioctl: handle the different ioctls for the watchdog device.
 *	@file: file handle to the device
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The watchdog API defines a common set of functions for all watchdogs
 *	according to their available features.
 */

static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
				   sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* Fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}

/*
 *	watchdog_open: open the /dev/watchdog* devices.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	When the /dev/watchdog* device gets opened, we start the watchdog.
 *	Watch out: the /dev/watchdog device is single open, so we make sure
 *	it can only be opened once.
 */

static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!watchdog_hw_running(wdd))
		kref_get(&wd_data->kref);

	/* /dev/watchdog is a virtual (and thus non-seekable) filesystem */
	return nonseekable_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct kref *kref)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(kref, struct watchdog_core_data, kref);

	kfree(wd_data);
}

/*
 *	watchdog_release: release the watchdog device.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	This is the code for when /dev/watchdog gets closed. We will only
 *	stop the watchdog when we have received the magic char (and nowayout
 *	was not set), else the watchdog will keep running.
 */

static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it cannot
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		kref_put(&wd_data->kref, watchdog_core_data_release);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

/*
 *	watchdog_cdev_register: register watchdog character device
 *	@wdd: watchdog device
 *	@devno: character device number
 *
 *	Register a watchdog character device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */

static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
{
	struct watchdog_core_data *wd_data;
	int err;

	/* Check this before allocating, so nothing is leaked on failure */
	if (!watchdog_wq)
		return -ENODEV;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	kref_init(&wd_data->kref);
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);
	wd_data->cdev.owner = wdd->ops->owner;

	/* Add the device */
	err = cdev_add(&wd_data->cdev, devno, 1);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			kref_put(&wd_data->kref, watchdog_core_data_release);
		}
		return err;
	}

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = jiffies - 1;

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		kref_get(&wd_data->kref);
		queue_delayed_work(watchdog_wq, &wd_data->work, 0);
	}

	return 0;
}

/*
 *	watchdog_cdev_unregister: unregister watchdog character device
 *	@wdd: watchdog device
 *
 *	Unregister watchdog character device and if needed the legacy
 *	/dev/watchdog device.
 */

static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	cancel_delayed_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}

static struct class watchdog_class = {
	.name =		"watchdog",
	.owner =	THIS_MODULE,
	.dev_groups =	wdt_groups,
};

/*
 *	watchdog_dev_register: register a watchdog device
 *	@wdd: watchdog device
 *
 *	Register a watchdog device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */

int watchdog_dev_register(struct watchdog_device *wdd)
{
	struct device *dev;
	dev_t devno;
	int ret;

	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);

	ret = watchdog_cdev_register(wdd, devno);
	if (ret)
		return ret;

	dev = device_create_with_groups(&watchdog_class, wdd->parent,
					devno, wdd, wdd->groups,
					"watchdog%d", wdd->id);
	if (IS_ERR(dev)) {
		watchdog_cdev_unregister(wdd);
		return PTR_ERR(dev);
	}

	ret = watchdog_register_pretimeout(wdd);
	if (ret) {
		device_destroy(&watchdog_class, devno);
		watchdog_cdev_unregister(wdd);
	}

	return ret;
}

/*
 *	watchdog_dev_unregister: unregister a watchdog device
 *	@wdd: watchdog device
 *
 *	Unregister watchdog device and if needed the legacy
 *	/dev/watchdog device.
 */

void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
	watchdog_cdev_unregister(wdd);
}

/*
 *	watchdog_dev_init: init dev part of watchdog core
 *
 *	Allocate a range of chardev nodes to use for watchdog devices
 */

int __init watchdog_dev_init(void)
{
	int err;

	watchdog_wq = alloc_workqueue("watchdogd",
				      WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!watchdog_wq) {
		pr_err("Failed to create watchdog workqueue\n");
		return -ENOMEM;
	}

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	destroy_workqueue(watchdog_wq);
	return err;
}

/*
 *	watchdog_dev_exit: exit dev part of watchdog core
 *
 *	Release the range of chardev nodes used for watchdog devices
 */

void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	destroy_workqueue(watchdog_wq);
}