/*
 *	watchdog_dev.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	This part of the generic code takes care of the following
 *	misc device: /dev/watchdog.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/jiffies.h>	/* For timeout functions */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kref.h>		/* For data references */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/reboot.h>	/* For reboot notifier */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/workqueue.h>	/* For workqueue */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"

/*
 * struct watchdog_core_data - watchdog core internal data
 * @kref:		Reference count.
 * @cdev:		The watchdog's Character device.
 * @wdd:		Pointer to watchdog device.
 * @lock:		Lock for watchdog core.
 * @last_keepalive:	Time (in jiffies) of the most recent keepalive
 *			from userspace or from starting the watchdog.
 * @last_hw_keepalive:	Time (in jiffies) of the most recent ping sent
 *			to the hardware.
 * @work:		Delayed worker used to ping the hardware.
 * @status:		Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct kref kref;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	unsigned long last_keepalive;
	unsigned long last_hw_keepalive;
	struct delayed_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};

/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct workqueue_struct *watchdog_wq;

static bool handle_boot_enabled =
	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}

static long watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	unsigned long keepalive_interval;
	unsigned long last_heartbeat;
	unsigned long virt_timeout;
	unsigned int hw_heartbeat_ms;

	virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);

	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
	return min_t(long, last_heartbeat - jiffies, keepalive_interval);
}

static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		long t = watchdog_next_keepalive(wdd);

		if (t > 0)
			mod_delayed_work(watchdog_wq, &wd_data->work, t);
	} else {
		cancel_delayed_work(&wd_data->work);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
				msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
	int err;

	if (time_is_after_jiffies(earliest_keepalive)) {
		mod_delayed_work(watchdog_wq, &wd_data->work,
				 earliest_keepalive - jiffies);
		return 0;
	}

	wd_data->last_hw_keepalive = jiffies;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);	/* ping the watchdog */
	else
		err = wdd->ops->start(wdd);	/* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}
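
/*
 * Worked example of the keepalive arithmetic above (illustrative numbers,
 * not taken from any particular driver): assume userspace configures a
 * 60 second timeout on hardware with max_hw_heartbeat_ms = 8000 and
 * min_hw_heartbeat_ms = 0.
 *
 * - While the device is not open but the hardware is running,
 *   watchdog_next_keepalive() returns half the effective hardware
 *   heartbeat, msecs_to_jiffies(8000 / 2), so the worker pings the
 *   hardware every 4 seconds.
 * - Once userspace activates the watchdog, the virtual timeout is
 *   last_keepalive + 60 s, and the worker's final ping must land no
 *   later than 8 s before that point; the next delay is therefore the
 *   smaller of "time until (virtual timeout - 8 s)" and the 4 s interval.
 */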
/*
 * watchdog_ping: ping the watchdog.
 * @wdd: the watchdog device to ping
 *
 * The caller must hold wd_data->lock.
 *
 * If the watchdog has no own ping operation then it needs to be
 * restarted via the start operation. This wrapper function does
 * exactly that.
 * We only ping when the watchdog device is running.
 */

static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = jiffies;
	return __watchdog_ping(wdd);
}

static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
{
	struct watchdog_device *wdd = wd_data->wdd;

	return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
}

static void watchdog_ping_work(struct work_struct *work)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
			       work);

	mutex_lock(&wd_data->lock);
	if (watchdog_worker_should_ping(wd_data))
		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
}

/*
 * watchdog_start: wrapper to start the watchdog.
 * @wdd: the watchdog device to start
 *
 * The caller must hold wd_data->lock.
 *
 * Start the watchdog if it is not active and mark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 */

static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned long started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = jiffies;
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 * watchdog_stop: wrapper to stop the watchdog.
 * @wdd: the watchdog device to stop
 *
 * The caller must hold wd_data->lock.
 *
 * Stop the watchdog if it is still active and unmark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */

static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 * watchdog_get_status: wrapper to get the watchdog status
 * @wdd: the watchdog device to get the status from
 *
 * The caller must hold wd_data->lock.
 *
 * Get the watchdog's status flags.
 */

static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 * watchdog_set_timeout: set the watchdog timer timeout
 * @wdd: the watchdog device to set the timeout for
 * @timeout: timeout to set in seconds
 *
 * The caller must hold wd_data->lock.
 */

static int watchdog_set_timeout(struct watchdog_device *wdd,
				unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 * watchdog_set_pretimeout: set the watchdog timer pretimeout
 * @wdd: the watchdog device to set the timeout for
 * @timeout: pretimeout to set in seconds
 */

static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}

/*
 * watchdog_get_timeleft: wrapper to get the time left before a reboot
 * @wdd: the watchdog device to get the remaining time from
 * @timeleft: the time that's left
 *
 * The caller must hold wd_data->lock.
 *
 * Get the time before a watchdog will reboot (if not pinged).
 */

static int watchdog_get_timeleft(struct watchdog_device *wdd,
				 unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}

#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
			      int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif

/*
 * watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
 * @wdd: the watchdog device to do the ioctl on
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The caller must hold wd_data->lock.
 */

static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 * watchdog_write: writes to the watchdog.
 * @file: file from VFS
 * @data: user address of data
 * @len: length of data
 * @ppos: pointer to the file offset
 *
 * A write to a watchdog device is defined as a keepalive ping.
 * Writing the magic 'V' sequence allows the next close to turn
 * off the watchdog (if 'nowayout' is not set).
 */

static ssize_t watchdog_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}
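
/*
 * Illustrative userspace view of the interface implemented in this file.
 * This is only a sketch (error handling omitted, not taken from any
 * particular watchdog daemon); it shows the keepalive and magic-close
 * semantics documented above for watchdog_write() and below for
 * watchdog_ioctl().
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/watchdog.h>
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	int timeout = 30;
 *
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// set timeout, also pings
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// explicit keepalive
 *	write(fd, "\0", 1);			// any write also pings
 *	write(fd, "V", 1);			// allow magic close
 *	close(fd);				// stops the watchdog unless nowayout is set
 */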

/*
 * watchdog_ioctl: handle the different ioctl's for the watchdog device.
 * @file: file handle to the device
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The watchdog API defines a common set of functions for all watchdogs
 * according to their available features.
 */

static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
				   sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/*
		 * If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout).
		 */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}

/*
 * watchdog_open: open the /dev/watchdog* devices.
 * @inode: inode of device
 * @file: file handle to device
 *
 * When the /dev/watchdog* device gets opened, we start the watchdog.
 * Watch out: the /dev/watchdog device is single open, so we make sure
 * it can only be opened once.
 */

static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!watchdog_hw_running(wdd))
		kref_get(&wd_data->kref);

	/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
	return nonseekable_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct kref *kref)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(kref, struct watchdog_core_data, kref);

	kfree(wd_data);
}

/*
 * watchdog_release: release the watchdog device.
 * @inode: inode of device
 * @file: file handle to device
 *
 * This is the code for when /dev/watchdog gets closed. We will only
 * stop the watchdog when we have received the magic char (and nowayout
 * was not set), else the watchdog will keep running.
 */

static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		kref_put(&wd_data->kref, watchdog_core_data_release);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

/*
 * watchdog_cdev_register: register watchdog character device
 * @wdd: watchdog device
 * @devno: character device number
 *
 * Register a watchdog character device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */

static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
{
	struct watchdog_core_data *wd_data;
	int err;

	if (!watchdog_wq)
		return -ENODEV;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	kref_init(&wd_data->kref);
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);
	wd_data->cdev.owner = wdd->ops->owner;

	/* Add the device */
	err = cdev_add(&wd_data->cdev, devno, 1);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			kref_put(&wd_data->kref, watchdog_core_data_release);
		}
		return err;
	}

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = jiffies - 1;

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		if (handle_boot_enabled) {
			__module_get(wdd->ops->owner);
			kref_get(&wd_data->kref);
			queue_delayed_work(watchdog_wq, &wd_data->work, 0);
		} else {
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
		}
	}

	return 0;
}

/*
 * watchdog_cdev_unregister: unregister watchdog character device
 * @wdd: watchdog device
 *
 * Unregister watchdog character device and if needed the legacy
 * /dev/watchdog device.
 */

static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	cancel_delayed_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}

static struct class watchdog_class = {
	.name =		"watchdog",
	.owner =	THIS_MODULE,
	.dev_groups =	wdt_groups,
};

static int watchdog_reboot_notifier(struct notifier_block *nb,
				    unsigned long code, void *data)
{
	struct watchdog_device *wdd;

	wdd = container_of(nb, struct watchdog_device, reboot_nb);
	if (code == SYS_DOWN || code == SYS_HALT) {
		if (watchdog_active(wdd)) {
			int ret;

			ret = wdd->ops->stop(wdd);
			if (ret)
				return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}

/*
 * watchdog_dev_register: register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */

int watchdog_dev_register(struct watchdog_device *wdd)
{
	struct device *dev;
	dev_t devno;
	int ret;

	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);

	ret = watchdog_cdev_register(wdd, devno);
	if (ret)
		return ret;

	dev = device_create_with_groups(&watchdog_class, wdd->parent,
					devno, wdd, wdd->groups,
					"watchdog%d", wdd->id);
	if (IS_ERR(dev)) {
		watchdog_cdev_unregister(wdd);
		return PTR_ERR(dev);
	}

	ret = watchdog_register_pretimeout(wdd);
	if (ret) {
		device_destroy(&watchdog_class, devno);
		watchdog_cdev_unregister(wdd);
		return ret;
	}

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

		ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
		if (ret) {
			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
			       wdd->id, ret);
			watchdog_dev_unregister(wdd);
		}
	}

	return ret;
}

/*
 * watchdog_dev_unregister: unregister a watchdog device
 * @wdd: watchdog device
 *
 * Unregister watchdog device and if needed the legacy
 * /dev/watchdog device.
 */

void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
	watchdog_cdev_unregister(wdd);
}

/*
 * watchdog_dev_init: init dev part of watchdog core
 *
 * Allocate a range of chardev nodes to use for watchdog devices
 */

int __init watchdog_dev_init(void)
{
	int err;

	watchdog_wq = alloc_workqueue("watchdogd",
				      WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!watchdog_wq) {
		pr_err("Failed to create watchdog workqueue\n");
		return -ENOMEM;
	}

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	destroy_workqueue(watchdog_wq);
	return err;
}

/*
 * watchdog_dev_exit: exit dev part of watchdog core
 *
 * Release the range of chardev nodes used for watchdog devices
 */

void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	destroy_workqueue(watchdog_wq);
}

module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
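
/*
 * Usage note: this file is built into the "watchdog" core module, so the
 * parameter above can be set at module load time (e.g.
 * "modprobe watchdog handle_boot_enabled=0") or, when the core is built
 * in, on the kernel command line as "watchdog.handle_boot_enabled=0".
 */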