/*
 *	watchdog_dev.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	This part of the generic code takes care of the following
 *	misc device: /dev/watchdog.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/hrtimer.h>	/* For hrtimers */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kref.h>		/* For data references */
#include <linux/kthread.h>	/* For kthread_work */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/reboot.h>	/* For reboot notifier */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */

#include <uapi/linux/sched/types.h>	/* For struct sched_param */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"

/*
 * struct watchdog_core_data - watchdog core internal data
 * @kref:		Reference count.
 * @cdev:		The watchdog's Character device.
 * @wdd:		Pointer to watchdog device.
 * @lock:		Lock for watchdog core.
 * @last_keepalive:	Time of the most recent keepalive from userspace.
 * @last_hw_keepalive:	Time of the most recent keepalive sent to the hardware.
 * @timer:		hrtimer used to schedule keepalive work.
 * @work:		kthread work item that pings the hardware.
 * @status:		Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct kref kref;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	ktime_t last_keepalive;
	ktime_t last_hw_keepalive;
	struct hrtimer timer;
	struct kthread_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};
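
/*
 * Note on lifetime (summarising the code below): @kref keeps the structure
 * above alive while the character device is registered and while
 * /dev/watchdog is open; @lock serializes access to @wdd, which
 * watchdog_cdev_unregister() sets to NULL, so every user re-checks it
 * under the lock before calling into the driver.
 */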

/* the dev_t for the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct kthread_worker *watchdog_kworker;

static bool handle_boot_enabled =
	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milliseconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}

static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	ktime_t keepalive_interval;
	ktime_t last_heartbeat, latest_heartbeat;
	ktime_t virt_timeout;
	unsigned int hw_heartbeat_ms;

	virt_timeout = ktime_add(wd_data->last_keepalive,
				 ms_to_ktime(timeout_ms));
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);

	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
	if (ktime_before(latest_heartbeat, keepalive_interval))
		return latest_heartbeat;
	return keepalive_interval;
}
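
/*
 * Worked example with illustrative figures (not taken from any particular
 * driver): with min_hw_heartbeat_ms = 1000, a ping arriving 200 ms after
 * the previous hardware ping is not forwarded; __watchdog_ping() below
 * re-arms the hrtimer for the remaining 800 ms instead.  With
 * timeout = 60 s and max_hw_heartbeat_ms = 8000, the worker pings every
 * 4 s and places its final ping 8 s before last_keepalive + 60 s, so an
 * unfed device still expires 60 s after the last userspace keepalive.
 */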

static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		ktime_t t = watchdog_next_keepalive(wdd);

		if (t > 0)
			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
	} else {
		hrtimer_cancel(&wd_data->timer);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);	/* ping the watchdog */
	else
		err = wdd->ops->start(wdd);	/* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}

/*
 * watchdog_ping: ping the watchdog.
 * @wdd: the watchdog device to ping
 *
 * The caller must hold wd_data->lock.
 *
 * If the watchdog has no ping operation of its own, it is restarted via
 * the start operation instead. This wrapper function does exactly that.
 * We only ping when the watchdog device is running.
 */

static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = ktime_get();
	return __watchdog_ping(wdd);
}

static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
{
	struct watchdog_device *wdd = wd_data->wdd;

	return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
}

static void watchdog_ping_work(struct kthread_work *work)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(work, struct watchdog_core_data, work);

	mutex_lock(&wd_data->lock);
	if (watchdog_worker_should_ping(wd_data))
		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
}

static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(timer, struct watchdog_core_data, timer);

	kthread_queue_work(watchdog_kworker, &wd_data->work);
	return HRTIMER_NORESTART;
}

/*
 * watchdog_start: wrapper to start the watchdog.
 * @wdd: the watchdog device to start
 *
 * The caller must hold wd_data->lock.
 *
 * Start the watchdog if it is not active and mark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 */

static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = ktime_get();
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}
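
/*
 * Note: WDOG_ACTIVE tracks whether userspace asked for the watchdog to
 * run, while WDOG_HW_RUNNING means the hardware itself is running (and
 * must be fed by the kernel until userspace takes over).  watchdog_start()
 * above can therefore "start" an already-running device with a simple ping.
 */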

/*
 * watchdog_stop: wrapper to stop the watchdog.
 * @wdd: the watchdog device to stop
 *
 * The caller must hold wd_data->lock.
 *
 * Stop the watchdog if it is still active and unmark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */

static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 * watchdog_get_status: wrapper to get the watchdog status
 * @wdd: the watchdog device to get the status from
 *
 * The caller must hold wd_data->lock.
 *
 * Get the watchdog's status flags.
 */

static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 * watchdog_set_timeout: set the watchdog timer timeout
 * @wdd: the watchdog device to set the timeout for
 * @timeout: timeout to set in seconds
 *
 * The caller must hold wd_data->lock.
 */

static int watchdog_set_timeout(struct watchdog_device *wdd,
				unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 * watchdog_set_pretimeout: set the watchdog timer pretimeout
 * @wdd: the watchdog device to set the pretimeout for
 * @timeout: pretimeout to set in seconds
 */

static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}
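
/*
 * Example (illustrative values, exact behaviour is driver dependent): with
 * timeout = 60 and pretimeout = 10, a driver supporting WDIOF_PRETIMEOUT
 * reports the pretimeout event to the selected governor 10 seconds before
 * the hardware would reset the system, i.e. 50 seconds after the last
 * keepalive.
 */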

/*
 * watchdog_get_timeleft: wrapper to get the time left before a reboot
 * @wdd: the watchdog device to get the remaining time from
 * @timeleft: the time that's left
 *
 * The caller must hold wd_data->lock.
 *
 * Get the time before a watchdog will reboot (if not pinged).
 */

static int watchdog_get_timeleft(struct watchdog_device *wdd,
				 unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}

#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
			      int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif

/*
 * watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
 * @wdd: the watchdog device to do the ioctl on
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The caller must hold wd_data->lock.
 */

static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 * watchdog_write: writes to the watchdog.
 * @file: file from VFS
 * @data: user address of data
 * @len: length of data
 * @ppos: pointer to the file offset
 *
 * A write to a watchdog device is defined as a keepalive ping.
 * Writing the magic 'V' sequence allows the next close to turn
 * off the watchdog (if 'nowayout' is not set).
 */

static ssize_t watchdog_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}
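
/*
 * Illustrative userspace sequence against the standard watchdog chardev
 * API served by the handlers above and below (error handling omitted,
 * the 30 second timeout is just an example value):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	int timeout = 30;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// also pings the device
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// explicit keepalive
 *	write(fd, "V", 1);			// arm the magic close
 *	close(fd);	// stops the device unless nowayout is set
 */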

/*
 * watchdog_ioctl: handle the different ioctls for the watchdog device.
 * @file: file handle to the device
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The watchdog API defines a common set of functions for all watchdogs
 * according to their available features.
 */

static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}

/*
 * watchdog_open: open the /dev/watchdog* devices.
 * @inode: inode of device
 * @file: file handle to device
 *
 * When the /dev/watchdog* device gets opened, we start the watchdog.
 * Watch out: the /dev/watchdog device is single open, so we make sure
 * it can only be opened once.
 */

static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	bool hw_running;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	hw_running = watchdog_hw_running(wdd);
	if (!hw_running && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!hw_running)
		kref_get(&wd_data->kref);

	/* /dev/watchdog is a virtual (and thus non-seekable) file */
	return stream_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct kref *kref)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(kref, struct watchdog_core_data, kref);

	kfree(wd_data);
}

/*
 * watchdog_release: release the watchdog device.
 * @inode: inode of device
 * @file: file handle to device
 *
 * This is the code for when /dev/watchdog gets closed. We will only
 * stop the watchdog when we have received the magic char (and nowayout
 * was not set), else the watchdog will keep running.
 */

static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		kref_put(&wd_data->kref, watchdog_core_data_release);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

/*
 * watchdog_cdev_register: register watchdog character device
 * @wdd: watchdog device
 * @devno: character device number
 *
 * Register a watchdog character device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */

static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	kref_init(&wd_data->kref);
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	if (IS_ERR_OR_NULL(watchdog_kworker))
		return -ENODEV;

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wd_data->timer.function = watchdog_timer_expired;

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);
	wd_data->cdev.owner = wdd->ops->owner;

	/* Add the device */
	err = cdev_add(&wd_data->cdev, devno, 1);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			kref_put(&wd_data->kref, watchdog_core_data_release);
		}
		return err;
	}

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		kref_get(&wd_data->kref);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}
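
/*
 * A device found already running at registration time typically means the
 * boot loader or firmware started it.  With handle_boot_enabled set (the
 * default follows CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED; see the module
 * parameter at the bottom of this file) the core feeds it until userspace
 * opens the device; otherwise the system resets unless userspace takes
 * over in time.
 */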

/*
 * watchdog_cdev_unregister: unregister watchdog character device
 * @wdd: watchdog device
 *
 * Unregister watchdog character device and if needed the legacy
 * /dev/watchdog device.
 */

static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}

static struct class watchdog_class = {
	.name =		"watchdog",
	.owner =	THIS_MODULE,
	.dev_groups =	wdt_groups,
};

static int watchdog_reboot_notifier(struct notifier_block *nb,
				    unsigned long code, void *data)
{
	struct watchdog_device *wdd;

	wdd = container_of(nb, struct watchdog_device, reboot_nb);
	if (code == SYS_DOWN || code == SYS_HALT) {
		if (watchdog_active(wdd)) {
			int ret;

			ret = wdd->ops->stop(wdd);
			if (ret)
				return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}

/*
 * watchdog_dev_register: register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */

int watchdog_dev_register(struct watchdog_device *wdd)
{
	struct device *dev;
	dev_t devno;
	int ret;

	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);

	ret = watchdog_cdev_register(wdd, devno);
	if (ret)
		return ret;

	dev = device_create_with_groups(&watchdog_class, wdd->parent,
					devno, wdd, wdd->groups,
					"watchdog%d", wdd->id);
	if (IS_ERR(dev)) {
		watchdog_cdev_unregister(wdd);
		return PTR_ERR(dev);
	}

	ret = watchdog_register_pretimeout(wdd);
	if (ret) {
		device_destroy(&watchdog_class, devno);
		watchdog_cdev_unregister(wdd);
		return ret;
	}

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

		ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
		if (ret) {
			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
			       wdd->id, ret);
			watchdog_dev_unregister(wdd);
		}
	}

	return ret;
}

/*
 * watchdog_dev_unregister: unregister a watchdog device
 * @wdd: watchdog device
 *
 * Unregister watchdog device and if needed the legacy
 * /dev/watchdog device.
 */

void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
	watchdog_cdev_unregister(wdd);
}

/*
 * watchdog_dev_init: init dev part of watchdog core
 *
 * Allocate a range of chardev nodes to use for watchdog devices
 */

int __init watchdog_dev_init(void)
{
	int err;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};

	watchdog_kworker = kthread_create_worker(0, "watchdogd");
	if (IS_ERR(watchdog_kworker)) {
		pr_err("Failed to create watchdog kworker\n");
		return PTR_ERR(watchdog_kworker);
	}
	sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	kthread_destroy_worker(watchdog_kworker);
	return err;
}

/*
 * watchdog_dev_exit: exit dev part of watchdog core
 *
 * Release the range of chardev nodes used for watchdog devices
 */

void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	kthread_destroy_worker(watchdog_kworker);
}

module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");