1 /*
2  *	watchdog_dev.c
3  *
4  *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5  *						All Rights Reserved.
6  *
7  *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8  *
9  *
10  *	This source code is part of the generic code that can be used
11  *	by all the watchdog timer drivers.
12  *
13  *	This part of the generic code takes care of the following
14  *	misc device: /dev/watchdog.
15  *
16  *	Based on source code of the following authors:
17  *	  Matt Domsch <Matt_Domsch@dell.com>,
18  *	  Rob Radez <rob@osinvestor.com>,
19  *	  Rusty Lynch <rusty@linux.co.intel.com>
20  *	  Satyam Sharma <satyam@infradead.org>
21  *	  Randy Dunlap <randy.dunlap@oracle.com>
22  *
23  *	This program is free software; you can redistribute it and/or
24  *	modify it under the terms of the GNU General Public License
25  *	as published by the Free Software Foundation; either version
26  *	2 of the License, or (at your option) any later version.
27  *
28  *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
29  *	admit liability nor provide warranty for any of this software.
30  *	This material is provided "AS-IS" and at no charge.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/cdev.h>		/* For character device */
36 #include <linux/errno.h>	/* For the -ENODEV/... values */
37 #include <linux/fs.h>		/* For file operations */
38 #include <linux/init.h>		/* For __init/__exit/... */
39 #include <linux/jiffies.h>	/* For timeout functions */
40 #include <linux/kernel.h>	/* For printk/panic/... */
41 #include <linux/kref.h>		/* For data references */
42 #include <linux/miscdevice.h>	/* For handling misc devices */
43 #include <linux/module.h>	/* For module stuff/... */
44 #include <linux/mutex.h>	/* For mutexes */
45 #include <linux/slab.h>		/* For memory functions */
46 #include <linux/types.h>	/* For standard types (like size_t) */
47 #include <linux/watchdog.h>	/* For watchdog specific items */
48 #include <linux/workqueue.h>	/* For workqueue */
49 #include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
50 
51 #include "watchdog_core.h"
52 
53 /*
54  * struct watchdog_core_data - watchdog core internal data
55  * @kref:	Reference count.
56  * @cdev:	The watchdog's Character device.
57  * @wdd:	Pointer to watchdog device.
58  * @lock:	Lock for watchdog core.
59  * @status:	Watchdog core internal status bits.
60  */
61 struct watchdog_core_data {
62 	struct kref kref;
63 	struct cdev cdev;
64 	struct watchdog_device *wdd;
65 	struct mutex lock;
66 	unsigned long last_keepalive;
67 	unsigned long last_hw_keepalive;
68 	struct delayed_work work;
69 	unsigned long status;		/* Internal status bits */
70 #define _WDOG_DEV_OPEN		0	/* Opened ? */
71 #define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
72 };
73 
74 /* the dev_t structure to store the dynamically allocated watchdog devices */
75 static dev_t watchdog_devt;
76 /* Reference to watchdog device behind /dev/watchdog */
77 static struct watchdog_core_data *old_wd_data;
78 
79 static struct workqueue_struct *watchdog_wq;
80 
81 static inline bool watchdog_need_worker(struct watchdog_device *wdd)
82 {
83 	/* All variables in milli-seconds */
84 	unsigned int hm = wdd->max_hw_heartbeat_ms;
85 	unsigned int t = wdd->timeout * 1000;
86 
87 	/*
88 	 * A worker to generate heartbeat requests is needed if all of the
89 	 * following conditions are true.
90 	 * - Userspace activated the watchdog.
91 	 * - The driver provided a value for the maximum hardware timeout, and
92 	 *   thus is aware that the framework supports generating heartbeat
93 	 *   requests.
94 	 * - Userspace requests a longer timeout than the hardware can handle.
95 	 *
96 	 * Alternatively, if userspace has not opened the watchdog
97 	 * device, we take care of feeding the watchdog if it is
98 	 * running.
99 	 */
100 	return (hm && watchdog_active(wdd) && t > hm) ||
101 		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
102 }
103 
/*
 *	watchdog_next_keepalive: compute the delay until the next worker ping
 *	@wdd: the watchdog device
 *
 *	Returns the delay, in jiffies, before the keepalive worker should
 *	ping the hardware again.  The result may be negative if the last
 *	possible heartbeat moment has already passed; callers treat a
 *	non-positive value as "do not reschedule".
 */
static long watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	unsigned long keepalive_interval;
	unsigned long last_heartbeat;
	unsigned long virt_timeout;
	unsigned int hw_heartbeat_ms;

	/* Jiffies value at which the virtual (userspace) timeout expires. */
	virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
	/* Effective hardware period: the smaller non-zero of the two. */
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	/* Ping at half the hardware period to leave scheduling slack. */
	keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);

	/* Not activated by userspace: just keep feeding periodically. */
	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
	return min_t(long, last_heartbeat - jiffies, keepalive_interval);
}
128 
129 static inline void watchdog_update_worker(struct watchdog_device *wdd)
130 {
131 	struct watchdog_core_data *wd_data = wdd->wd_data;
132 
133 	if (watchdog_need_worker(wdd)) {
134 		long t = watchdog_next_keepalive(wdd);
135 
136 		if (t > 0)
137 			mod_delayed_work(watchdog_wq, &wd_data->work, t);
138 	} else {
139 		cancel_delayed_work(&wd_data->work);
140 	}
141 }
142 
/*
 *	__watchdog_ping: ping the hardware, honoring min_hw_heartbeat_ms
 *	@wdd: the watchdog device to ping
 *
 *	The caller must hold wd_data->lock.
 *
 *	If the minimum interval between hardware heartbeats has not yet
 *	elapsed, the ping is deferred to the worker instead of being sent
 *	immediately; otherwise the driver's ping (or start, as fallback)
 *	operation is invoked.
 */
static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
				msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
	int err;

	/* Too soon for the hardware: let the worker retry at the right time. */
	if (time_is_after_jiffies(earliest_keepalive)) {
		mod_delayed_work(watchdog_wq, &wd_data->work,
				 earliest_keepalive - jiffies);
		return 0;
	}

	wd_data->last_hw_keepalive = jiffies;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);  /* ping the watchdog */
	else
		err = wdd->ops->start(wdd); /* restart watchdog */

	/* State may have changed; recompute the worker schedule. */
	watchdog_update_worker(wdd);

	return err;
}
167 
168 /*
169  *	watchdog_ping: ping the watchdog.
170  *	@wdd: the watchdog device to ping
171  *
172  *	The caller must hold wd_data->lock.
173  *
174  *	If the watchdog has no own ping operation then it needs to be
175  *	restarted via the start operation. This wrapper function does
176  *	exactly that.
177  *	We only ping when the watchdog device is running.
178  */
179 
180 static int watchdog_ping(struct watchdog_device *wdd)
181 {
182 	struct watchdog_core_data *wd_data = wdd->wd_data;
183 
184 	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
185 		return 0;
186 
187 	wd_data->last_keepalive = jiffies;
188 	return __watchdog_ping(wdd);
189 }
190 
191 static void watchdog_ping_work(struct work_struct *work)
192 {
193 	struct watchdog_core_data *wd_data;
194 	struct watchdog_device *wdd;
195 
196 	wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
197 			       work);
198 
199 	mutex_lock(&wd_data->lock);
200 	wdd = wd_data->wdd;
201 	if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
202 		__watchdog_ping(wdd);
203 	mutex_unlock(&wd_data->lock);
204 }
205 
206 /*
207  *	watchdog_start: wrapper to start the watchdog.
208  *	@wdd: the watchdog device to start
209  *
210  *	The caller must hold wd_data->lock.
211  *
212  *	Start the watchdog if it is not active and mark it active.
213  *	This function returns zero on success or a negative errno code for
214  *	failure.
215  */
216 
217 static int watchdog_start(struct watchdog_device *wdd)
218 {
219 	struct watchdog_core_data *wd_data = wdd->wd_data;
220 	unsigned long started_at;
221 	int err;
222 
223 	if (watchdog_active(wdd))
224 		return 0;
225 
226 	started_at = jiffies;
227 	if (watchdog_hw_running(wdd) && wdd->ops->ping)
228 		err = wdd->ops->ping(wdd);
229 	else
230 		err = wdd->ops->start(wdd);
231 	if (err == 0) {
232 		set_bit(WDOG_ACTIVE, &wdd->status);
233 		wd_data->last_keepalive = started_at;
234 		watchdog_update_worker(wdd);
235 	}
236 
237 	return err;
238 }
239 
240 /*
241  *	watchdog_stop: wrapper to stop the watchdog.
242  *	@wdd: the watchdog device to stop
243  *
244  *	The caller must hold wd_data->lock.
245  *
246  *	Stop the watchdog if it is still active and unmark it active.
247  *	This function returns zero on success or a negative errno code for
248  *	failure.
249  *	If the 'nowayout' feature was set, the watchdog cannot be stopped.
250  */
251 
static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	/* nowayout: refuse to stop, so a close cannot disarm the watchdog. */
	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	/*
	 * A driver without a stop operation cannot actually turn the
	 * hardware off; mark it as still hardware-running so the worker
	 * keeps feeding it.
	 */
	if (wdd->ops->stop)
		err = wdd->ops->stop(wdd);
	else
		set_bit(WDOG_HW_RUNNING, &wdd->status);

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}
277 
278 /*
279  *	watchdog_get_status: wrapper to get the watchdog status
280  *	@wdd: the watchdog device to get the status from
281  *
282  *	The caller must hold wd_data->lock.
283  *
284  *	Get the watchdog's status flags.
285  */
286 
287 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
288 {
289 	if (!wdd->ops->status)
290 		return 0;
291 
292 	return wdd->ops->status(wdd);
293 }
294 
295 /*
296  *	watchdog_set_timeout: set the watchdog timer timeout
297  *	@wdd: the watchdog device to set the timeout for
298  *	@timeout: timeout to set in seconds
299  *
300  *	The caller must hold wd_data->lock.
301  */
302 
303 static int watchdog_set_timeout(struct watchdog_device *wdd,
304 							unsigned int timeout)
305 {
306 	int err = 0;
307 
308 	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
309 		return -EOPNOTSUPP;
310 
311 	if (watchdog_timeout_invalid(wdd, timeout))
312 		return -EINVAL;
313 
314 	if (wdd->ops->set_timeout)
315 		err = wdd->ops->set_timeout(wdd, timeout);
316 	else
317 		wdd->timeout = timeout;
318 
319 	watchdog_update_worker(wdd);
320 
321 	return err;
322 }
323 
324 /*
325  *	watchdog_get_timeleft: wrapper to get the time left before a reboot
326  *	@wdd: the watchdog device to get the remaining time from
327  *	@timeleft: the time that's left
328  *
329  *	The caller must hold wd_data->lock.
330  *
331  *	Get the time before a watchdog will reboot (if not pinged).
332  */
333 
334 static int watchdog_get_timeleft(struct watchdog_device *wdd,
335 							unsigned int *timeleft)
336 {
337 	*timeleft = 0;
338 
339 	if (!wdd->ops->get_timeleft)
340 		return -EOPNOTSUPP;
341 
342 	*timeleft = wdd->ops->get_timeleft(wdd);
343 
344 	return 0;
345 }
346 
347 #ifdef CONFIG_WATCHDOG_SYSFS
348 static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
349 				char *buf)
350 {
351 	struct watchdog_device *wdd = dev_get_drvdata(dev);
352 
353 	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
354 }
355 static DEVICE_ATTR_RO(nowayout);
356 
357 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
358 				char *buf)
359 {
360 	struct watchdog_device *wdd = dev_get_drvdata(dev);
361 	struct watchdog_core_data *wd_data = wdd->wd_data;
362 	unsigned int status;
363 
364 	mutex_lock(&wd_data->lock);
365 	status = watchdog_get_status(wdd);
366 	mutex_unlock(&wd_data->lock);
367 
368 	return sprintf(buf, "%u\n", status);
369 }
370 static DEVICE_ATTR_RO(status);
371 
/* sysfs: report the cause of the last boot, as filled in by the driver. */
static ssize_t bootstatus_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);
380 
381 static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
382 				char *buf)
383 {
384 	struct watchdog_device *wdd = dev_get_drvdata(dev);
385 	struct watchdog_core_data *wd_data = wdd->wd_data;
386 	ssize_t status;
387 	unsigned int val;
388 
389 	mutex_lock(&wd_data->lock);
390 	status = watchdog_get_timeleft(wdd, &val);
391 	mutex_unlock(&wd_data->lock);
392 	if (!status)
393 		status = sprintf(buf, "%u\n", val);
394 
395 	return status;
396 }
397 static DEVICE_ATTR_RO(timeleft);
398 
/* sysfs: report the current timeout in seconds (0 = unknown). */
static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);
407 
/* sysfs: report the driver-supplied identity string. */
static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);
416 
417 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
418 				char *buf)
419 {
420 	struct watchdog_device *wdd = dev_get_drvdata(dev);
421 
422 	if (watchdog_active(wdd))
423 		return sprintf(buf, "active\n");
424 
425 	return sprintf(buf, "inactive\n");
426 }
427 static DEVICE_ATTR_RO(state);
428 
429 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
430 				int n)
431 {
432 	struct device *dev = container_of(kobj, struct device, kobj);
433 	struct watchdog_device *wdd = dev_get_drvdata(dev);
434 	umode_t mode = attr->mode;
435 
436 	if (attr == &dev_attr_status.attr && !wdd->ops->status)
437 		mode = 0;
438 	else if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
439 		mode = 0;
440 
441 	return mode;
442 }
/* Attributes exported under /sys/class/watchdog/watchdogN/. */
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	NULL,
};

/* Attribute group; wdt_is_visible() hides unsupported entries. */
static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
459 #else
460 #define wdt_groups	NULL
461 #endif
462 
463 /*
464  *	watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
465  *	@wdd: the watchdog device to do the ioctl on
466  *	@cmd: watchdog command
467  *	@arg: argument pointer
468  *
469  *	The caller must hold wd_data->lock.
470  */
471 
472 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
473 							unsigned long arg)
474 {
475 	if (!wdd->ops->ioctl)
476 		return -ENOIOCTLCMD;
477 
478 	return wdd->ops->ioctl(wdd, cmd, arg);
479 }
480 
481 /*
482  *	watchdog_write: writes to the watchdog.
483  *	@file: file from VFS
484  *	@data: user address of data
485  *	@len: length of data
486  *	@ppos: pointer to the file offset
487  *
488  *	A write to a watchdog device is defined as a keepalive ping.
489  *	Writing the magic 'V' sequence allows the next close to turn
490  *	off the watchdog (if 'nowayout' is not set).
491  */
492 
493 static ssize_t watchdog_write(struct file *file, const char __user *data,
494 						size_t len, loff_t *ppos)
495 {
496 	struct watchdog_core_data *wd_data = file->private_data;
497 	struct watchdog_device *wdd;
498 	int err;
499 	size_t i;
500 	char c;
501 
502 	if (len == 0)
503 		return 0;
504 
505 	/*
506 	 * Note: just in case someone wrote the magic character
507 	 * five months ago...
508 	 */
509 	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
510 
511 	/* scan to see whether or not we got the magic character */
512 	for (i = 0; i != len; i++) {
513 		if (get_user(c, data + i))
514 			return -EFAULT;
515 		if (c == 'V')
516 			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
517 	}
518 
519 	/* someone wrote to us, so we send the watchdog a keepalive ping */
520 
521 	err = -ENODEV;
522 	mutex_lock(&wd_data->lock);
523 	wdd = wd_data->wdd;
524 	if (wdd)
525 		err = watchdog_ping(wdd);
526 	mutex_unlock(&wd_data->lock);
527 
528 	if (err < 0)
529 		return err;
530 
531 	return len;
532 }
533 
534 /*
535  *	watchdog_ioctl: handle the different ioctl's for the watchdog device.
536  *	@file: file handle to the device
537  *	@cmd: watchdog command
538  *	@arg: argument pointer
539  *
540  *	The watchdog API defines a common set of functions for all watchdogs
541  *	according to their available features.
542  */
543 
static long watchdog_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	/* wdd is NULL once the underlying device has been unregistered. */
	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	/* Give the driver's own ioctl handler first refusal. */
	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		/* Disable is attempted first; enable only if it succeeded. */
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keep's running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through - report the (possibly adjusted) timeout */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}
636 
637 /*
638  *	watchdog_open: open the /dev/watchdog* devices.
639  *	@inode: inode of device
640  *	@file: file handle to device
641  *
642  *	When the /dev/watchdog* device gets opened, we start the watchdog.
643  *	Watch out: the /dev/watchdog device is single open, so we make sure
644  *	it can only be opened once.
645  */
646 
static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	int err;

	/*
	 * Get the corresponding watchdog device: opens through the legacy
	 * /dev/watchdog misc node map to the first registered watchdog.
	 */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.  A hardware-running watchdog already holds a
	 * module reference (taken at registration), so don't take another.
	 */
	if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	/* Hardware-running watchdogs already hold a kref from registration. */
	if (!watchdog_hw_running(wdd))
		kref_get(&wd_data->kref);

	/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
	return nonseekable_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}
693 
694 static void watchdog_core_data_release(struct kref *kref)
695 {
696 	struct watchdog_core_data *wd_data;
697 
698 	wd_data = container_of(kref, struct watchdog_core_data, kref);
699 
700 	kfree(wd_data);
701 }
702 
703 /*
704  *	watchdog_release: release the watchdog device.
705  *	@inode: inode of device
706  *	@file: file handle to device
707  *
708  *	This is the code for when /dev/watchdog gets closed. We will only
709  *	stop the watchdog when we have received the magic char (and nowayout
710  *	was not set), else the watchdog will keep running.
711  */
712 
static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	/* wdd is NULL if the device was unregistered while open. */
	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	/* Reschedule or cancel the worker to match the new state. */
	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		kref_put(&wd_data->kref, watchdog_core_data_release);
	}
	return 0;
}
762 
/* File operations for both /dev/watchdogN and the legacy /dev/watchdog. */
static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

/* Legacy misc device backing /dev/watchdog; used by watchdog id 0 only. */
static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};
776 
777 /*
778  *	watchdog_cdev_register: register watchdog character device
779  *	@wdd: watchdog device
780  *	@devno: character device number
781  *
782  *	Register a watchdog character device including handling the legacy
783  *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
784  *	thus we set it up like that.
785  */
786 
787 static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
788 {
789 	struct watchdog_core_data *wd_data;
790 	int err;
791 
792 	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
793 	if (!wd_data)
794 		return -ENOMEM;
795 	kref_init(&wd_data->kref);
796 	mutex_init(&wd_data->lock);
797 
798 	wd_data->wdd = wdd;
799 	wdd->wd_data = wd_data;
800 
801 	if (!watchdog_wq)
802 		return -ENODEV;
803 
804 	INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);
805 
806 	if (wdd->id == 0) {
807 		old_wd_data = wd_data;
808 		watchdog_miscdev.parent = wdd->parent;
809 		err = misc_register(&watchdog_miscdev);
810 		if (err != 0) {
811 			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
812 				wdd->info->identity, WATCHDOG_MINOR, err);
813 			if (err == -EBUSY)
814 				pr_err("%s: a legacy watchdog module is probably present.\n",
815 					wdd->info->identity);
816 			old_wd_data = NULL;
817 			kfree(wd_data);
818 			return err;
819 		}
820 	}
821 
822 	/* Fill in the data structures */
823 	cdev_init(&wd_data->cdev, &watchdog_fops);
824 	wd_data->cdev.owner = wdd->ops->owner;
825 
826 	/* Add the device */
827 	err = cdev_add(&wd_data->cdev, devno, 1);
828 	if (err) {
829 		pr_err("watchdog%d unable to add device %d:%d\n",
830 			wdd->id,  MAJOR(watchdog_devt), wdd->id);
831 		if (wdd->id == 0) {
832 			misc_deregister(&watchdog_miscdev);
833 			old_wd_data = NULL;
834 			kref_put(&wd_data->kref, watchdog_core_data_release);
835 		}
836 		return err;
837 	}
838 
839 	/* Record time of most recent heartbeat as 'just before now'. */
840 	wd_data->last_hw_keepalive = jiffies - 1;
841 
842 	/*
843 	 * If the watchdog is running, prevent its driver from being unloaded,
844 	 * and schedule an immediate ping.
845 	 */
846 	if (watchdog_hw_running(wdd)) {
847 		__module_get(wdd->ops->owner);
848 		kref_get(&wd_data->kref);
849 		queue_delayed_work(watchdog_wq, &wd_data->work, 0);
850 	}
851 
852 	return 0;
853 }
854 
855 /*
856  *	watchdog_cdev_unregister: unregister watchdog character device
857  *	@watchdog: watchdog device
858  *
859  *	Unregister watchdog character device and if needed the legacy
860  *	/dev/watchdog device.
861  */
862 
static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	/*
	 * Sever the wdd <-> wd_data links under the lock so open file
	 * handles and the worker see the device as gone.
	 */
	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	/* Wait for any in-flight keepalive work before dropping our ref. */
	cancel_delayed_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}
882 
/* Device class for /sys/class/watchdog/, with the sysfs groups above. */
static struct class watchdog_class = {
	.name =		"watchdog",
	.owner =	THIS_MODULE,
	.dev_groups =	wdt_groups,
};
888 
889 /*
890  *	watchdog_dev_register: register a watchdog device
891  *	@wdd: watchdog device
892  *
893  *	Register a watchdog device including handling the legacy
894  *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
895  *	thus we set it up like that.
896  */
897 
898 int watchdog_dev_register(struct watchdog_device *wdd)
899 {
900 	struct device *dev;
901 	dev_t devno;
902 	int ret;
903 
904 	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
905 
906 	ret = watchdog_cdev_register(wdd, devno);
907 	if (ret)
908 		return ret;
909 
910 	dev = device_create_with_groups(&watchdog_class, wdd->parent,
911 					devno, wdd, wdd->groups,
912 					"watchdog%d", wdd->id);
913 	if (IS_ERR(dev)) {
914 		watchdog_cdev_unregister(wdd);
915 		return PTR_ERR(dev);
916 	}
917 
918 	return ret;
919 }
920 
921 /*
922  *	watchdog_dev_unregister: unregister a watchdog device
923  *	@watchdog: watchdog device
924  *
925  *	Unregister watchdog device and if needed the legacy
926  *	/dev/watchdog device.
927  */
928 
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	/* Tear down in reverse registration order: sysfs node, then cdev. */
	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
	watchdog_cdev_unregister(wdd);
}
934 
935 /*
936  *	watchdog_dev_init: init dev part of watchdog core
937  *
938  *	Allocate a range of chardev nodes to use for watchdog devices
939  */
940 
941 int __init watchdog_dev_init(void)
942 {
943 	int err;
944 
945 	watchdog_wq = alloc_workqueue("watchdogd",
946 				      WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
947 	if (!watchdog_wq) {
948 		pr_err("Failed to create watchdog workqueue\n");
949 		return -ENOMEM;
950 	}
951 
952 	err = class_register(&watchdog_class);
953 	if (err < 0) {
954 		pr_err("couldn't register class\n");
955 		return err;
956 	}
957 
958 	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
959 	if (err < 0) {
960 		pr_err("watchdog: unable to allocate char dev region\n");
961 		class_unregister(&watchdog_class);
962 		return err;
963 	}
964 
965 	return 0;
966 }
967 
968 /*
969  *	watchdog_dev_exit: exit dev part of watchdog core
970  *
971  *	Release the range of chardev nodes used for watchdog devices
972  */
973 
void __exit watchdog_dev_exit(void)
{
	/* Release resources in the reverse order of watchdog_dev_init(). */
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	destroy_workqueue(watchdog_wq);
}
980