1 /*
2  *	watchdog_dev.c
3  *
4  *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5  *						All Rights Reserved.
6  *
7  *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8  *
9  *
10  *	This source code is part of the generic code that can be used
11  *	by all the watchdog timer drivers.
12  *
13  *	This part of the generic code takes care of the following
14  *	misc device: /dev/watchdog.
15  *
16  *	Based on source code of the following authors:
17  *	  Matt Domsch <Matt_Domsch@dell.com>,
18  *	  Rob Radez <rob@osinvestor.com>,
19  *	  Rusty Lynch <rusty@linux.co.intel.com>
20  *	  Satyam Sharma <satyam@infradead.org>
21  *	  Randy Dunlap <randy.dunlap@oracle.com>
22  *
23  *	This program is free software; you can redistribute it and/or
24  *	modify it under the terms of the GNU General Public License
25  *	as published by the Free Software Foundation; either version
26  *	2 of the License, or (at your option) any later version.
27  *
28  *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
29  *	admit liability nor provide warranty for any of this software.
30  *	This material is provided "AS-IS" and at no charge.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/cdev.h>		/* For character device */
36 #include <linux/errno.h>	/* For the -ENODEV/... values */
37 #include <linux/fs.h>		/* For file operations */
38 #include <linux/init.h>		/* For __init/__exit/... */
39 #include <linux/jiffies.h>	/* For timeout functions */
40 #include <linux/kernel.h>	/* For printk/panic/... */
41 #include <linux/kref.h>		/* For data references */
42 #include <linux/miscdevice.h>	/* For handling misc devices */
43 #include <linux/module.h>	/* For module stuff/... */
44 #include <linux/mutex.h>	/* For mutexes */
45 #include <linux/slab.h>		/* For memory functions */
46 #include <linux/types.h>	/* For standard types (like size_t) */
47 #include <linux/watchdog.h>	/* For watchdog specific items */
48 #include <linux/workqueue.h>	/* For workqueue */
49 #include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
50 
51 #include "watchdog_core.h"
52 
/*
 * struct watchdog_core_data - watchdog core internal data
 * @kref:	Reference count.
 * @cdev:	The watchdog's character device.
 * @wdd:	Pointer to watchdog device.
 * @lock:	Lock for watchdog core.
 * @last_keepalive:	Time (in jiffies) of the most recent keepalive.
 * @work:	Delayed worker that pings the hardware on userspace's behalf.
 * @status:	Watchdog core internal status bits.
 */
61 struct watchdog_core_data {
62 	struct kref kref;
63 	struct cdev cdev;
64 	struct watchdog_device *wdd;
65 	struct mutex lock;
66 	unsigned long last_keepalive;
67 	struct delayed_work work;
68 	unsigned long status;		/* Internal status bits */
69 #define _WDOG_DEV_OPEN		0	/* Opened ? */
70 #define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
71 };
72 
/* the dev_t for the dynamically allocated region of watchdog device numbers */
74 static dev_t watchdog_devt;
75 /* Reference to watchdog device behind /dev/watchdog */
76 static struct watchdog_core_data *old_wd_data;
77 
78 static struct workqueue_struct *watchdog_wq;
79 
80 static inline bool watchdog_need_worker(struct watchdog_device *wdd)
81 {
	/* All variables in milliseconds */
83 	unsigned int hm = wdd->max_hw_heartbeat_ms;
84 	unsigned int t = wdd->timeout * 1000;
85 
86 	/*
87 	 * A worker to generate heartbeat requests is needed if all of the
88 	 * following conditions are true.
89 	 * - Userspace activated the watchdog.
90 	 * - The driver provided a value for the maximum hardware timeout, and
91 	 *   thus is aware that the framework supports generating heartbeat
92 	 *   requests.
93 	 * - Userspace requests a longer timeout than the hardware can handle.
94 	 */
95 	return watchdog_active(wdd) && hm && t > hm;
96 }
97 
98 static long watchdog_next_keepalive(struct watchdog_device *wdd)
99 {
100 	struct watchdog_core_data *wd_data = wdd->wd_data;
101 	unsigned int timeout_ms = wdd->timeout * 1000;
102 	unsigned long keepalive_interval;
103 	unsigned long last_heartbeat;
104 	unsigned long virt_timeout;
105 	unsigned int hw_heartbeat_ms;
106 
107 	virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
108 	hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
109 	keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
110 
	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come at the latest hw_heartbeat_ms before
	 * this timeout.
	 */
116 	last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
117 	return min_t(long, last_heartbeat - jiffies, keepalive_interval);
118 }
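
/*
 * Worked example of the arithmetic above (numbers are illustrative):
 * with wdd->timeout = 60 s and max_hw_heartbeat_ms = 8000, the worker
 * pings the hardware every 4 s (hw_heartbeat_ms / 2). If the last
 * userspace keepalive arrived at jiffy J, the virtual timeout is
 * J + 60 s and the final worker ping must be scheduled no later than
 * J + 52 s, so that the hardware expires right at the virtual timeout
 * if userspace stays silent.
 */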
119 
120 static inline void watchdog_update_worker(struct watchdog_device *wdd)
121 {
122 	struct watchdog_core_data *wd_data = wdd->wd_data;
123 
124 	if (watchdog_need_worker(wdd)) {
125 		long t = watchdog_next_keepalive(wdd);
126 
127 		if (t > 0)
128 			mod_delayed_work(watchdog_wq, &wd_data->work, t);
129 	} else {
130 		cancel_delayed_work(&wd_data->work);
131 	}
132 }
133 
134 static int __watchdog_ping(struct watchdog_device *wdd)
135 {
136 	int err;
137 
138 	if (wdd->ops->ping)
139 		err = wdd->ops->ping(wdd);  /* ping the watchdog */
140 	else
141 		err = wdd->ops->start(wdd); /* restart watchdog */
142 
143 	watchdog_update_worker(wdd);
144 
145 	return err;
146 }
147 
/*
 *	watchdog_ping: ping the watchdog.
 *	@wdd: the watchdog device to ping
 *
 *	The caller must hold wd_data->lock.
 *
 *	If the watchdog has no ping operation of its own, it is
 *	restarted via the start operation instead. This wrapper function
 *	does exactly that.
 *	We only ping when the watchdog device is running.
 */
159 
160 static int watchdog_ping(struct watchdog_device *wdd)
161 {
162 	struct watchdog_core_data *wd_data = wdd->wd_data;
163 
164 	if (!watchdog_active(wdd))
165 		return 0;
166 
167 	wd_data->last_keepalive = jiffies;
168 	return __watchdog_ping(wdd);
169 }
170 
171 static void watchdog_ping_work(struct work_struct *work)
172 {
173 	struct watchdog_core_data *wd_data;
174 	struct watchdog_device *wdd;
175 
176 	wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
177 			       work);
178 
179 	mutex_lock(&wd_data->lock);
180 	wdd = wd_data->wdd;
181 	if (wdd && watchdog_active(wdd))
182 		__watchdog_ping(wdd);
183 	mutex_unlock(&wd_data->lock);
184 }
185 
186 /*
187  *	watchdog_start: wrapper to start the watchdog.
188  *	@wdd: the watchdog device to start
189  *
190  *	The caller must hold wd_data->lock.
191  *
192  *	Start the watchdog if it is not active and mark it active.
193  *	This function returns zero on success or a negative errno code for
194  *	failure.
195  */
196 
197 static int watchdog_start(struct watchdog_device *wdd)
198 {
199 	struct watchdog_core_data *wd_data = wdd->wd_data;
200 	unsigned long started_at;
201 	int err;
202 
203 	if (watchdog_active(wdd))
204 		return 0;
205 
206 	started_at = jiffies;
207 	err = wdd->ops->start(wdd);
208 	if (err == 0) {
209 		set_bit(WDOG_ACTIVE, &wdd->status);
210 		wd_data->last_keepalive = started_at;
211 		watchdog_update_worker(wdd);
212 	}
213 
214 	return err;
215 }
216 
217 /*
218  *	watchdog_stop: wrapper to stop the watchdog.
219  *	@wdd: the watchdog device to stop
220  *
221  *	The caller must hold wd_data->lock.
222  *
223  *	Stop the watchdog if it is still active and unmark it active.
224  *	This function returns zero on success or a negative errno code for
225  *	failure.
226  *	If the 'nowayout' feature was set, the watchdog cannot be stopped.
227  */
228 
229 static int watchdog_stop(struct watchdog_device *wdd)
230 {
231 	struct watchdog_core_data *wd_data = wdd->wd_data;
232 	int err;
233 
234 	if (!watchdog_active(wdd))
235 		return 0;
236 
237 	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog from being stopped!\n",
			wdd->id);
240 		return -EBUSY;
241 	}
242 
243 	err = wdd->ops->stop(wdd);
244 	if (err == 0) {
245 		clear_bit(WDOG_ACTIVE, &wdd->status);
246 		cancel_delayed_work(&wd_data->work);
247 	}
248 
249 	return err;
250 }
251 
252 /*
253  *	watchdog_get_status: wrapper to get the watchdog status
254  *	@wdd: the watchdog device to get the status from
255  *
256  *	The caller must hold wd_data->lock.
257  *
258  *	Get the watchdog's status flags.
259  */
260 
261 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
262 {
263 	if (!wdd->ops->status)
264 		return 0;
265 
266 	return wdd->ops->status(wdd);
267 }
268 
269 /*
270  *	watchdog_set_timeout: set the watchdog timer timeout
271  *	@wdd: the watchdog device to set the timeout for
272  *	@timeout: timeout to set in seconds
273  *
274  *	The caller must hold wd_data->lock.
275  */
276 
277 static int watchdog_set_timeout(struct watchdog_device *wdd,
278 							unsigned int timeout)
279 {
280 	int err = 0;
281 
282 	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
283 		return -EOPNOTSUPP;
284 
285 	if (watchdog_timeout_invalid(wdd, timeout))
286 		return -EINVAL;
287 
288 	if (wdd->ops->set_timeout)
289 		err = wdd->ops->set_timeout(wdd, timeout);
290 	else
291 		wdd->timeout = timeout;
292 
293 	watchdog_update_worker(wdd);
294 
295 	return err;
296 }
297 
298 /*
299  *	watchdog_get_timeleft: wrapper to get the time left before a reboot
300  *	@wdd: the watchdog device to get the remaining time from
301  *	@timeleft: the time that's left
302  *
303  *	The caller must hold wd_data->lock.
304  *
305  *	Get the time before a watchdog will reboot (if not pinged).
306  */
307 
308 static int watchdog_get_timeleft(struct watchdog_device *wdd,
309 							unsigned int *timeleft)
310 {
311 	*timeleft = 0;
312 
313 	if (!wdd->ops->get_timeleft)
314 		return -EOPNOTSUPP;
315 
316 	*timeleft = wdd->ops->get_timeleft(wdd);
317 
318 	return 0;
319 }
320 
321 #ifdef CONFIG_WATCHDOG_SYSFS
322 static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
323 				char *buf)
324 {
325 	struct watchdog_device *wdd = dev_get_drvdata(dev);
326 
327 	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
328 }
329 static DEVICE_ATTR_RO(nowayout);
330 
331 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
332 				char *buf)
333 {
334 	struct watchdog_device *wdd = dev_get_drvdata(dev);
335 	struct watchdog_core_data *wd_data = wdd->wd_data;
336 	unsigned int status;
337 
338 	mutex_lock(&wd_data->lock);
339 	status = watchdog_get_status(wdd);
340 	mutex_unlock(&wd_data->lock);
341 
342 	return sprintf(buf, "%u\n", status);
343 }
344 static DEVICE_ATTR_RO(status);
345 
346 static ssize_t bootstatus_show(struct device *dev,
347 				struct device_attribute *attr, char *buf)
348 {
349 	struct watchdog_device *wdd = dev_get_drvdata(dev);
350 
351 	return sprintf(buf, "%u\n", wdd->bootstatus);
352 }
353 static DEVICE_ATTR_RO(bootstatus);
354 
355 static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
356 				char *buf)
357 {
358 	struct watchdog_device *wdd = dev_get_drvdata(dev);
359 	struct watchdog_core_data *wd_data = wdd->wd_data;
360 	ssize_t status;
361 	unsigned int val;
362 
363 	mutex_lock(&wd_data->lock);
364 	status = watchdog_get_timeleft(wdd, &val);
365 	mutex_unlock(&wd_data->lock);
366 	if (!status)
367 		status = sprintf(buf, "%u\n", val);
368 
369 	return status;
370 }
371 static DEVICE_ATTR_RO(timeleft);
372 
373 static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
374 				char *buf)
375 {
376 	struct watchdog_device *wdd = dev_get_drvdata(dev);
377 
378 	return sprintf(buf, "%u\n", wdd->timeout);
379 }
380 static DEVICE_ATTR_RO(timeout);
381 
382 static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
383 				char *buf)
384 {
385 	struct watchdog_device *wdd = dev_get_drvdata(dev);
386 
387 	return sprintf(buf, "%s\n", wdd->info->identity);
388 }
389 static DEVICE_ATTR_RO(identity);
390 
391 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
392 				char *buf)
393 {
394 	struct watchdog_device *wdd = dev_get_drvdata(dev);
395 
396 	if (watchdog_active(wdd))
397 		return sprintf(buf, "active\n");
398 
399 	return sprintf(buf, "inactive\n");
400 }
401 static DEVICE_ATTR_RO(state);
402 
403 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
404 				int n)
405 {
406 	struct device *dev = container_of(kobj, struct device, kobj);
407 	struct watchdog_device *wdd = dev_get_drvdata(dev);
408 	umode_t mode = attr->mode;
409 
410 	if (attr == &dev_attr_status.attr && !wdd->ops->status)
411 		mode = 0;
412 	else if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
413 		mode = 0;
414 
415 	return mode;
416 }
417 static struct attribute *wdt_attrs[] = {
418 	&dev_attr_state.attr,
419 	&dev_attr_identity.attr,
420 	&dev_attr_timeout.attr,
421 	&dev_attr_timeleft.attr,
422 	&dev_attr_bootstatus.attr,
423 	&dev_attr_status.attr,
424 	&dev_attr_nowayout.attr,
425 	NULL,
426 };
427 
428 static const struct attribute_group wdt_group = {
429 	.attrs = wdt_attrs,
430 	.is_visible = wdt_is_visible,
431 };
432 __ATTRIBUTE_GROUPS(wdt);
433 #else
434 #define wdt_groups	NULL
435 #endif
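
/*
 * With CONFIG_WATCHDOG_SYSFS enabled, the attributes above appear as
 * read-only files under /sys/class/watchdog/watchdogN/ (state,
 * identity, timeout, timeleft, bootstatus, status, nowayout); they are
 * hooked up via the dev_groups of watchdog_class further below.
 */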
436 
437 /*
 *	watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
439  *	@wdd: the watchdog device to do the ioctl on
440  *	@cmd: watchdog command
441  *	@arg: argument pointer
442  *
443  *	The caller must hold wd_data->lock.
444  */
445 
446 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
447 							unsigned long arg)
448 {
449 	if (!wdd->ops->ioctl)
450 		return -ENOIOCTLCMD;
451 
452 	return wdd->ops->ioctl(wdd, cmd, arg);
453 }
454 
455 /*
456  *	watchdog_write: writes to the watchdog.
457  *	@file: file from VFS
458  *	@data: user address of data
459  *	@len: length of data
460  *	@ppos: pointer to the file offset
461  *
462  *	A write to a watchdog device is defined as a keepalive ping.
463  *	Writing the magic 'V' sequence allows the next close to turn
464  *	off the watchdog (if 'nowayout' is not set).
465  */
466 
467 static ssize_t watchdog_write(struct file *file, const char __user *data,
468 						size_t len, loff_t *ppos)
469 {
470 	struct watchdog_core_data *wd_data = file->private_data;
471 	struct watchdog_device *wdd;
472 	int err;
473 	size_t i;
474 	char c;
475 
476 	if (len == 0)
477 		return 0;
478 
479 	/*
480 	 * Note: just in case someone wrote the magic character
481 	 * five months ago...
482 	 */
483 	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
484 
485 	/* scan to see whether or not we got the magic character */
486 	for (i = 0; i != len; i++) {
487 		if (get_user(c, data + i))
488 			return -EFAULT;
489 		if (c == 'V')
490 			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
491 	}
492 
493 	/* someone wrote to us, so we send the watchdog a keepalive ping */
494 
495 	err = -ENODEV;
496 	mutex_lock(&wd_data->lock);
497 	wdd = wd_data->wdd;
498 	if (wdd)
499 		err = watchdog_ping(wdd);
500 	mutex_unlock(&wd_data->lock);
501 
502 	if (err < 0)
503 		return err;
504 
505 	return len;
506 }
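
/*
 * Illustrative userspace usage (not part of this file): a watchdog
 * daemon typically pings by writing any byte and writes the magic 'V'
 * character just before close(), so that the release path below is
 * allowed to stop the watchdog (unless nowayout is set):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	write(fd, "\0", 1);	- keepalive ping
 *	write(fd, "V", 1);	- magic close character
 *	close(fd);		- may stop the watchdog, see watchdog_release()
 */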
507 
508 /*
 *	watchdog_ioctl: handle the different ioctls for the watchdog device.
510  *	@file: file handle to the device
511  *	@cmd: watchdog command
512  *	@arg: argument pointer
513  *
514  *	The watchdog API defines a common set of functions for all watchdogs
515  *	according to their available features.
516  */
517 
518 static long watchdog_ioctl(struct file *file, unsigned int cmd,
519 							unsigned long arg)
520 {
521 	struct watchdog_core_data *wd_data = file->private_data;
522 	void __user *argp = (void __user *)arg;
523 	struct watchdog_device *wdd;
524 	int __user *p = argp;
525 	unsigned int val;
526 	int err;
527 
528 	mutex_lock(&wd_data->lock);
529 
530 	wdd = wd_data->wdd;
531 	if (!wdd) {
532 		err = -ENODEV;
533 		goto out_ioctl;
534 	}
535 
536 	err = watchdog_ioctl_op(wdd, cmd, arg);
537 	if (err != -ENOIOCTLCMD)
538 		goto out_ioctl;
539 
540 	switch (cmd) {
541 	case WDIOC_GETSUPPORT:
542 		err = copy_to_user(argp, wdd->info,
543 			sizeof(struct watchdog_info)) ? -EFAULT : 0;
544 		break;
545 	case WDIOC_GETSTATUS:
546 		val = watchdog_get_status(wdd);
547 		err = put_user(val, p);
548 		break;
549 	case WDIOC_GETBOOTSTATUS:
550 		err = put_user(wdd->bootstatus, p);
551 		break;
552 	case WDIOC_SETOPTIONS:
553 		if (get_user(val, p)) {
554 			err = -EFAULT;
555 			break;
556 		}
557 		if (val & WDIOS_DISABLECARD) {
558 			err = watchdog_stop(wdd);
559 			if (err < 0)
560 				break;
561 		}
562 		if (val & WDIOS_ENABLECARD)
563 			err = watchdog_start(wdd);
564 		break;
565 	case WDIOC_KEEPALIVE:
566 		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
567 			err = -EOPNOTSUPP;
568 			break;
569 		}
570 		err = watchdog_ping(wdd);
571 		break;
572 	case WDIOC_SETTIMEOUT:
573 		if (get_user(val, p)) {
574 			err = -EFAULT;
575 			break;
576 		}
577 		err = watchdog_set_timeout(wdd, val);
578 		if (err < 0)
579 			break;
		/*
		 * If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout).
		 */
583 		err = watchdog_ping(wdd);
584 		if (err < 0)
585 			break;
		/* fall through */
587 	case WDIOC_GETTIMEOUT:
588 		/* timeout == 0 means that we don't know the timeout */
589 		if (wdd->timeout == 0) {
590 			err = -EOPNOTSUPP;
591 			break;
592 		}
593 		err = put_user(wdd->timeout, p);
594 		break;
595 	case WDIOC_GETTIMELEFT:
596 		err = watchdog_get_timeleft(wdd, &val);
597 		if (err < 0)
598 			break;
599 		err = put_user(val, p);
600 		break;
601 	default:
602 		err = -ENOTTY;
603 		break;
604 	}
605 
606 out_ioctl:
607 	mutex_unlock(&wd_data->lock);
608 	return err;
609 }
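
/*
 * Illustrative userspace usage of the ioctls handled above (not part
 * of this file, error handling omitted):
 *
 *	#include <linux/watchdog.h>
 *
 *	int timeout = 30, timeleft;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	- timeout now holds the value in use
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		- explicit keepalive ping
 *	ioctl(fd, WDIOC_GETTIMELEFT, &timeleft);	- seconds until expiry
 */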
610 
611 /*
612  *	watchdog_open: open the /dev/watchdog* devices.
613  *	@inode: inode of device
614  *	@file: file handle to device
615  *
616  *	When the /dev/watchdog* device gets opened, we start the watchdog.
617  *	Watch out: the /dev/watchdog device is single open, so we make sure
618  *	it can only be opened once.
619  */
620 
621 static int watchdog_open(struct inode *inode, struct file *file)
622 {
623 	struct watchdog_core_data *wd_data;
624 	struct watchdog_device *wdd;
625 	int err;
626 
627 	/* Get the corresponding watchdog device */
628 	if (imajor(inode) == MISC_MAJOR)
629 		wd_data = old_wd_data;
630 	else
631 		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
632 				       cdev);
633 
634 	/* the watchdog is single open! */
635 	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
636 		return -EBUSY;
637 
638 	wdd = wd_data->wdd;
639 
640 	/*
641 	 * If the /dev/watchdog device is open, we don't want the module
642 	 * to be unloaded.
643 	 */
644 	if (!try_module_get(wdd->ops->owner)) {
645 		err = -EBUSY;
646 		goto out_clear;
647 	}
648 
649 	err = watchdog_start(wdd);
650 	if (err < 0)
651 		goto out_mod;
652 
653 	file->private_data = wd_data;
654 
655 	kref_get(&wd_data->kref);
656 
	/* /dev/watchdog is a virtual (and thus non-seekable) device */
658 	return nonseekable_open(inode, file);
659 
660 out_mod:
661 	module_put(wd_data->wdd->ops->owner);
662 out_clear:
663 	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
664 	return err;
665 }
666 
667 static void watchdog_core_data_release(struct kref *kref)
668 {
669 	struct watchdog_core_data *wd_data;
670 
671 	wd_data = container_of(kref, struct watchdog_core_data, kref);
672 
673 	kfree(wd_data);
674 }
675 
676 /*
677  *	watchdog_release: release the watchdog device.
678  *	@inode: inode of device
679  *	@file: file handle to device
680  *
681  *	This is the code for when /dev/watchdog gets closed. We will only
682  *	stop the watchdog when we have received the magic char (and nowayout
683  *	was not set), else the watchdog will keep running.
684  */
685 
686 static int watchdog_release(struct inode *inode, struct file *file)
687 {
688 	struct watchdog_core_data *wd_data = file->private_data;
689 	struct watchdog_device *wdd;
690 	int err = -EBUSY;
691 
692 	mutex_lock(&wd_data->lock);
693 
694 	wdd = wd_data->wdd;
695 	if (!wdd)
696 		goto done;
697 
698 	/*
699 	 * We only stop the watchdog if we received the magic character
700 	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
701 	 * watchdog_stop will fail.
702 	 */
703 	if (!test_bit(WDOG_ACTIVE, &wdd->status))
704 		err = 0;
705 	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
706 		 !(wdd->info->options & WDIOF_MAGICCLOSE))
707 		err = watchdog_stop(wdd);
708 
709 	/* If the watchdog was not stopped, send a keepalive ping */
710 	if (err < 0) {
711 		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
712 		watchdog_ping(wdd);
713 	}
714 
715 	cancel_delayed_work_sync(&wd_data->work);
716 
717 	/* make sure that /dev/watchdog can be re-opened */
718 	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
719 
720 done:
721 	mutex_unlock(&wd_data->lock);
722 	/* Allow the owner module to be unloaded again */
723 	module_put(wd_data->cdev.owner);
724 	kref_put(&wd_data->kref, watchdog_core_data_release);
725 	return 0;
726 }
727 
728 static const struct file_operations watchdog_fops = {
729 	.owner		= THIS_MODULE,
730 	.write		= watchdog_write,
731 	.unlocked_ioctl	= watchdog_ioctl,
732 	.open		= watchdog_open,
733 	.release	= watchdog_release,
734 };
735 
736 static struct miscdevice watchdog_miscdev = {
737 	.minor		= WATCHDOG_MINOR,
738 	.name		= "watchdog",
739 	.fops		= &watchdog_fops,
740 };
741 
742 /*
743  *	watchdog_cdev_register: register watchdog character device
744  *	@wdd: watchdog device
745  *	@devno: character device number
746  *
747  *	Register a watchdog character device including handling the legacy
748  *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
749  *	thus we set it up like that.
750  */
751 
752 static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
753 {
754 	struct watchdog_core_data *wd_data;
755 	int err;
756 
757 	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
758 	if (!wd_data)
759 		return -ENOMEM;
760 	kref_init(&wd_data->kref);
761 	mutex_init(&wd_data->lock);
762 
763 	wd_data->wdd = wdd;
764 	wdd->wd_data = wd_data;
765 
	if (!watchdog_wq) {
		kfree(wd_data);
		wdd->wd_data = NULL;
		return -ENODEV;
	}
768 
769 	INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);
770 
771 	if (wdd->id == 0) {
772 		old_wd_data = wd_data;
773 		watchdog_miscdev.parent = wdd->parent;
774 		err = misc_register(&watchdog_miscdev);
775 		if (err != 0) {
776 			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
777 				wdd->info->identity, WATCHDOG_MINOR, err);
778 			if (err == -EBUSY)
779 				pr_err("%s: a legacy watchdog module is probably present.\n",
780 					wdd->info->identity);
781 			old_wd_data = NULL;
782 			kfree(wd_data);
783 			return err;
784 		}
785 	}
786 
787 	/* Fill in the data structures */
788 	cdev_init(&wd_data->cdev, &watchdog_fops);
789 	wd_data->cdev.owner = wdd->ops->owner;
790 
791 	/* Add the device */
792 	err = cdev_add(&wd_data->cdev, devno, 1);
793 	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
796 		if (wdd->id == 0) {
797 			misc_deregister(&watchdog_miscdev);
798 			old_wd_data = NULL;
799 			kref_put(&wd_data->kref, watchdog_core_data_release);
800 		}
801 	}
802 	return err;
803 }
804 
805 /*
806  *	watchdog_cdev_unregister: unregister watchdog character device
 *	@wdd: watchdog device
808  *
809  *	Unregister watchdog character device and if needed the legacy
810  *	/dev/watchdog device.
811  */
812 
813 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
814 {
815 	struct watchdog_core_data *wd_data = wdd->wd_data;
816 
817 	cdev_del(&wd_data->cdev);
818 	if (wdd->id == 0) {
819 		misc_deregister(&watchdog_miscdev);
820 		old_wd_data = NULL;
821 	}
822 
823 	mutex_lock(&wd_data->lock);
824 	wd_data->wdd = NULL;
825 	wdd->wd_data = NULL;
826 	mutex_unlock(&wd_data->lock);
827 
828 	cancel_delayed_work_sync(&wd_data->work);
829 
830 	kref_put(&wd_data->kref, watchdog_core_data_release);
831 }
832 
833 static struct class watchdog_class = {
834 	.name =		"watchdog",
835 	.owner =	THIS_MODULE,
836 	.dev_groups =	wdt_groups,
837 };
838 
839 /*
840  *	watchdog_dev_register: register a watchdog device
841  *	@wdd: watchdog device
842  *
843  *	Register a watchdog device including handling the legacy
844  *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
845  *	thus we set it up like that.
846  */
847 
848 int watchdog_dev_register(struct watchdog_device *wdd)
849 {
850 	struct device *dev;
851 	dev_t devno;
852 	int ret;
853 
854 	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
855 
856 	ret = watchdog_cdev_register(wdd, devno);
857 	if (ret)
858 		return ret;
859 
860 	dev = device_create_with_groups(&watchdog_class, wdd->parent,
861 					devno, wdd, wdd->groups,
862 					"watchdog%d", wdd->id);
863 	if (IS_ERR(dev)) {
864 		watchdog_cdev_unregister(wdd);
865 		return PTR_ERR(dev);
866 	}
867 
868 	return ret;
869 }
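
/*
 * Driver-side sketch (illustrative only, the foo_wdt_* names are
 * hypothetical): a driver reaches watchdog_dev_register() by filling
 * in its struct watchdog_device and calling watchdog_register_device()
 * from the watchdog core:
 *
 *	static const struct watchdog_info foo_wdt_info = {
 *		.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
 *			    WDIOF_MAGICCLOSE,
 *		.identity = "foo_wdt",
 *	};
 *
 *	static const struct watchdog_ops foo_wdt_ops = {
 *		.owner = THIS_MODULE,
 *		.start = foo_wdt_start,
 *		.stop  = foo_wdt_stop,
 *		.ping  = foo_wdt_ping,	- optional, start() is used otherwise
 *	};
 *
 *	wdd->info    = &foo_wdt_info;
 *	wdd->ops     = &foo_wdt_ops;
 *	wdd->timeout = 30;
 *	err = watchdog_register_device(wdd);
 */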
870 
871 /*
872  *	watchdog_dev_unregister: unregister a watchdog device
 *	@wdd: watchdog device
874  *
875  *	Unregister watchdog device and if needed the legacy
876  *	/dev/watchdog device.
877  */
878 
879 void watchdog_dev_unregister(struct watchdog_device *wdd)
880 {
881 	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
882 	watchdog_cdev_unregister(wdd);
883 }
884 
/*
 *	watchdog_dev_init: init dev part of watchdog core
 *
 *	Create the watchdog workqueue, register the watchdog class and
 *	allocate a range of char device numbers for watchdog devices.
 */
890 
891 int __init watchdog_dev_init(void)
892 {
893 	int err;
894 
895 	watchdog_wq = alloc_workqueue("watchdogd",
896 				      WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
897 	if (!watchdog_wq) {
898 		pr_err("Failed to create watchdog workqueue\n");
899 		return -ENOMEM;
900 	}
901 
	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	destroy_workqueue(watchdog_wq);
	return err;
916 }
917 
/*
 *	watchdog_dev_exit: exit dev part of watchdog core
 *
 *	Release the watchdog char device numbers, unregister the
 *	watchdog class and destroy the watchdog workqueue.
 */
923 
924 void __exit watchdog_dev_exit(void)
925 {
926 	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
927 	class_unregister(&watchdog_class);
928 	destroy_workqueue(watchdog_wq);
929 }
930