xref: /openbmc/linux/drivers/watchdog/dw_wdt.c (revision 6523d3b2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2010-2011 Picochip Ltd., Jamie Iles
4  * https://www.picochip.com
5  *
6  * This file implements a driver for the Synopsys DesignWare watchdog device
7  * found in many SoC subsystems. The watchdog has 16 different timeout periods
8  * and these are a function of the input clock frequency.
9  *
10  * The DesignWare watchdog cannot be stopped once it has been started so we
11  * do not implement a stop function. The watchdog core will continue to send
12  * heartbeat requests after the watchdog device has been closed.
13  */
14 
15 #include <linux/bitops.h>
16 #include <linux/clk.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/kernel.h>
23 #include <linux/limits.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/of.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm.h>
29 #include <linux/reset.h>
30 #include <linux/watchdog.h>
31 
32 #define WDOG_CONTROL_REG_OFFSET		    0x00
33 #define WDOG_CONTROL_REG_WDT_EN_MASK	    0x01
34 #define WDOG_CONTROL_REG_RESP_MODE_MASK	    0x02
35 #define WDOG_TIMEOUT_RANGE_REG_OFFSET	    0x04
36 #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT    4
37 #define WDOG_CURRENT_COUNT_REG_OFFSET	    0x08
38 #define WDOG_COUNTER_RESTART_REG_OFFSET     0x0c
39 #define WDOG_COUNTER_RESTART_KICK_VALUE	    0x76
40 #define WDOG_INTERRUPT_STATUS_REG_OFFSET    0x10
41 #define WDOG_INTERRUPT_CLEAR_REG_OFFSET     0x14
42 #define WDOG_COMP_PARAMS_5_REG_OFFSET       0xe4
43 #define WDOG_COMP_PARAMS_4_REG_OFFSET       0xe8
44 #define WDOG_COMP_PARAMS_3_REG_OFFSET       0xec
45 #define WDOG_COMP_PARAMS_2_REG_OFFSET       0xf0
46 #define WDOG_COMP_PARAMS_1_REG_OFFSET       0xf4
47 #define WDOG_COMP_PARAMS_1_USE_FIX_TOP      BIT(6)
48 #define WDOG_COMP_VERSION_REG_OFFSET        0xf8
49 #define WDOG_COMP_TYPE_REG_OFFSET           0xfc
50 
51 /* There are sixteen TOPs (timeout periods) that can be set in the watchdog. */
52 #define DW_WDT_NUM_TOPS		16
53 #define DW_WDT_FIX_TOP(_idx)	(1U << (16 + _idx))
54 
55 #define DW_WDT_DEFAULT_SECONDS	30
56 
57 static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = {
58 	DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2),
59 	DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5),
60 	DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8),
61 	DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11),
62 	DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14),
63 	DW_WDT_FIX_TOP(15)
64 };
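
/*
 * For example, with a hypothetical 2 MHz timer clock the fixed TOPs above
 * correspond to 2^16..2^31 input clock cycles, i.e. roughly 33 ms for TOP 0
 * up to about 1074 s for TOP 15; the usable range is entirely a function of
 * the input clock rate.
 */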
65 
66 static bool nowayout = WATCHDOG_NOWAYOUT;
67 module_param(nowayout, bool, 0);
68 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
69 		 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
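
/*
 * For example (illustrative), the protection can be forced at load time when
 * the driver is built as a module:
 *
 *	modprobe dw_wdt nowayout=1
 */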
70 
71 enum dw_wdt_rmod {
72 	DW_WDT_RMOD_RESET = 1,
73 	DW_WDT_RMOD_IRQ = 2
74 };
75 
76 struct dw_wdt_timeout {
77 	u32 top_val;
78 	unsigned int sec;
79 	unsigned int msec;
80 };
81 
82 struct dw_wdt {
83 	void __iomem		*regs;
84 	struct clk		*clk;
85 	struct clk		*pclk;
86 	unsigned long		rate;
87 	enum dw_wdt_rmod	rmod;
88 	struct dw_wdt_timeout	timeouts[DW_WDT_NUM_TOPS];
89 	struct watchdog_device	wdd;
90 	struct reset_control	*rst;
91 	/* Save/restore */
92 	u32			control;
93 	u32			timeout;
94 
95 #ifdef CONFIG_DEBUG_FS
96 	struct dentry		*dbgfs_dir;
97 #endif
98 };
99 
100 #define to_dw_wdt(wdd)	container_of(wdd, struct dw_wdt, wdd)
101 
102 static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
103 {
104 	return readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET) &
105 		WDOG_CONTROL_REG_WDT_EN_MASK;
106 }
107 
108 static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
109 {
110 	u32 val;
111 
112 	val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
113 	if (rmod == DW_WDT_RMOD_IRQ)
114 		val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
115 	else
116 		val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
117 	writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
118 
119 	dw_wdt->rmod = rmod;
120 }
121 
122 static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
123 					 unsigned int timeout, u32 *top_val)
124 {
125 	int idx;
126 
127 	/*
128 	 * Find a TOP with a timeout greater than or equal to the requested
129 	 * value. Note we'll fall back to the TOP with the maximum timeout if
130 	 * the requested value can't be reached.
131 	 */
132 	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
133 		if (dw_wdt->timeouts[idx].sec >= timeout)
134 			break;
135 	}
136 
137 	if (idx == DW_WDT_NUM_TOPS)
138 		--idx;
139 
140 	*top_val = dw_wdt->timeouts[idx].top_val;
141 
142 	return dw_wdt->timeouts[idx].sec;
143 }
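
/*
 * Example (values assumed for illustration): if the available TOPs grant
 * 16 s and 32 s periods, a 20 s request selects the 32 s entry, while a
 * request longer than the largest TOP simply falls back to that largest
 * entry.
 */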
144 
145 static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
146 {
147 	int idx;
148 
149 	/*
150 	 * We'll find a timeout greater than or equal to one second anyway,
151 	 * because the driver probe would have failed if there was none.
152 	 */
153 	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
154 		if (dw_wdt->timeouts[idx].sec)
155 			break;
156 	}
157 
158 	return dw_wdt->timeouts[idx].sec;
159 }
160 
161 static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
162 {
163 	struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
164 	u64 msec;
165 
166 	msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec;
167 
168 	return msec < UINT_MAX ? msec : UINT_MAX;
169 }
170 
171 static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
172 {
173 	int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
174 	int idx;
175 
176 	for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
177 		if (dw_wdt->timeouts[idx].top_val == top_val)
178 			break;
179 	}
180 
181 	/*
182 	 * In IRQ mode, due to the two-stage counter, the actual timeout is
183 	 * twice the TOP setting.
184 	 */
185 	return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
186 }
187 
188 static int dw_wdt_ping(struct watchdog_device *wdd)
189 {
190 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
191 
192 	writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs +
193 	       WDOG_COUNTER_RESTART_REG_OFFSET);
194 
195 	return 0;
196 }
197 
198 static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
199 {
200 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
201 	unsigned int timeout;
202 	u32 top_val;
203 
204 	/*
205 	 * Note IRQ mode being enabled means a non-zero pre-timeout has been
206 	 * set up. In this case we try to find a TOP as close to half of the
207 	 * requested timeout as possible, since the DW Watchdog IRQ mode works
208 	 * in two stages - the first timeout raises the pre-timeout interrupt,
209 	 * the second one performs the system reset. So the effective
210 	 * watchdog-caused reset happens only after two watchdog TOPs elapse.
211 	 */
212 	timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
213 				       &top_val);
214 	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
215 		wdd->pretimeout = timeout;
216 	else
217 		wdd->pretimeout = 0;
218 
219 	/*
220 	 * Set the new value in the watchdog.  Some versions of dw_wdt
221 	 * have TOPINIT in the TIMEOUT_RANGE register (as per
222 	 * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1).  On those we
223 	 * effectively get a pat of the watchdog right here.
224 	 */
225 	writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
226 	       dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
227 
228 	/* Kick new TOP value into the watchdog counter if activated. */
229 	if (watchdog_active(wdd))
230 		dw_wdt_ping(wdd);
231 
232 	/*
233 	 * In case the user sets a timeout bigger than the hardware can
234 	 * support, the watchdog core (watchdog_dev.c) keeps feeding the
235 	 * watchdog before wdd->max_hw_heartbeat_ms elapses.
236 	 */
237 	if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
238 		wdd->timeout = timeout * dw_wdt->rmod;
239 	else
240 		wdd->timeout = top_s;
241 
242 	return 0;
243 }
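
/*
 * Example (values assumed for illustration): with pretimeout enabled a 30 s
 * request is halved to 15 s; if the closest TOP grants 16 s, the function
 * reports a 16 s pretimeout and a 32 s effective timeout, and the reset only
 * fires once the second stage expires.
 */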
244 
245 static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
246 {
247 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
248 
249 	/*
250 	 * We ignore the actual value of the timeout passed from user-space,
251 	 * using it only as a flag indicating whether the pretimeout
252 	 * functionality is intended to be activated.
253 	 */
254 	dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
255 	dw_wdt_set_timeout(wdd, wdd->timeout);
256 
257 	return 0;
258 }
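
/*
 * From user space this path is normally reached through the watchdog
 * character device; a minimal sketch (illustrative, error handling omitted):
 *
 *	int fd = open("/dev/watchdog0", O_WRONLY);
 *	int pre = 10;
 *	ioctl(fd, WDIOC_SETPRETIMEOUT, &pre);	// switch to IRQ (pre-timeout) mode
 *	pre = 0;
 *	ioctl(fd, WDIOC_SETPRETIMEOUT, &pre);	// back to plain reset mode
 */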
259 
260 static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
261 {
262 	u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
263 
264 	/* Disable/enable interrupt mode depending on the RMOD flag. */
265 	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
266 		val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
267 	else
268 		val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
269 	/* Enable watchdog. */
270 	val |= WDOG_CONTROL_REG_WDT_EN_MASK;
271 	writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
272 }
273 
274 static int dw_wdt_start(struct watchdog_device *wdd)
275 {
276 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
277 
278 	dw_wdt_set_timeout(wdd, wdd->timeout);
279 	dw_wdt_ping(&dw_wdt->wdd);
280 	dw_wdt_arm_system_reset(dw_wdt);
281 
282 	return 0;
283 }
284 
285 static int dw_wdt_stop(struct watchdog_device *wdd)
286 {
287 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
288 
289 	if (!dw_wdt->rst) {
290 		set_bit(WDOG_HW_RUNNING, &wdd->status);
291 		return 0;
292 	}
293 
294 	reset_control_assert(dw_wdt->rst);
295 	reset_control_deassert(dw_wdt->rst);
296 
297 	return 0;
298 }
299 
300 static int dw_wdt_restart(struct watchdog_device *wdd,
301 			  unsigned long action, void *data)
302 {
303 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
304 
305 	writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
306 	dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
307 	if (dw_wdt_is_enabled(dw_wdt))
308 		writel(WDOG_COUNTER_RESTART_KICK_VALUE,
309 		       dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
310 	else
311 		dw_wdt_arm_system_reset(dw_wdt);
312 
313 	/* wait for reset to assert... */
314 	mdelay(500);
315 
316 	return 0;
317 }
318 
319 static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
320 {
321 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
322 	unsigned int sec;
323 	u32 val;
324 
325 	val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
326 	sec = val / dw_wdt->rate;
327 
328 	if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
329 		val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
330 		if (!val)
331 			sec += wdd->pretimeout;
332 	}
333 
334 	return sec;
335 }
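
/*
 * Example (assumed numbers): with a 16 s pretimeout programmed and 12 s left
 * in the first counter stage, the function reports 28 s as long as the
 * pre-timeout interrupt hasn't fired, since the whole second stage still lies
 * ahead; afterwards only the raw counter value is reported.
 */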
336 
337 static const struct watchdog_info dw_wdt_ident = {
338 	.options	= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
339 			  WDIOF_MAGICCLOSE,
340 	.identity	= "Synopsys DesignWare Watchdog",
341 };
342 
343 static const struct watchdog_info dw_wdt_pt_ident = {
344 	.options	= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
345 			  WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE,
346 	.identity	= "Synopsys DesignWare Watchdog",
347 };
348 
349 static const struct watchdog_ops dw_wdt_ops = {
350 	.owner		= THIS_MODULE,
351 	.start		= dw_wdt_start,
352 	.stop		= dw_wdt_stop,
353 	.ping		= dw_wdt_ping,
354 	.set_timeout	= dw_wdt_set_timeout,
355 	.set_pretimeout	= dw_wdt_set_pretimeout,
356 	.get_timeleft	= dw_wdt_get_timeleft,
357 	.restart	= dw_wdt_restart,
358 };
359 
360 static irqreturn_t dw_wdt_irq(int irq, void *devid)
361 {
362 	struct dw_wdt *dw_wdt = devid;
363 	u32 val;
364 
365 	/*
366 	 * We don't clear the IRQ status here. That's supposed to be done by
367 	 * the subsequent ping operation.
368 	 */
369 	val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
370 	if (!val)
371 		return IRQ_NONE;
372 
373 	watchdog_notify_pretimeout(&dw_wdt->wdd);
374 
375 	return IRQ_HANDLED;
376 }
377 
378 #ifdef CONFIG_PM_SLEEP
379 static int dw_wdt_suspend(struct device *dev)
380 {
381 	struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
382 
383 	dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
384 	dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
385 
386 	clk_disable_unprepare(dw_wdt->pclk);
387 	clk_disable_unprepare(dw_wdt->clk);
388 
389 	return 0;
390 }
391 
392 static int dw_wdt_resume(struct device *dev)
393 {
394 	struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
395 	int err = clk_prepare_enable(dw_wdt->clk);
396 
397 	if (err)
398 		return err;
399 
400 	err = clk_prepare_enable(dw_wdt->pclk);
401 	if (err) {
402 		clk_disable_unprepare(dw_wdt->clk);
403 		return err;
404 	}
405 
406 	writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
407 	writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
408 
409 	dw_wdt_ping(&dw_wdt->wdd);
410 
411 	return 0;
412 }
413 #endif /* CONFIG_PM_SLEEP */
414 
415 static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
416 
417 /*
418  * If the DW WDT IP core is synthesized with the fixed TOP feature disabled,
419  * the TOPs array can hold nearly any sixteen unsigned values in an arbitrary
420  * order, depending on the system engineer's choices. The next method walks
421  * the passed TOPs array to pre-calculate the effective timeouts and to sort
422  * the TOP entries into ascending order with respect to those timeouts.
423  */
424 
425 static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
426 {
427 	struct dw_wdt_timeout tout, *dst;
428 	int val, tidx;
429 	u64 msec;
430 
431 	/*
432 	 * We walk over the passed TOPs array and calculate the corresponding
433 	 * timeouts in seconds and milliseconds. The millisecond granularity
434 	 * is needed to distinguish TOPs with very close timeouts and to set
435 	 * the watchdog max heartbeat setting later on.
436 	 */
437 	for (val = 0; val < DW_WDT_NUM_TOPS; ++val) {
438 		tout.top_val = val;
439 		tout.sec = tops[val] / dw_wdt->rate;
440 		msec = (u64)tops[val] * MSEC_PER_SEC;
441 		do_div(msec, dw_wdt->rate);
442 		tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC);
443 
444 		/*
445 		 * Find a suitable place for the current TOP in the timeouts
446 		 * array so that the list remains in ascending order.
447 		 */
448 		for (tidx = 0; tidx < val; ++tidx) {
449 			dst = &dw_wdt->timeouts[tidx];
450 			if (tout.sec > dst->sec || (tout.sec == dst->sec &&
451 			    tout.msec >= dst->msec))
452 				continue;
453 
454 			swap(*dst, tout);
455 		}
456 
457 		dw_wdt->timeouts[val] = tout;
458 	}
459 }
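
/*
 * As an illustration, for a hypothetical 1 MHz clock and the fixed TOPs the
 * loop above produces entries of roughly 0 s/65 ms, 0 s/131 ms, ... up to
 * 2147 s/483 ms, already in ascending order; with a custom, unordered
 * "snps,watchdog-tops" array the insertion pass re-sorts the entries so that
 * index 15 always holds the longest period.
 */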
460 
461 static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
462 {
463 	u32 data, of_tops[DW_WDT_NUM_TOPS];
464 	const u32 *tops;
465 	int ret;
466 
467 	/*
468 	 * Retrieve custom or fixed counter values depending on the
469 	 * WDT_USE_FIX_TOP flag found in the component-specific parameters
470 	 * #1 register.
471 	 */
472 	data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
473 	if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) {
474 		tops = dw_wdt_fix_tops;
475 	} else {
476 		ret = of_property_read_variable_u32_array(dev_of_node(dev),
477 			"snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS,
478 			DW_WDT_NUM_TOPS);
479 		if (ret < 0) {
480 			dev_warn(dev, "No valid TOPs array specified\n");
481 			tops = dw_wdt_fix_tops;
482 		} else {
483 			tops = of_tops;
484 		}
485 	}
486 
487 	/* Convert the specified TOPs into an array of watchdog timeouts. */
488 	dw_wdt_handle_tops(dw_wdt, tops);
489 	if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
490 		dev_err(dev, "No valid TOPs detected\n");
491 		return -EINVAL;
492 	}
493 
494 	return 0;
495 }
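
/*
 * When the IP is synthesized without the fixed TOP feature, the per-board
 * periods are expected to come from the device tree. A hypothetical node
 * (addresses and values purely illustrative) might look like:
 *
 *	watchdog@ffd00200 {
 *		compatible = "snps,dw-wdt";
 *		reg = <0xffd00200 0x100>;
 *		clocks = <&osc1>;
 *		snps,watchdog-tops = <0x000000ff 0x000001ff 0x000003ff 0x000007ff
 *				      0x0000ffff 0x0001ffff 0x0003ffff 0x0007ffff
 *				      0x000fffff 0x001fffff 0x003fffff 0x007fffff
 *				      0x00ffffff 0x01ffffff 0x03ffffff 0x07ffffff>;
 *	};
 *
 * Exactly DW_WDT_NUM_TOPS (16) cells are required, each expressed in input
 * clock cycles.
 */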
496 
497 #ifdef CONFIG_DEBUG_FS
498 
499 #define DW_WDT_DBGFS_REG(_name, _off) \
500 {				      \
501 	.name = _name,		      \
502 	.offset = _off		      \
503 }
504 
505 static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = {
506 	DW_WDT_DBGFS_REG("cr", WDOG_CONTROL_REG_OFFSET),
507 	DW_WDT_DBGFS_REG("torr", WDOG_TIMEOUT_RANGE_REG_OFFSET),
508 	DW_WDT_DBGFS_REG("ccvr", WDOG_CURRENT_COUNT_REG_OFFSET),
509 	DW_WDT_DBGFS_REG("crr", WDOG_COUNTER_RESTART_REG_OFFSET),
510 	DW_WDT_DBGFS_REG("stat", WDOG_INTERRUPT_STATUS_REG_OFFSET),
511 	DW_WDT_DBGFS_REG("param5", WDOG_COMP_PARAMS_5_REG_OFFSET),
512 	DW_WDT_DBGFS_REG("param4", WDOG_COMP_PARAMS_4_REG_OFFSET),
513 	DW_WDT_DBGFS_REG("param3", WDOG_COMP_PARAMS_3_REG_OFFSET),
514 	DW_WDT_DBGFS_REG("param2", WDOG_COMP_PARAMS_2_REG_OFFSET),
515 	DW_WDT_DBGFS_REG("param1", WDOG_COMP_PARAMS_1_REG_OFFSET),
516 	DW_WDT_DBGFS_REG("version", WDOG_COMP_VERSION_REG_OFFSET),
517 	DW_WDT_DBGFS_REG("type", WDOG_COMP_TYPE_REG_OFFSET)
518 };
519 
520 static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
521 {
522 	struct device *dev = dw_wdt->wdd.parent;
523 	struct debugfs_regset32 *regset;
524 
525 	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
526 	if (!regset)
527 		return;
528 
529 	regset->regs = dw_wdt_dbgfs_regs;
530 	regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs);
531 	regset->base = dw_wdt->regs;
532 
533 	dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
534 
535 	debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
536 }
537 
538 static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
539 {
540 	debugfs_remove_recursive(dw_wdt->dbgfs_dir);
541 }
542 
543 #else /* !CONFIG_DEBUG_FS */
544 
545 static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
546 static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}
547 
548 #endif /* !CONFIG_DEBUG_FS */
549 
550 static int dw_wdt_drv_probe(struct platform_device *pdev)
551 {
552 	struct device *dev = &pdev->dev;
553 	struct watchdog_device *wdd;
554 	struct dw_wdt *dw_wdt;
555 	int ret;
556 
557 	dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
558 	if (!dw_wdt)
559 		return -ENOMEM;
560 
561 	dw_wdt->regs = devm_platform_ioremap_resource(pdev, 0);
562 	if (IS_ERR(dw_wdt->regs))
563 		return PTR_ERR(dw_wdt->regs);
564 
565 	/*
566 	 * Try to request the watchdog's dedicated timer clock source. It must
567 	 * be supplied if asynchronous mode is enabled. Otherwise fall back
568 	 * to the common timer/bus clocks configuration, in which the very
569 	 * first clock found supplies both the timer and the APB signals.
570 	 */
571 	dw_wdt->clk = devm_clk_get(dev, "tclk");
572 	if (IS_ERR(dw_wdt->clk)) {
573 		dw_wdt->clk = devm_clk_get(dev, NULL);
574 		if (IS_ERR(dw_wdt->clk))
575 			return PTR_ERR(dw_wdt->clk);
576 	}
577 
578 	ret = clk_prepare_enable(dw_wdt->clk);
579 	if (ret)
580 		return ret;
581 
582 	dw_wdt->rate = clk_get_rate(dw_wdt->clk);
583 	if (dw_wdt->rate == 0) {
584 		ret = -EINVAL;
585 		goto out_disable_clk;
586 	}
587 
588 	/*
589 	 * Request the APB clock if the device is configured in asynchronous
590 	 * clocks mode; in that case both tclk and pclk are supposed to be
591 	 * specified. Alas we can't know for sure whether async mode was really
592 	 * activated, so the pclk phandle reference is left optional. If it
593 	 * can't be found, the device is assumed to be in synchronous clocks mode.
594 	 */
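	/*
	 * For reference, an asynchronous-clock setup would carry both supplies
	 * in the device tree, e.g. (illustrative):
	 *
	 *	clocks = <&wdt_tclk>, <&apb_pclk>;
	 *	clock-names = "tclk", "pclk";
	 *
	 * while a synchronous configuration lists a single clock that feeds
	 * both the counter and the APB interface.
	 */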
595 	dw_wdt->pclk = devm_clk_get_optional(dev, "pclk");
596 	if (IS_ERR(dw_wdt->pclk)) {
597 		ret = PTR_ERR(dw_wdt->pclk);
598 		goto out_disable_clk;
599 	}
600 
601 	ret = clk_prepare_enable(dw_wdt->pclk);
602 	if (ret)
603 		goto out_disable_clk;
604 
605 	dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
606 	if (IS_ERR(dw_wdt->rst)) {
607 		ret = PTR_ERR(dw_wdt->rst);
608 		goto out_disable_pclk;
609 	}
610 
611 	/* Enable normal reset without pre-timeout by default. */
612 	dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
613 
614 	/*
615 	 * The pre-timeout IRQ is optional, since some hardware may lack
616 	 * support for it. Note we must request a rising-edge IRQ, since the
617 	 * line is left pending either until the next watchdog kick event or
618 	 * until the system reset.
619 	 */
620 	ret = platform_get_irq_optional(pdev, 0);
621 	if (ret > 0) {
622 		ret = devm_request_irq(dev, ret, dw_wdt_irq,
623 				       IRQF_SHARED | IRQF_TRIGGER_RISING,
624 				       pdev->name, dw_wdt);
625 		if (ret)
626 			goto out_disable_pclk;
627 
628 		dw_wdt->wdd.info = &dw_wdt_pt_ident;
629 	} else {
630 		if (ret == -EPROBE_DEFER)
631 			goto out_disable_pclk;
632 
633 		dw_wdt->wdd.info = &dw_wdt_ident;
634 	}
635 
636 	reset_control_deassert(dw_wdt->rst);
637 
638 	ret = dw_wdt_init_timeouts(dw_wdt, dev);
639 	if (ret)
640 		goto out_disable_pclk;
641 
642 	wdd = &dw_wdt->wdd;
643 	wdd->ops = &dw_wdt_ops;
644 	wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
645 	wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
646 	wdd->parent = dev;
647 
648 	watchdog_set_drvdata(wdd, dw_wdt);
649 	watchdog_set_nowayout(wdd, nowayout);
650 	watchdog_init_timeout(wdd, 0, dev);
651 
652 	/*
653 	 * If the watchdog is already running, use its currently configured
654 	 * timeout. Otherwise use the default or the value provided through
655 	 * devicetree.
656 	 */
657 	if (dw_wdt_is_enabled(dw_wdt)) {
658 		wdd->timeout = dw_wdt_get_timeout(dw_wdt);
659 		set_bit(WDOG_HW_RUNNING, &wdd->status);
660 	} else {
661 		wdd->timeout = DW_WDT_DEFAULT_SECONDS;
662 		watchdog_init_timeout(wdd, 0, dev);
663 	}
664 
665 	platform_set_drvdata(pdev, dw_wdt);
666 
667 	watchdog_set_restart_priority(wdd, 128);
668 
669 	ret = watchdog_register_device(wdd);
670 	if (ret)
671 		goto out_disable_pclk;
672 
673 	dw_wdt_dbgfs_init(dw_wdt);
674 
675 	return 0;
676 
677 out_disable_pclk:
678 	clk_disable_unprepare(dw_wdt->pclk);
679 
680 out_disable_clk:
681 	clk_disable_unprepare(dw_wdt->clk);
682 	return ret;
683 }
684 
685 static int dw_wdt_drv_remove(struct platform_device *pdev)
686 {
687 	struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
688 
689 	dw_wdt_dbgfs_clear(dw_wdt);
690 
691 	watchdog_unregister_device(&dw_wdt->wdd);
692 	reset_control_assert(dw_wdt->rst);
693 	clk_disable_unprepare(dw_wdt->pclk);
694 	clk_disable_unprepare(dw_wdt->clk);
695 
696 	return 0;
697 }
698 
699 #ifdef CONFIG_OF
700 static const struct of_device_id dw_wdt_of_match[] = {
701 	{ .compatible = "snps,dw-wdt", },
702 	{ /* sentinel */ }
703 };
704 MODULE_DEVICE_TABLE(of, dw_wdt_of_match);
705 #endif
706 
707 static struct platform_driver dw_wdt_driver = {
708 	.probe		= dw_wdt_drv_probe,
709 	.remove		= dw_wdt_drv_remove,
710 	.driver		= {
711 		.name	= "dw_wdt",
712 		.of_match_table = of_match_ptr(dw_wdt_of_match),
713 		.pm	= &dw_wdt_pm_ops,
714 	},
715 };
716 
717 module_platform_driver(dw_wdt_driver);
718 
719 MODULE_AUTHOR("Jamie Iles");
720 MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
721 MODULE_LICENSE("GPL");
722