/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define	MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

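/* Waiters sleep on this queue; hdq_isr() wakes them on TX/RX completion or timeout */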
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock read/write/break operations */
	struct  mutex		hdq_mutex;
	/* interrupt status and a lock for it */
	u8			hdq_irqstatus;
	spinlock_t		hdq_spinlock;
	/* mode: 0-HDQ 1-W1 */
	int			mode;

};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* Clear saved irqstatus after using an interrupt */
static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
{
	unsigned long irqflags;
	u8 status;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	status = hdq_data->hdq_irqstatus;
	/* this is a read-modify-write */
	hdq_data->hdq_irqstatus &= ~bits;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	return status;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	*status = 0;

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
		OMAP_HDQ_TIMEOUT);
	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for TXCOMPLETE, %x\n", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for GO bit to return to zero, %x\n",
			tmp_status);
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not fully obey the 1-wire spec, so calculate the
	 * CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
		OMAP_HDQ_TIMEOUT);
	tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Check that the presence-detect bit is set, which indicates
	 * that the slave is responding.
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
			OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both the INIT and GO bits to return to zero.
	 * Zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for INIT and GO bits to return to zero, %x\n",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

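/* Read one byte from the RX buffer, starting an RX cycle first if none is pending */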
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (pm_runtime_suspended(hdq_data->dev)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
				       OMAP_HDQ_INT_STATUS_TIMEOUT)),
				   OMAP_HDQ_TIMEOUT);
		status = hdq_reset_irqstatus(hdq_data,
					     OMAP_HDQ_INT_STATUS_RXCOMPLETE |
					     OMAP_HDQ_INT_STATUS_TIMEOUT);
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);

		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev,
				"timeout waiting for RXCOMPLETE, %x\n", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	} else { /* interrupt had occurred before hdq_read_byte was called */
		hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	err = pm_runtime_get_sync(hdq_data->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return err;
	}

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another RXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another RXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03;  /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another TXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	int err;

	err = pm_runtime_get_sync(hdq_data->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return err;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = pm_runtime_get_sync(hdq_data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return -1;
	}

	ret = hdq_read_byte(hdq_data, &val);
	if (ret)
		val = -1;

	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	ret = pm_runtime_get_sync(hdq_data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return;
	}

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		goto out_err;
	}

out_err:
	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);
}

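/*
 * w1 bus master operations. The search (HDQ mode) or triplet (1-wire mode)
 * callback is filled in at probe time according to the "ti,mode" property.
 */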
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};

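/*
 * Runtime PM hooks: resume re-selects HDQ/1-wire mode and re-enables the
 * clock and interrupt mask; suspend writes out the bare mode value. Both
 * paths read INT_STATUS so no stale interrupt status is left behind.
 */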
static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	hdq_reg_out(hdq_data, 0, hdq_data->mode);
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}

static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	/* select HDQ/1W mode & enable clocks */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
		    hdq_data->mode);
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}

static const struct dev_pm_ops omap_hdq_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
			   omap_hdq_runtime_resume, NULL)
};

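/*
 * Probe: map the registers, pick HDQ or 1-wire mode from the "ti,mode"
 * property, set up runtime PM and the interrupt, issue an initial break,
 * and register the w1 bus master.
 */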
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	mutex_init(&hdq_data->hdq_mutex);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}

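/* Unregister the w1 master and tear down runtime PM */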
static int omap_hdq_remove(struct platform_device *pdev)
{
	int active;

	active = pm_runtime_get_sync(&pdev->dev);
	if (active < 0)
		pm_runtime_put_noidle(&pdev->dev);

	w1_remove_master_device(&omap_w1_master);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (active >= 0)
		pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
		.pm = &omap_hdq_pm_ops,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");