// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, 0400);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;
	/* lock read/write/break operations */
	struct mutex hdq_mutex;
	/* interrupt status and a lock for it */
	u8 hdq_irqstatus;
	spinlock_t hdq_spinlock;
	/* mode: 0-HDQ 1-W1 */
	int mode;
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			       u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
		     | (val & mask);

	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
			     u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
		       && time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
		       && time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else {
		return -EINVAL;
	}

	return ret;
}

/* Clear saved irqstatus after using an interrupt */
static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
{
	unsigned long irqflags;
	u8 status;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	status = hdq_data->hdq_irqstatus;
	/* this is a read-modify-write */
	hdq_data->hdq_irqstatus &= ~bits;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	return status;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	*status = 0;

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	*status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for TXCOMPLETE/RXCOMPLETE, %x\n",
			*status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_GO,
				OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for GO bit to return to zero, %x\n",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
	    (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
	     | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
			       u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec, so calculate the
	 * CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	if (hdq_data->hdq_irqstatus)
		dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
			hdq_data->hdq_irqstatus);

	/* set the INIT and GO bits */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		      OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		      OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
				 OMAP_HDQ_TIMEOUT);
	tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * check for the presence detect bit to get
	 * set to show that the slave is responding
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
	      OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * wait for both the INIT and GO bits to return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
				OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
				&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for INIT and GO bits to return to zero, %x\n",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

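/*
 * Read a byte from the device into *val: if an RXCOMPLETE interrupt is not
 * already pending, start a new RX cycle and wait for it to finish.
 */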
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (pm_runtime_suspended(hdq_data->dev)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
				       OMAP_HDQ_INT_STATUS_TIMEOUT)),
				   OMAP_HDQ_TIMEOUT);
		status = hdq_reset_irqstatus(hdq_data,
					     OMAP_HDQ_INT_STATUS_RXCOMPLETE |
					     OMAP_HDQ_INT_STATUS_TIMEOUT);
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			      OMAP_HDQ_CTRL_STATUS_DIR);

		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev,
				"timeout waiting for RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	} else { /* interrupt had occurred before hdq_read_byte was called */
		hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	err = pm_runtime_get_sync(hdq_data->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return err;
	}

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another RXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another RXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	/* Must clear irqstatus for another TXCOMPLETE interrupt */
	hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);

	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	int err;

	err = pm_runtime_get_sync(hdq_data->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return err;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = pm_runtime_get_sync(hdq_data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return -1;
	}

	ret = hdq_read_byte(hdq_data, &val);
	if (ret)
		val = -1;

	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	ret = pm_runtime_get_sync(hdq_data->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(hdq_data->dev);

		return;
	}

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: ctrl status %x\n", status);
		goto out_err;
	}

out_err:
	pm_runtime_mark_last_busy(hdq_data->dev);
	pm_runtime_put_autosuspend(hdq_data->dev);
}

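/*
 * Default W1 bus master operations; the search (HDQ mode) or triplet
 * (1-wire mode) callback is filled in at probe time based on "ti,mode".
 */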
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};

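/*
 * Runtime PM callbacks: resume selects HDQ/1-wire mode and re-enables the
 * module clock and interrupt mask; both paths read INT_STATUS to discard
 * any stale interrupt state.
 */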
static int __maybe_unused omap_hdq_runtime_suspend(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	hdq_reg_out(hdq_data, 0, hdq_data->mode);
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}

static int __maybe_unused omap_hdq_runtime_resume(struct device *dev)
{
	struct hdq_data *hdq_data = dev_get_drvdata(dev);

	/* select HDQ/1W mode & enable clocks */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
		    hdq_data->mode);
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);

	return 0;
}

static const struct dev_pm_ops omap_hdq_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_hdq_runtime_suspend,
			   omap_hdq_runtime_resume, NULL)
};

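/*
 * Probe: map the HDQ registers, pick HDQ or 1-wire mode from the "ti,mode"
 * DT property, set up runtime PM and the interrupt handler, issue an
 * initial break pulse and register the 1-wire bus master.
 */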
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data)
		return -ENOMEM;

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	mutex_init(&hdq_data->hdq_mutex);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}

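/*
 * Remove: unregister the 1-wire master and tear down runtime PM for the
 * device.
 */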
static int omap_hdq_remove(struct platform_device *pdev)
{
	int active;

	active = pm_runtime_get_sync(&pdev->dev);
	if (active < 0)
		pm_runtime_put_noidle(&pdev->dev);

	w1_remove_master_device(&omap_w1_master);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (active >= 0)
		pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
		.pm = &omap_hdq_pm_ops,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");