/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the calls to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
	int			rrw;
	/* mode: 0-HDQ 1-W1 */
	int			mode;
};

static int omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);
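
/*
 * Matching device-tree nodes. An illustrative node (register address and
 * interrupt number are SoC/board-specific; the optional "ti,mode" property
 * selects "hdq" (default) or "1w" protocol handling):
 *
 *	hdqw1w: 1w@480b2000 {
 *		compatible = "ti,omap3-1w";
 *		reg = <0x480b2000 0x1000>;
 *		interrupts = <58>;
 *		ti,mode = "hdq";
 *	};
 */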
static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			       u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
		     | (val & mask);

	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits of the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Returns 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
			     u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
		       && time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
		       && time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for TXCOMPLETE/RXCOMPLETE, %x",
			*status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_GO,
				OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev,
			"timeout waiting for GO bit to return to zero, %x",
			tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
	    (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
	     | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up the sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
			       u8 search_type,
			       w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec,
	 * so calculate the CRC from the module parameter instead.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}
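
/*
 * Soft-reset the module: request SOFTRESET in SYSCONFIG, wait for
 * RESETDONE in SYSSTATUS, then restore the protocol mode, clock enable
 * and interrupt mask and re-enable auto idle.
 */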
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ/1W mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and
	 * GO/INIT won't return to zero if the interrupt is disabled, so
	 * always keep the interrupt enabled.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for the reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
				OMAP_HDQ_SYSSTATUS_RESETDONE,
				OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting for HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			    hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue a break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bits */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		      OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		      OMAP_HDQ_CTRL_STATUS_GO,
		      OMAP_HDQ_CTRL_STATUS_DIR |
		      OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		      OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Check that the presence-detect bit is set, which shows
	 * that a slave is responding on the bus.
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
	      OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * Wait for both the INIT and GO bits to return to zero;
	 * zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
				OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
				&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev,
			"timeout waiting for INIT&GO bits to return to zero, %x",
			tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
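
/*
 * Read one byte: set DIR and GO to start a receive cycle, wait for the
 * ISR to report RXCOMPLETE and then fetch the byte from RX_DATA. The
 * caller must already hold a use count via omap_hdq_get().
 */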
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	hdq_data->hdq_irqstatus = 0;

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			      OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev,
				"timeout waiting for RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ/1W is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
			      OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					    hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					    OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev,
			"attempt to decrement use count when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
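/*
 * The value returned to the w1 core follows the w1_bus_master triplet
 * convention: bit 0 = id bit read, bit 1 = complement bit read,
 * bit 2 = direction bit written back, so 0x3 means no slave responded.
 */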
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}
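
/*
 * A complete HDQ transaction is a register-address write followed by a
 * data write or a data read. init_trans counts the writes issued since
 * the last omap_hdq_get(), so the module can be released again once the
 * write+write or write+read pair has completed.
 */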

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/*
	 * We need to reset the slave before issuing the
	 * SKIP ROM command, else the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred; release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}
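
/*
 * The optional "ti,mode" device-tree property selects the protocol:
 * "hdq" (or an absent property) uses raw HDQ with the synthetic-ID
 * search callback, while "1w" selects 1-wire mode and registers the
 * triplet callback for real ROM searches.
 */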
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

module_platform_driver(omap_hdq_driver);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");