/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
	int			rrw;
	/* mode: 0-HDQ 1-W1 */
	int			mode;
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* W1 search callback function in HDQ mode */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec, so calculate
	 * the CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}
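
/*
 * Soft-reset the HDQ block, wait for RESETDONE, then restore the mode,
 * clock-enable and interrupt-mask settings.
 */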
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ/1W mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * check that the presence-detect bit is set,
	 * showing that the slave is responding
	 */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
			OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * wait for both INIT and GO bits to return to zero.
	 * zero wait time expected for interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}
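
/* Read a byte from the bus; trigger an RX cycle unless one has already completed */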
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ/1W mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ/1W is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/*
 * W1 triplet callback function - used for searching ROM addresses.
 * Registered only when controller is in 1-wire mode.
 */
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3; /* no slaves responded */
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read id_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read comp_bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03; /* no slaves responded */
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* Both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* Only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write bdir bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}

/* reset callback */
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/*
	 * We need to reset the slave before
	 * issuing the SKIP ROM command, else
	 * the slave will not work.
	 */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}
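
/*
 * 1-Wire bus master operations. The search (HDQ mode) or triplet (1-wire
 * mode) callback is filled in at probe time based on the "ti,mode" property.
 */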
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};

static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe		= omap_hdq_probe,
	.remove		= omap_hdq_remove,
	.driver = {
		.name =	"omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");
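
/*
 * Illustrative example of a device tree node this driver can bind to. Only
 * the compatible string and the optional "ti,mode" property (any value other
 * than "hdq" selects 1-wire mode) come from this driver; the node name, unit
 * address, register size and interrupt number below are placeholders:
 *
 *	hdqw1w: 1w@480b2000 {
 *		compatible = "ti,omap3-1w";
 *		reg = <0x480b2000 0x1000>;
 *		interrupts = <58>;
 *		ti,mode = "1w";
 *	};
 */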