1 /* 2 * drivers/w1/masters/omap_hdq.c 3 * 4 * Copyright (C) 2007,2012 Texas Instruments, Inc. 5 * 6 * This file is licensed under the terms of the GNU General Public License 7 * version 2. This program is licensed "as is" without any warranty of any 8 * kind, whether express or implied. 9 * 10 */ 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/platform_device.h> 14 #include <linux/interrupt.h> 15 #include <linux/slab.h> 16 #include <linux/err.h> 17 #include <linux/io.h> 18 #include <linux/sched.h> 19 #include <linux/pm_runtime.h> 20 21 #include "../w1.h" 22 #include "../w1_int.h" 23 24 #define MOD_NAME "OMAP_HDQ:" 25 26 #define OMAP_HDQ_REVISION 0x00 27 #define OMAP_HDQ_TX_DATA 0x04 28 #define OMAP_HDQ_RX_DATA 0x08 29 #define OMAP_HDQ_CTRL_STATUS 0x0c 30 #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6) 31 #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5) 32 #define OMAP_HDQ_CTRL_STATUS_GO (1<<4) 33 #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2) 34 #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1) 35 #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0) 36 #define OMAP_HDQ_INT_STATUS 0x10 37 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2) 38 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1) 39 #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0) 40 #define OMAP_HDQ_SYSCONFIG 0x14 41 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1) 42 #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0) 43 #define OMAP_HDQ_SYSSTATUS 0x18 44 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0) 45 46 #define OMAP_HDQ_FLAG_CLEAR 0 47 #define OMAP_HDQ_FLAG_SET 1 48 #define OMAP_HDQ_TIMEOUT (HZ/5) 49 50 #define OMAP_HDQ_MAX_USER 4 51 52 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue); 53 static int w1_id; 54 55 struct hdq_data { 56 struct device *dev; 57 void __iomem *hdq_base; 58 /* lock status update */ 59 struct mutex hdq_mutex; 60 int hdq_usecount; 61 u8 hdq_irqstatus; 62 /* device lock */ 63 spinlock_t hdq_spinlock; 64 /* 65 * Used to control the call to omap_hdq_get and omap_hdq_put. 66 * HDQ Protocol: Write the CMD|REG_address first, followed by 67 * the data wrire or read. 68 */ 69 int init_trans; 70 }; 71 72 static int omap_hdq_probe(struct platform_device *pdev); 73 static int omap_hdq_remove(struct platform_device *pdev); 74 75 static struct platform_driver omap_hdq_driver = { 76 .probe = omap_hdq_probe, 77 .remove = omap_hdq_remove, 78 .driver = { 79 .name = "omap_hdq", 80 }, 81 }; 82 83 static u8 omap_w1_read_byte(void *_hdq); 84 static void omap_w1_write_byte(void *_hdq, u8 byte); 85 static u8 omap_w1_reset_bus(void *_hdq); 86 static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, 87 u8 search_type, w1_slave_found_callback slave_found); 88 89 90 static struct w1_bus_master omap_w1_master = { 91 .read_byte = omap_w1_read_byte, 92 .write_byte = omap_w1_write_byte, 93 .reset_bus = omap_w1_reset_bus, 94 .search = omap_w1_search_bus, 95 }; 96 97 /* HDQ register I/O routines */ 98 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset) 99 { 100 return __raw_readl(hdq_data->hdq_base + offset); 101 } 102 103 static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val) 104 { 105 __raw_writel(val, hdq_data->hdq_base + offset); 106 } 107 108 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset, 109 u8 val, u8 mask) 110 { 111 u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask) 112 | (val & mask); 113 __raw_writel(new_val, hdq_data->hdq_base + offset); 114 115 return new_val; 116 } 117 118 /* 119 * Wait for one or more bits in flag change. 120 * HDQ_FLAG_SET: wait until any bit in the flag is set. 121 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared. 122 * return 0 on success and -ETIMEDOUT in the case of timeout. 123 */ 124 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset, 125 u8 flag, u8 flag_set, u8 *status) 126 { 127 int ret = 0; 128 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; 129 130 if (flag_set == OMAP_HDQ_FLAG_CLEAR) { 131 /* wait for the flag clear */ 132 while (((*status = hdq_reg_in(hdq_data, offset)) & flag) 133 && time_before(jiffies, timeout)) { 134 schedule_timeout_uninterruptible(1); 135 } 136 if (*status & flag) 137 ret = -ETIMEDOUT; 138 } else if (flag_set == OMAP_HDQ_FLAG_SET) { 139 /* wait for the flag set */ 140 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag) 141 && time_before(jiffies, timeout)) { 142 schedule_timeout_uninterruptible(1); 143 } 144 if (!(*status & flag)) 145 ret = -ETIMEDOUT; 146 } else 147 return -EINVAL; 148 149 return ret; 150 } 151 152 /* write out a byte and fill *status with HDQ_INT_STATUS */ 153 static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) 154 { 155 int ret; 156 u8 tmp_status; 157 unsigned long irqflags; 158 159 *status = 0; 160 161 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); 162 /* clear interrupt flags via a dummy read */ 163 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); 164 /* ISR loads it with new INT_STATUS */ 165 hdq_data->hdq_irqstatus = 0; 166 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); 167 168 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val); 169 170 /* set the GO bit */ 171 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, 172 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); 173 /* wait for the TXCOMPLETE bit */ 174 ret = wait_event_timeout(hdq_wait_queue, 175 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); 176 if (ret == 0) { 177 dev_dbg(hdq_data->dev, "TX wait elapsed\n"); 178 ret = -ETIMEDOUT; 179 goto out; 180 } 181 182 *status = hdq_data->hdq_irqstatus; 183 /* check irqstatus */ 184 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { 185 dev_dbg(hdq_data->dev, "timeout waiting for" 186 " TXCOMPLETE/RXCOMPLETE, %x", *status); 187 ret = -ETIMEDOUT; 188 goto out; 189 } 190 191 /* wait for the GO bit return to zero */ 192 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, 193 OMAP_HDQ_CTRL_STATUS_GO, 194 OMAP_HDQ_FLAG_CLEAR, &tmp_status); 195 if (ret) { 196 dev_dbg(hdq_data->dev, "timeout waiting GO bit" 197 " return to zero, %x", tmp_status); 198 } 199 200 out: 201 return ret; 202 } 203 204 /* HDQ Interrupt service routine */ 205 static irqreturn_t hdq_isr(int irq, void *_hdq) 206 { 207 struct hdq_data *hdq_data = _hdq; 208 unsigned long irqflags; 209 210 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); 211 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); 212 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); 213 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); 214 215 if (hdq_data->hdq_irqstatus & 216 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE 217 | OMAP_HDQ_INT_STATUS_TIMEOUT)) { 218 /* wake up sleeping process */ 219 wake_up(&hdq_wait_queue); 220 } 221 222 return IRQ_HANDLED; 223 } 224 225 /* HDQ Mode: always return success */ 226 static u8 omap_w1_reset_bus(void *_hdq) 227 { 228 return 0; 229 } 230 231 /* W1 search callback function */ 232 static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, 233 u8 search_type, w1_slave_found_callback slave_found) 234 { 235 u64 module_id, rn_le, cs, id; 236 237 if (w1_id) 238 module_id = w1_id; 239 else 240 module_id = 0x1; 241 242 rn_le = cpu_to_le64(module_id); 243 /* 244 * HDQ might not obey truly the 1-wire spec. 245 * So calculate CRC based on module parameter. 246 */ 247 cs = w1_calc_crc8((u8 *)&rn_le, 7); 248 id = (cs << 56) | module_id; 249 250 slave_found(master_dev, id); 251 } 252 253 static int _omap_hdq_reset(struct hdq_data *hdq_data) 254 { 255 int ret; 256 u8 tmp_status; 257 258 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET); 259 /* 260 * Select HDQ mode & enable clocks. 261 * It is observed that INT flags can't be cleared via a read and GO/INIT 262 * won't return to zero if interrupt is disabled. So we always enable 263 * interrupt. 264 */ 265 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, 266 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | 267 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); 268 269 /* wait for reset to complete */ 270 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS, 271 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status); 272 if (ret) 273 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x", 274 tmp_status); 275 else { 276 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, 277 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | 278 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); 279 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, 280 OMAP_HDQ_SYSCONFIG_AUTOIDLE); 281 } 282 283 return ret; 284 } 285 286 /* Issue break pulse to the device */ 287 static int omap_hdq_break(struct hdq_data *hdq_data) 288 { 289 int ret = 0; 290 u8 tmp_status; 291 unsigned long irqflags; 292 293 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 294 if (ret < 0) { 295 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 296 ret = -EINTR; 297 goto rtn; 298 } 299 300 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); 301 /* clear interrupt flags via a dummy read */ 302 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); 303 /* ISR loads it with new INT_STATUS */ 304 hdq_data->hdq_irqstatus = 0; 305 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); 306 307 /* set the INIT and GO bit */ 308 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 309 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, 310 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION | 311 OMAP_HDQ_CTRL_STATUS_GO); 312 313 /* wait for the TIMEOUT bit */ 314 ret = wait_event_timeout(hdq_wait_queue, 315 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); 316 if (ret == 0) { 317 dev_dbg(hdq_data->dev, "break wait elapsed\n"); 318 ret = -EINTR; 319 goto out; 320 } 321 322 tmp_status = hdq_data->hdq_irqstatus; 323 /* check irqstatus */ 324 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { 325 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", 326 tmp_status); 327 ret = -ETIMEDOUT; 328 goto out; 329 } 330 /* 331 * wait for both INIT and GO bits rerurn to zero. 332 * zero wait time expected for interrupt mode. 333 */ 334 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, 335 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | 336 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, 337 &tmp_status); 338 if (ret) 339 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" 340 " return to zero, %x", tmp_status); 341 342 out: 343 mutex_unlock(&hdq_data->hdq_mutex); 344 rtn: 345 return ret; 346 } 347 348 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) 349 { 350 int ret = 0; 351 u8 status; 352 353 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 354 if (ret < 0) { 355 ret = -EINTR; 356 goto rtn; 357 } 358 359 if (!hdq_data->hdq_usecount) { 360 ret = -EINVAL; 361 goto out; 362 } 363 364 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { 365 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 366 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, 367 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); 368 /* 369 * The RX comes immediately after TX. 370 */ 371 wait_event_timeout(hdq_wait_queue, 372 (hdq_data->hdq_irqstatus 373 & OMAP_HDQ_INT_STATUS_RXCOMPLETE), 374 OMAP_HDQ_TIMEOUT); 375 376 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0, 377 OMAP_HDQ_CTRL_STATUS_DIR); 378 status = hdq_data->hdq_irqstatus; 379 /* check irqstatus */ 380 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { 381 dev_dbg(hdq_data->dev, "timeout waiting for" 382 " RXCOMPLETE, %x", status); 383 ret = -ETIMEDOUT; 384 goto out; 385 } 386 } 387 /* the data is ready. Read it in! */ 388 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA); 389 out: 390 mutex_unlock(&hdq_data->hdq_mutex); 391 rtn: 392 return ret; 393 394 } 395 396 /* Enable clocks and set the controller to HDQ mode */ 397 static int omap_hdq_get(struct hdq_data *hdq_data) 398 { 399 int ret = 0; 400 401 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 402 if (ret < 0) { 403 ret = -EINTR; 404 goto rtn; 405 } 406 407 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) { 408 dev_dbg(hdq_data->dev, "attempt to exceed the max use count"); 409 ret = -EINVAL; 410 goto out; 411 } else { 412 hdq_data->hdq_usecount++; 413 try_module_get(THIS_MODULE); 414 if (1 == hdq_data->hdq_usecount) { 415 416 pm_runtime_get_sync(hdq_data->dev); 417 418 /* make sure HDQ is out of reset */ 419 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) & 420 OMAP_HDQ_SYSSTATUS_RESETDONE)) { 421 ret = _omap_hdq_reset(hdq_data); 422 if (ret) 423 /* back up the count */ 424 hdq_data->hdq_usecount--; 425 } else { 426 /* select HDQ mode & enable clocks */ 427 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, 428 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | 429 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); 430 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, 431 OMAP_HDQ_SYSCONFIG_AUTOIDLE); 432 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); 433 } 434 } 435 } 436 437 out: 438 mutex_unlock(&hdq_data->hdq_mutex); 439 rtn: 440 return ret; 441 } 442 443 /* Disable clocks to the module */ 444 static int omap_hdq_put(struct hdq_data *hdq_data) 445 { 446 int ret = 0; 447 448 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 449 if (ret < 0) 450 return -EINTR; 451 452 if (0 == hdq_data->hdq_usecount) { 453 dev_dbg(hdq_data->dev, "attempt to decrement use count" 454 " when it is zero"); 455 ret = -EINVAL; 456 } else { 457 hdq_data->hdq_usecount--; 458 module_put(THIS_MODULE); 459 if (0 == hdq_data->hdq_usecount) 460 pm_runtime_put_sync(hdq_data->dev); 461 } 462 mutex_unlock(&hdq_data->hdq_mutex); 463 464 return ret; 465 } 466 467 /* Read a byte of data from the device */ 468 static u8 omap_w1_read_byte(void *_hdq) 469 { 470 struct hdq_data *hdq_data = _hdq; 471 u8 val = 0; 472 int ret; 473 474 ret = hdq_read_byte(hdq_data, &val); 475 if (ret) { 476 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 477 if (ret < 0) { 478 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 479 return -EINTR; 480 } 481 hdq_data->init_trans = 0; 482 mutex_unlock(&hdq_data->hdq_mutex); 483 omap_hdq_put(hdq_data); 484 return -1; 485 } 486 487 /* Write followed by a read, release the module */ 488 if (hdq_data->init_trans) { 489 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 490 if (ret < 0) { 491 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 492 return -EINTR; 493 } 494 hdq_data->init_trans = 0; 495 mutex_unlock(&hdq_data->hdq_mutex); 496 omap_hdq_put(hdq_data); 497 } 498 499 return val; 500 } 501 502 /* Write a byte of data to the device */ 503 static void omap_w1_write_byte(void *_hdq, u8 byte) 504 { 505 struct hdq_data *hdq_data = _hdq; 506 int ret; 507 u8 status; 508 509 /* First write to initialize the transfer */ 510 if (hdq_data->init_trans == 0) 511 omap_hdq_get(hdq_data); 512 513 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 514 if (ret < 0) { 515 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 516 return; 517 } 518 hdq_data->init_trans++; 519 mutex_unlock(&hdq_data->hdq_mutex); 520 521 ret = hdq_write_byte(hdq_data, byte, &status); 522 if (ret < 0) { 523 dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status); 524 return; 525 } 526 527 /* Second write, data transferred. Release the module */ 528 if (hdq_data->init_trans > 1) { 529 omap_hdq_put(hdq_data); 530 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 531 if (ret < 0) { 532 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 533 return; 534 } 535 hdq_data->init_trans = 0; 536 mutex_unlock(&hdq_data->hdq_mutex); 537 } 538 } 539 540 static int omap_hdq_probe(struct platform_device *pdev) 541 { 542 struct device *dev = &pdev->dev; 543 struct hdq_data *hdq_data; 544 struct resource *res; 545 int ret, irq; 546 u8 rev; 547 548 hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL); 549 if (!hdq_data) { 550 dev_dbg(&pdev->dev, "unable to allocate memory\n"); 551 return -ENOMEM; 552 } 553 554 hdq_data->dev = dev; 555 platform_set_drvdata(pdev, hdq_data); 556 557 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 558 hdq_data->hdq_base = devm_ioremap_resource(dev, res); 559 if (IS_ERR(hdq_data->hdq_base)) 560 return PTR_ERR(hdq_data->hdq_base); 561 562 hdq_data->hdq_usecount = 0; 563 mutex_init(&hdq_data->hdq_mutex); 564 565 pm_runtime_enable(&pdev->dev); 566 pm_runtime_get_sync(&pdev->dev); 567 568 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); 569 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n", 570 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt"); 571 572 spin_lock_init(&hdq_data->hdq_spinlock); 573 574 irq = platform_get_irq(pdev, 0); 575 if (irq < 0) { 576 ret = -ENXIO; 577 goto err_irq; 578 } 579 580 ret = devm_request_irq(dev, irq, hdq_isr, IRQF_DISABLED, 581 "omap_hdq", hdq_data); 582 if (ret < 0) { 583 dev_dbg(&pdev->dev, "could not request irq\n"); 584 goto err_irq; 585 } 586 587 omap_hdq_break(hdq_data); 588 589 pm_runtime_put_sync(&pdev->dev); 590 591 omap_w1_master.data = hdq_data; 592 593 ret = w1_add_master_device(&omap_w1_master); 594 if (ret) { 595 dev_dbg(&pdev->dev, "Failure in registering w1 master\n"); 596 goto err_w1; 597 } 598 599 return 0; 600 601 err_irq: 602 pm_runtime_put_sync(&pdev->dev); 603 err_w1: 604 pm_runtime_disable(&pdev->dev); 605 606 return ret; 607 } 608 609 static int omap_hdq_remove(struct platform_device *pdev) 610 { 611 struct hdq_data *hdq_data = platform_get_drvdata(pdev); 612 613 mutex_lock(&hdq_data->hdq_mutex); 614 615 if (hdq_data->hdq_usecount) { 616 dev_dbg(&pdev->dev, "removed when use count is not zero\n"); 617 mutex_unlock(&hdq_data->hdq_mutex); 618 return -EBUSY; 619 } 620 621 mutex_unlock(&hdq_data->hdq_mutex); 622 623 /* remove module dependency */ 624 pm_runtime_disable(&pdev->dev); 625 626 return 0; 627 } 628 629 module_platform_driver(omap_hdq_driver); 630 631 module_param(w1_id, int, S_IRUSR); 632 MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection"); 633 634 MODULE_AUTHOR("Texas Instruments"); 635 MODULE_DESCRIPTION("HDQ driver Library"); 636 MODULE_LICENSE("GPL"); 637