/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	mutex_destroy(&host->slot.lock);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
}

static ssize_t clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clkgate_delay = value;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return count;
}

/*
 * When clock gating is enabled, the core calls out to the host once to
 * ungate and once to gate the clock around every request or card
 * operation, however these are intermingled. The driver sees this as
 * set_ios() calls with the ios.clock field set to 0 to gate (disable)
 * the block clock, and set back to the old frequency to ungate (enable)
 * it again.
 */
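/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * with clock gating enabled, a host driver's ->set_ios() callback sees
 * the gate/ungate transitions described above as ordinary clock changes.
 * The names foo_host, foo_stop_clock() and foo_set_clock() are made up
 * and stand in for driver-private state and helpers:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct foo_host *foo = mmc_priv(mmc);
 *
 *		if (ios->clock == 0)
 *			foo_stop_clock(foo);
 *		else
 *			foo_set_clock(foo, ios->clock);
 *	}
 */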
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling; if so,
	 * there is no reason to delay the check before clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_delayed_work(&host->clk_gate_work,
				      msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
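/*
 * Illustrative sketch (not a complete caller): code that needs the MCI
 * clock running brackets the access with a hold/release pair, e.g.:
 *
 *	mmc_host_clk_hold(host);
 *	... issue the request or touch registers that need the clock ...
 *	mmc_host_clk_release(host);
 *
 * The clock is gated only after the last holder calls
 * mmc_host_clk_release() and the delayed work, scheduled clkgate_delay
 * milliseconds later, finds no new users.
 */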
/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	/*
	 * Default clock gating delay is 0ms to avoid wasting power.
	 * This value can be tuned by writing into the sysfs entry.
	 */
	host->clkgate_delay = 0;
	host->clk_gated = false;
	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
	host->clkgate_delay_attr.show = clkgate_delay_show;
	host->clkgate_delay_attr.store = clkgate_delay_store;
	sysfs_attr_init(&host->clkgate_delay_attr.attr);
	host->clkgate_delay_attr.attr.name = "clkgate_delay";
	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
		       mmc_hostname(host));
}
#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}

#endif

/**
 * mmc_of_parse() - parse host's device-tree node
 * @host: host whose node should be parsed.
 *
 * To keep the rest of the MMC subsystem unaware of whether DT has been
 * used to instantiate and configure this host instance or not, we
 * parse the properties and set the respective generic mmc-host flags and
 * parameters.
 */
int mmc_of_parse(struct mmc_host *host)
{
	struct device_node *np;
	u32 bus_width;
	bool explicit_inv_wp, gpio_inv_wp = false;
	enum of_gpio_flags flags;
	int len, ret, gpio;

	if (!host->parent || !host->parent->of_node)
		return 0;

	np = host->parent->of_node;

	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
		dev_dbg(host->parent,
			"\"bus-width\" property is missing, assuming 1 bit.\n");
		bus_width = 1;
	}

	switch (bus_width) {
	case 8:
		host->caps |= MMC_CAP_8_BIT_DATA;
		/* Hosts capable of 8-bit transfers can also do 4 bits */
	case 4:
		host->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 1:
		break;
	default:
		dev_err(host->parent,
			"Invalid \"bus-width\" value %u!\n", bus_width);
		return -EINVAL;
	}

	/* f_max is obtained from the optional "max-frequency" property */
	of_property_read_u32(np, "max-frequency", &host->f_max);

	/*
	 * Configure CD and WP pins. They are both by default active low to
	 * match the SDHCI spec. If GPIOs are provided for CD and/or WP, the
	 * mmc-gpio helpers are used to attach, configure and use them. If
	 * polarity inversion is specified in DT, one of the
	 * MMC_CAP2_CD_ACTIVE_HIGH and MMC_CAP2_RO_ACTIVE_HIGH capability-2
	 * flags is set. If the "broken-cd" property is provided, the
	 * MMC_CAP_NEEDS_POLL capability is set. If the "non-removable"
	 * property is found, the MMC_CAP_NONREMOVABLE capability is set and
	 * no card-detection configuration is performed.
	 */

	/* Parse Card Detection */
	if (of_find_property(np, "non-removable", &len)) {
		host->caps |= MMC_CAP_NONREMOVABLE;
	} else {
		bool explicit_inv_cd, gpio_inv_cd = false;

		explicit_inv_cd = of_property_read_bool(np, "cd-inverted");

		if (of_find_property(np, "broken-cd", &len))
			host->caps |= MMC_CAP_NEEDS_POLL;

		gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
		if (gpio == -EPROBE_DEFER)
			return gpio;
		if (gpio_is_valid(gpio)) {
			if (!(flags & OF_GPIO_ACTIVE_LOW))
				gpio_inv_cd = true;

			ret = mmc_gpio_request_cd(host, gpio, 0);
			if (ret < 0) {
				dev_err(host->parent,
					"Failed to request CD GPIO #%d: %d!\n",
					gpio, ret);
				return ret;
			} else {
				dev_info(host->parent, "Got CD GPIO #%d.\n",
					 gpio);
			}
		}

		if (explicit_inv_cd ^ gpio_inv_cd)
			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	}

	/* Parse Write Protection */
	explicit_inv_wp = of_property_read_bool(np, "wp-inverted");

	gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
	if (gpio == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto out;
	}
	if (gpio_is_valid(gpio)) {
		if (!(flags & OF_GPIO_ACTIVE_LOW))
			gpio_inv_wp = true;

		ret = mmc_gpio_request_ro(host, gpio);
		if (ret < 0) {
			dev_err(host->parent,
				"Failed to request WP GPIO: %d!\n", ret);
			goto out;
		} else {
			dev_info(host->parent, "Got WP GPIO #%d.\n",
				 gpio);
		}
	}
	if (explicit_inv_wp ^ gpio_inv_wp)
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (of_find_property(np, "cap-sd-highspeed", &len))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
	if (of_find_property(np, "cap-mmc-highspeed", &len))
		host->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_find_property(np, "cap-power-off-card", &len))
		host->caps |= MMC_CAP_POWER_OFF_CARD;
	if (of_find_property(np, "cap-sdio-irq", &len))
		host->caps |= MMC_CAP_SDIO_IRQ;
	if (of_find_property(np, "full-pwr-cycle", &len))
		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
	if (of_find_property(np, "keep-power-in-suspend", &len))
		host->pm_caps |= MMC_PM_KEEP_POWER;
	if (of_find_property(np, "enable-sdio-wakeup", &len))
		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	return 0;

out:
	mmc_gpio_free_cd(host);
	return ret;
}

EXPORT_SYMBOL(mmc_of_parse);
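/*
 * Illustrative device-tree fragment (hypothetical controller node, not a
 * binding definition) showing properties that mmc_of_parse() understands;
 * the node name, compatible string and GPIO phandles are made up:
 *
 *	mmc@12345000 {
 *		compatible = "vendor,some-mmc-controller";
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cd-gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
 *		wp-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
 *		cap-sd-highspeed;
 *		cap-mmc-highspeed;
 *		keep-power-in-suspend;
 *	};
 */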
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	/* scanning will be enabled when we're ready */
	host->rescan_disable = 1;
	idr_preload(GFP_KERNEL);
	spin_lock(&mmc_host_lock);
	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
	if (err >= 0)
		host->index = err;
	spin_unlock(&mmc_host_lock);
	idr_preload_end();
	if (err < 0)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	mutex_init(&host->slot.lock);
	host->slot.cd_irq = -EINVAL;

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
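/*
 * Illustrative sketch (hypothetical driver, not part of this file) of how
 * the host lifecycle calls above are typically paired. struct foo_host,
 * foo_ops and pdev are made-up names for driver-private data, the driver's
 * mmc_host_ops and its platform device:
 *
 *	probe:
 *		mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *		mmc->ops = &foo_ops;
 *		err = mmc_of_parse(mmc);	(optional, for DT users)
 *		...
 *		err = mmc_add_host(mmc);
 *
 *	remove:
 *		mmc_remove_host(mmc);
 *		mmc_free_host(mmc);
 */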