/*
 *  linux/drivers/mmc/core/host.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007-2008 Pierre Ossman
 *  Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			 "this means the clock is already disabled.\n",
			 mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					      clk_gate_work);

	mmc_host_clk_gate_delayed(host);
}

/**
 *	mmc_host_clk_ungate - ungate hardware MCI clocks
 *	@host: host to ungate.
 *
 *	Makes sure the host ios.clock is restored to a non-zero value
 *	past this call. Increase clock reference count and ungate clock
 *	if we're the first user.
 */
void mmc_host_clk_ungate(struct mmc_host *host)
{
	unsigned long flags;

	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 *	mmc_host_may_gate_card - check if this card may be gated
 *	@card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
}

/**
 *	mmc_host_clk_gate - gate off hardware MCI clocks
 *	@host: host to gate.
 *
 *	Calls the host driver with ios.clock set to zero as often as possible
 *	in order to gate off hardware MCI clocks. Decrease clock reference
 *	count and schedule disabling of clock.
 */
void mmc_host_clk_gate(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_work(&host->clk_gate_work);
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 *	mmc_host_clk_rate - get current clock frequency setting
 *	@host: host to get the clock frequency for.
 *
 *	Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 *	mmc_host_clk_init - set up clock gating code
 *	@host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	host->clk_gated = false;
	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 *	mmc_host_clk_exit - shut down clock gating code
 *	@host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_ungate(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

#else

static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

#endif

/**
 *	mmc_alloc_host - initialise the per-host structure.
 *	@extra: sizeof private data structure
 *	@dev: pointer to host device model structure
 *
 *	Initialise the per-host structure.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 *	mmc_add_host - initialise host hardware
 *	@host: mmc host
 *
 *	Register the host with the driver model. The host must be
 *	prepared to start servicing requests before this function
 *	completes.
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

	err = device_add(&host->class_dev);
	if (err)
		return err;

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif

	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 *	mmc_remove_host - remove host hardware
 *	@host: mmc host
 *
 *	Unregister and remove all cards associated with this host,
 *	and power down the MMC bus. No new requests will be issued
 *	after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 *	mmc_free_host - free the host structure
 *	@host: mmc host
 *
 *	Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);
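
/*
 * Illustrative sketch, not part of host.c: how a host driver's set_ios()
 * might honour the clock-gating contract described above the
 * CONFIG_MMC_CLKGATE block (ios->clock == 0 gates the block clock, a
 * non-zero value restores the previous frequency). The my_mci_* names and
 * struct my_mci_priv are hypothetical; mmc_priv(), struct mmc_host_ops and
 * struct mmc_ios come from <linux/mmc/host.h>, and clk_set_rate()/
 * clk_enable()/clk_disable() from <linux/clk.h>.
 */
struct my_mci_priv {
	struct clk	*mclk;	/* MCI block clock, hypothetical */
	bool		clk_on;	/* keeps clk_enable/clk_disable balanced */
};

static void my_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct my_mci_priv *priv = mmc_priv(mmc);

	if (ios->clock == 0) {
		/* The core asks us to gate the block clock */
		if (priv->clk_on) {
			clk_disable(priv->mclk);
			priv->clk_on = false;
		}
	} else {
		/* Ungate at the requested (previous) frequency */
		clk_set_rate(priv->mclk, ios->clock);
		if (!priv->clk_on) {
			clk_enable(priv->mclk);
			priv->clk_on = true;
		}
	}
	/* ios->power_mode, ios->bus_width, ios->timing handled as usual */
}

static const struct mmc_host_ops my_mci_ops = {
	.set_ios	= my_mci_set_ios,
	/* .request, .get_ro, .get_cd, .enable_sdio_irq, ... */
};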
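
/*
 * Illustrative sketch, not part of host.c: the typical probe/remove
 * lifecycle around the helpers exported above. my_mci_probe(),
 * my_mci_remove() and MY_MCI_MAX_FREQ are hypothetical; mmc_alloc_host(),
 * mmc_add_host(), mmc_remove_host() and mmc_free_host() are the functions
 * defined in this file, and the mmc_host fields set below are declared in
 * <linux/mmc/host.h>. Needs <linux/platform_device.h> for the platform
 * driver glue.
 */
static int my_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	/* Allocate the core structure plus our private data */
	mmc = mmc_alloc_host(sizeof(struct my_mci_priv), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/* Describe controller abilities before registering */
	mmc->ops = &my_mci_ops;
	mmc->f_min = 400000;
	mmc->f_max = MY_MCI_MAX_FREQ;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->max_segs = 16;
	mmc->max_blk_size = 512;

	/* From here on the core may issue requests at any time */
	ret = mmc_add_host(mmc);
	if (ret) {
		mmc_free_host(mmc);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);
	return 0;
}

static int my_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* Tear down cards and stop request processing first ... */
	mmc_remove_host(mmc);
	/* ... then drop the last reference; the class release frees it */
	mmc_free_host(mmc);
	return 0;
}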