/*
 * linux/drivers/mmc/core/host.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright (C) 2007-2008 Pierre Ossman
 * Copyright (C) 2010 Linus Walleij
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMC host class device management
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pagemap.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "core.h"
#include "host.h"

/* Map a class device embedded in a struct mmc_host back to the host. */
#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)

/*
 * Class-device release callback: frees the struct mmc_host itself,
 * including any driver private data allocated behind it by
 * mmc_alloc_host().  Runs once the last reference is dropped via
 * put_device() in mmc_free_host().
 */
static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	kfree(host);
}

static struct class mmc_host_class = {
	.name		= "mmc_host",
	.dev_release	= mmc_host_classdev_release,
};

/* Register the "mmc_host" device class; called once at core init. */
int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}

/* Tear down the "mmc_host" device class; called once at core exit. */
void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}

/* IDR handing out the "mmcN" index; mmc_host_lock serializes access. */
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

#ifdef CONFIG_MMC_CLKGATE

/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */

/*
 * mmc_host_clk_gate_delayed - actually gate the clock, after a delay
 * @host: host whose clock may be gated.
 *
 * Runs from the clk_gate_work item.  Waits at least clk_delay bus
 * cycles (8 by default, per the MMC spec) and then asks the driver to
 * gate the clock via mmc_gate_clock() — unless new clock requests
 * appeared in the meantime, in which case it bails out.
 *
 * Lock ordering: clk_gate_mutex is taken outside clk_lock; the
 * spinlock is dropped around the ndelay() and the set_ios() call.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	/*
	 * NOTE(review): ios.clock is sampled here without clk_lock held —
	 * presumably benign because only this work gates the clock, but
	 * worth confirming against concurrent set_ios() callers.
	 */
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			"this means the clock is already disabled.\n",
			mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	/* Re-check under the lock: users may have appeared during ndelay() */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/*
 * Internal work. Work to disable the clock at some later point.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					      clk_gate_work);

	mmc_host_clk_gate_delayed(host);
}

/**
 * mmc_host_clk_ungate - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call.
 * Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_ungate(struct mmc_host *host)
{
	unsigned long flags;

	/* clk_gate_mutex excludes the delayed gating work */
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		/*
		 * Drop the spinlock around the set_ios() call; the mutex
		 * still keeps the gating work out.
		 */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}

/**
 * mmc_host_may_gate_card - check if this card may be gated
 * @card: card to check.
 */
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
	/* If there is no card we may gate it */
	if (!card)
		return true;
	/*
	 * Don't gate SDIO cards! These need to be clocked at all times
	 * since they may be independent systems generating interrupts
	 * and other events. The clock requests counter from the core will
	 * go down to zero since the core does not need it, but we will not
	 * gate the clock, because there is somebody out there that may still
	 * be using it.
	 */
	if (mmc_card_sdio(card))
		return false;

	return true;
}

/**
 * mmc_host_clk_gate - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_gate(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	/* Only schedule the actual gating when the last user drops off */
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		schedule_work(&host->clk_gate_work);
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/**
 * mmc_host_clk_rate - get current clock frequency setting
 * @host: host to get the clock frequency for.
 *
 * Returns current clock frequency regardless of gating.
 */
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
	unsigned long freq;
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated)
		/* ios.clock is 0 while gated; report the saved rate */
		freq = host->clk_old;
	else
		freq = host->ios.clock;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return freq;
}

/**
 * mmc_host_clk_init - set up clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
	host->clk_gated = false;
	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}

/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_ungate(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}

#else

/* Stubs when CONFIG_MMC_CLKGATE is disabled: clock gating is a no-op. */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
}

static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}

#endif

/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.  Returns the new host, or NULL on
 * allocation/index failure.  The caller releases it with mmc_free_host().
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	/* Pre-load the IDR so idr_get_new() below will not need to allocate */
	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	/* Driver private data lives directly after the mmc_host struct */
	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
299 */ 300 host->max_segs = 1; 301 host->max_seg_size = PAGE_CACHE_SIZE; 302 303 host->max_req_size = PAGE_CACHE_SIZE; 304 host->max_blk_size = 512; 305 host->max_blk_count = PAGE_CACHE_SIZE / 512; 306 307 return host; 308 309 free: 310 kfree(host); 311 return NULL; 312 } 313 314 EXPORT_SYMBOL(mmc_alloc_host); 315 316 /** 317 * mmc_add_host - initialise host hardware 318 * @host: mmc host 319 * 320 * Register the host with the driver model. The host must be 321 * prepared to start servicing requests before this function 322 * completes. 323 */ 324 int mmc_add_host(struct mmc_host *host) 325 { 326 int err; 327 328 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 329 !host->ops->enable_sdio_irq); 330 331 led_trigger_register_simple(dev_name(&host->class_dev), &host->led); 332 333 err = device_add(&host->class_dev); 334 if (err) 335 return err; 336 337 #ifdef CONFIG_DEBUG_FS 338 mmc_add_host_debugfs(host); 339 #endif 340 341 mmc_start_host(host); 342 register_pm_notifier(&host->pm_notify); 343 344 return 0; 345 } 346 347 EXPORT_SYMBOL(mmc_add_host); 348 349 /** 350 * mmc_remove_host - remove host hardware 351 * @host: mmc host 352 * 353 * Unregister and remove all cards associated with this host, 354 * and power down the MMC bus. No new requests will be issued 355 * after this function has returned. 356 */ 357 void mmc_remove_host(struct mmc_host *host) 358 { 359 unregister_pm_notifier(&host->pm_notify); 360 mmc_stop_host(host); 361 362 #ifdef CONFIG_DEBUG_FS 363 mmc_remove_host_debugfs(host); 364 #endif 365 366 device_del(&host->class_dev); 367 368 led_trigger_unregister_simple(host->led); 369 370 mmc_host_clk_exit(host); 371 } 372 373 EXPORT_SYMBOL(mmc_remove_host); 374 375 /** 376 * mmc_free_host - free the host structure 377 * @host: mmc host 378 * 379 * Free the host once all references to it have been dropped. 
 */
void mmc_free_host(struct mmc_host *host)
{
	/* Return the "mmcN" index under the same lock that handed it out */
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	/* Final put: mmc_host_classdev_release() kfrees the host */
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);