// drivers/mmc/host/mmc_hsq.c (excerpt)
// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"
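/*
 * Design summary: the software queue stores up to HSQ_NUM_SLOTS requests,
 * indexed by block-layer tag, but keeps only one request in flight on the
 * host at any time. Pending tags form a FIFO linked through tag_slot[]:
 * next_tag is the head, tail_tag the tail. mmc_hsq_request() enqueues,
 * mmc_hsq_pump_requests() dispatches the head request, and
 * mmc_hsq_finalize_request() completes it and advances the queue.
 */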
static void mmc_hsq_retry_handler(struct work_struct *work)
{
        struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
        struct mmc_host *mmc = hsq->mmc;

        /* Re-issue the request from process (non-atomic) context. */
        mmc->ops->request(mmc, hsq->mrq);
}
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Make sure we are not already running a request. */
        if (hsq->mrq || hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Make sure there are remaining requests that need to be pumped. */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        if (mmc->ops->request_atomic)
                ret = mmc->ops->request_atomic(mmc, hsq->mrq);
        else
                mmc->ops->request(mmc, hsq->mrq);

        /*
         * If request_atomic() returns -EBUSY, the card may be busy right
         * now; retry from the (non-atomic) workqueue context instead, so
         * that this unusual case does not run time-consuming operations
         * in atomic context. Other errors only warn, since the host
         * driver handles them.
         */
        if (ret == -EBUSY)
                schedule_work(&hsq->retry_work);
        else
                WARN_ON_ONCE(ret);
}
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        int tag;

        /*
         * If there are no remaining requests in the software queue, set an
         * invalid tag.
         */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                hsq->tail_tag = HSQ_INVALID_TAG;
                return;
        }

        /* Otherwise follow the tag_slot link to the next queued tag. */
        tag = hsq->tag_slot[hsq->next_tag];
        hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
        hsq->next_tag = tag;
}
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Update the next available tag to be queued. */
        mmc_hsq_update_next_tag(hsq, remains);

        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Do not pump new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        /* Pump the next queued request as soon as the previous completes. */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}
/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /* Clear the completed slot to make room for a new request. */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
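/*
 * Illustrative sketch, not part of this file: a host driver that enables
 * HSQ hands completed requests to mmc_hsq_finalize_request() first and
 * only completes them itself when HSQ does not own them. This mirrors the
 * pattern used by drivers such as sdhci-sprd; my_host_request_done() is a
 * hypothetical name.
 */
static void my_host_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
        /* Returns true when HSQ owned mrq and has already completed it. */
        if (mmc_hsq_finalize_request(mmc, mrq))
                return;

        /* Fall back to the normal completion path. */
        mmc_request_done(mmc, mrq);
}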
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /* Pump any requests that were queued while recovery was running. */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Do not queue any new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /*
         * If no next tag is available, the queue was empty: this tag
         * becomes both head (next_tag) and tail; otherwise link it after
         * the current tail.
         */
        if (hsq->next_tag == HSQ_INVALID_TAG) {
                hsq->next_tag = tag;
                hsq->tail_tag = tag;
                hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
        } else {
                hsq->tag_slot[hsq->tail_tag] = tag;
                hsq->tail_tag = tag;
        }

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        mmc_hsq_pump_requests(hsq);

        return 0;
}
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        if (mmc->ops->post_req)
                mmc->ops->post_req(mmc, mrq, 0);
}
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        /* Recovery also counts as idle, releasing waiters with -EBUSY. */
        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                   hsq->recovery_halt;

        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}
static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

        /* Wait for the queue to drain before disabling it. */
        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}
static const struct mmc_cqe_ops mmc_hsq_ops = {
        .cqe_enable = mmc_hsq_enable,
        .cqe_disable = mmc_hsq_disable,
        .cqe_request = mmc_hsq_request,
        .cqe_post_req = mmc_hsq_post_req,
        .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
        .cqe_recovery_start = mmc_hsq_recovery_start,
        .cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        int i;

        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;
        hsq->tail_tag = HSQ_INVALID_TAG;

        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        mmc->cqe_ops = &mmc_hsq_ops;

        for (i = 0; i < HSQ_NUM_SLOTS; i++)
                hsq->tag_slot[i] = HSQ_INVALID_TAG;

        INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
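/*
 * Illustrative sketch, not part of this file: probe-time wiring, loosely
 * modeled on how sdhci-sprd sets HSQ up. The helper name my_host_init_hsq()
 * is a hypothetical example.
 */
static int my_host_init_hsq(struct device *dev, struct mmc_host *mmc)
{
        struct mmc_hsq *hsq;

        hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
        if (!hsq)
                return -ENOMEM;

        /* On success, mmc->cqe_ops points at mmc_hsq_ops above. */
        return mmc_hsq_init(hsq, mmc);
}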
void mmc_hsq_suspend(struct mmc_host *mmc)
{
        mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
        return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
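/*
 * Illustrative sketch, not part of this file: a host driver's system PM
 * callbacks simply bracket their own suspend/resume work with the two
 * helpers above, so no requests are pumped while the controller is down.
 * my_host_suspend()/my_host_resume() are hypothetical names.
 */
static int my_host_suspend(struct mmc_host *mmc)
{
        mmc_hsq_suspend(mmc);   /* drain the queue and disable pumping */
        /* ... then gate clocks / power down the controller ... */
        return 0;
}

static int my_host_resume(struct mmc_host *mmc)
{
        /* ... power up the controller and ungate clocks first ... */
        return mmc_hsq_resume(mmc);     /* allow pumping requests again */
}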
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");