// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

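/*
 * Retry worker, scheduled by mmc_hsq_pump_requests() when
 * ->request_atomic() returned -EBUSY: resubmit the current request via
 * the non-atomic ->request() callback, in process context where the
 * host driver may sleep until the card is ready.
 */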
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

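/*
 * Dispatch the next queued request to the host controller, unless a
 * request is already in flight, the queue is empty or disabled, or
 * recovery has halted it. The host's ->request_atomic() callback is
 * preferred when implemented, so a request can be issued directly from
 * atomic (e.g. completion) context; otherwise fall back to ->request().
 */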
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, the card may be busy now, so
	 * switch to a non-atomic context to try again for this unusual case,
	 * to avoid time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

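/*
 * Advance next_tag to the tag that was queued right after the completed
 * one. The tag_slot[] array effectively forms a singly linked list of
 * pending tags: tag_slot[X] holds the tag submitted immediately after
 * tag X, and hsq->tail_tag marks the end of the chain, so requests are
 * issued in their submission order.
 */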
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		hsq->tail_tag = HSQ_INVALID_TAG;
		return;
	}

	tag = hsq->tag_slot[hsq->next_tag];
	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
	hsq->next_tag = tag;
}

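/*
 * Housekeeping after a completed request: clear the in-flight request,
 * advance the tag bookkeeping, wake up a waiter in
 * mmc_hsq_wait_for_idle() once the queue has drained, and pump the next
 * request unless recovery has halted the queue.
 */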
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump the new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the current completed slot request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

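/*
 * ->cqe_recovery_start(): halt the queue so that no new requests are
 * dispatched or accepted while the core performs error recovery.
 */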
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

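/*
 * ->cqe_recovery_finish(): lift the recovery halt and restart the pump
 * if requests piled up in the software queue during recovery.
 */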
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

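/*
 * ->cqe_request(): queue one request into the slot matching its tag,
 * link the tag at the tail of the pending-tag chain, and kick the pump.
 * Returns -ESHUTDOWN if the queue is disabled and -EBUSY while error
 * recovery is in progress.
 */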
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag as the current request's tag if there is no next
	 * tag available yet; otherwise append the new tag to the tail of the
	 * pending-tag chain.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG) {
		hsq->next_tag = tag;
		hsq->tail_tag = tag;
		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
	} else {
		hsq->tag_slot[hsq->tail_tag] = tag;
		hsq->tail_tag = tag;
	}

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

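/*
 * ->cqe_post_req(): give the host driver a chance to clean up after a
 * request (e.g. unmap DMA buffers), if it implements the optional
 * ->post_req() callback.
 */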
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

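/*
 * Check whether the queue has drained: no request in flight and nothing
 * queued. A recovery halt also counts as idle so that waiters are not
 * left blocked during recovery; *ret is set to -EBUSY in that case.
 */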
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

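/*
 * ->cqe_wait_for_idle(): block until the software queue has drained;
 * mmc_hsq_post_request() wakes us up when the last request completes.
 */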
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

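/*
 * ->cqe_disable(): wait up to 500ms for the queue to drain, then mark it
 * disabled. If the queue fails to go idle in time, warn and leave the
 * queue enabled.
 */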
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

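/*
 * ->cqe_enable(): mark the software queue as enabled, or return -EBUSY
 * if it already is.
 */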
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

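/**
 * mmc_hsq_init - initialize the software queue and hook it up to a host
 * @hsq: the software queue, typically embedded in the host driver's
 *       private data
 * @mmc: the host controller
 *
 * Allocate the request slots, reset the tag bookkeeping and install the
 * software queue implementation of the cqe_ops on @mmc.
 *
 * A host driver typically calls this from its probe path, along these
 * lines (a minimal sketch; "pdev" and "host" are hypothetical driver
 * variables):
 *
 *	struct mmc_hsq *hsq;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *	ret = mmc_hsq_init(hsq, host->mmc);
 *
 * Return 0 on success, or -ENOMEM if the slots cannot be allocated.
 */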
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	int i;

	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;
	hsq->tail_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	for (i = 0; i < HSQ_NUM_SLOTS; i++)
		hsq->tag_slot[i] = HSQ_INVALID_TAG;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

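/**
 * mmc_hsq_suspend - wait for the queue to drain and disable it
 * @mmc: the host controller
 */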
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

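/**
 * mmc_hsq_resume - re-enable the software queue after resume
 * @mmc: the host controller
 *
 * Return 0 on success, or -EBUSY if the queue was already enabled.
 */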
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");