// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

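/*
 * The software queue holds up to 64 queued requests; the out-of-range tag
 * value HSQ_NUM_SLOTS is used to mark that no next tag is available.
 */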
#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

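/*
 * Dispatch the request in the current 'next_tag' slot to the host
 * controller, but only if no request is already in flight, the queue is
 * enabled and there is something left to pump. Only one request is issued
 * to the hardware at a time.
 */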
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc->ops->request(mmc, hsq->mrq);
}

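/*
 * Pick the tag of the next request to dispatch: prefer the slot right
 * after the one that just completed; if that slot is empty, fall back to
 * a linear scan of all slots.
 */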
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if yes, then we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise we should iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

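/*
 * Clean up after a completed request: pick the next tag, wake up anybody
 * waiting for the queue to drain, and pump the next request unless we are
 * in recovery mode.
 */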
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump a new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible, after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the current completed slot's request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

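/*
 * Halt the queue for error recovery: new requests are rejected and nothing
 * new is pumped until mmc_hsq_recovery_finish() is called.
 */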
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

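/* Leave recovery mode and restart pumping any requests queued meanwhile. */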
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

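/*
 * Queue a request into the slot addressed by mrq->tag and kick the pump.
 * Returns -ESHUTDOWN if the queue is disabled and -EBUSY while recovery
 * is in progress.
 */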
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request's tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

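/* Let the host driver unprepare (e.g. unmap DMA for) a completed request. */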
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

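/*
 * Check whether the queue has drained (or recovery has halted it). Used as
 * a wait_event() condition, so it also records whether a waiter still needs
 * a wakeup and reports -EBUSY through *ret when halted by recovery.
 */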
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

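/* Block until the software queue is idle (woken by mmc_hsq_post_request()). */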
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

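/*
 * Disable the queue, waiting up to 500ms for in-flight requests to drain
 * before marking it disabled.
 */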
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

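/* Mark the queue enabled; fails with -EBUSY if it is already enabled. */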
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

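/*
 * Initialize the software queue: allocate the slot array, install
 * mmc_hsq_ops as the host's cqe_ops and point mmc->cqe_private back at
 * hsq. A minimal sketch of how a host driver might call this from its
 * probe() path (the surrounding names are illustrative, not part of this
 * API):
 *
 *	struct mmc_hsq *hsq;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *
 *	ret = mmc_hsq_init(hsq, host->mmc);
 *	if (ret)
 *		return ret;
 *
 * The driver is then expected to call mmc_hsq_finalize_request() from its
 * completion path, and mmc_hsq_suspend()/mmc_hsq_resume() from its PM
 * hooks.
 */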
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

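/* System PM helpers: drain and disable the queue on suspend, re-enable it on resume. */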
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");