xref: /openbmc/linux/drivers/s390/crypto/ap_queue.c (revision 06ba8020)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright IBM Corp. 2016
4  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5  *
6  * Adjunct processor bus, queue related code.
7  */
8 
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
15 
16 #include "ap_bus.h"
17 #include "ap_debug.h"
18 
19 static void __ap_flush_queue(struct ap_queue *aq);
20 
21 /*
22  * some AP queue helper functions
23  */
24 
25 static inline bool ap_q_supports_bind(struct ap_queue *aq)
26 {
27 	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
28 		ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
29 }
30 
31 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
32 {
33 	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
34 }
35 
36 /**
37  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
38  * @aq: The AP queue
39  * @ind: the notification indicator byte
40  *
41  * Enables interruption on AP queue via ap_aqic(). Based on the return
42  * value it waits a while and tests the AP queue if interrupts
43  * have been switched on using ap_test_queue().
44  */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	/* ir=1 requests interruptions, isc selects the interruption subclass */
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	/* a pending asynchronous error on the queue forbids this operation */
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* interruption enablement accepted */
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		/* permanent failure - queue can't do interrupts (now) */
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		/* transient condition - caller may retry later */
		return -EBUSY;
	}
}
73 
74 /**
75  * __ap_send(): Send message to adjunct processor queue.
76  * @qid: The AP queue number
77  * @psmid: The program supplied message identifier
78  * @msg: The message text
79  * @msglen: The message length
80  * @special: Special Bit
81  *
82  * Returns AP queue status structure.
83  * Condition code 1 on NQAP can't happen because the L bit is 1.
84  * Condition code 2 on NQAP also means the send is incomplete,
85  * because a segment boundary was reached. The NQAP is repeated.
86  */
87 static inline struct ap_queue_status
88 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
89 	  int special)
90 {
91 	if (special)
92 		qid |= 0x400000UL;
93 	return ap_nqap(qid, psmid, msg, msglen);
94 }
95 
96 int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
97 {
98 	struct ap_queue_status status;
99 
100 	status = __ap_send(qid, psmid, msg, msglen, 0);
101 	if (status.async)
102 		return -EPERM;
103 	switch (status.response_code) {
104 	case AP_RESPONSE_NORMAL:
105 		return 0;
106 	case AP_RESPONSE_Q_FULL:
107 	case AP_RESPONSE_RESET_IN_PROGRESS:
108 		return -EBUSY;
109 	case AP_RESPONSE_REQ_FAC_NOT_INST:
110 		return -EINVAL;
111 	default:	/* Device is gone. */
112 		return -ENODEV;
113 	}
114 }
115 EXPORT_SYMBOL(ap_send);
116 
117 int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
118 {
119 	struct ap_queue_status status;
120 
121 	if (!msg)
122 		return -EINVAL;
123 	status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
124 	if (status.async)
125 		return -EPERM;
126 	switch (status.response_code) {
127 	case AP_RESPONSE_NORMAL:
128 		return 0;
129 	case AP_RESPONSE_NO_PENDING_REPLY:
130 		if (status.queue_empty)
131 			return -ENOENT;
132 		return -EBUSY;
133 	case AP_RESPONSE_RESET_IN_PROGRESS:
134 		return -EBUSY;
135 	default:
136 		return -ENODEV;
137 	}
138 }
139 EXPORT_SYMBOL(ap_recv);
140 
141 /* State machine definitions and helpers */
142 
/**
 * ap_sm_nop(): Do-nothing state machine action.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}
147 
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status of the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* one reply retrieved; queue_empty==0 means more pending */
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* match the reply to its request via the psmid */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* multi-part reply overran the buffer */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
220 
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
227 static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
228 {
229 	struct ap_queue_status status;
230 
231 	if (!aq->reply)
232 		return AP_SM_WAIT_NONE;
233 	status = ap_sm_recv(aq);
234 	if (status.async)
235 		return AP_SM_WAIT_NONE;
236 	switch (status.response_code) {
237 	case AP_RESPONSE_NORMAL:
238 		if (aq->queue_count > 0) {
239 			aq->sm_state = AP_SM_STATE_WORKING;
240 			return AP_SM_WAIT_AGAIN;
241 		}
242 		aq->sm_state = AP_SM_STATE_IDLE;
243 		return AP_SM_WAIT_NONE;
244 	case AP_RESPONSE_NO_PENDING_REPLY:
245 		if (aq->queue_count > 0)
246 			return aq->interrupt ?
247 				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
248 		aq->sm_state = AP_SM_STATE_IDLE;
249 		return AP_SM_WAIT_NONE;
250 	default:
251 		aq->dev_state = AP_DEV_STATE_ERROR;
252 		aq->last_err_rc = status.response_code;
253 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
254 			    __func__, status.response_code,
255 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
256 		return AP_SM_WAIT_NONE;
257 	}
258 }
259 
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	/* failure injection: deliberately corrupt the qid for this NQAP */
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* request accepted: move it from requestq to pendingq */
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		/* queue depth reached - treat like a full queue */
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* reject this one message, keep the queue running */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
328 
/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the lesser (i.e. more urgent) of the wait hints returned by
 * ap_sm_read() and ap_sm_write().
 */
335 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
336 {
337 	return min(ap_sm_read(aq), ap_sm_write(aq));
338 }
339 
340 /**
341  * ap_sm_reset(): Reset an AP queue.
342  * @aq: The AP queue
343  *
344  * Submit the Reset command to an AP queue.
345  */
346 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
347 {
348 	struct ap_queue_status status;
349 
350 	status = ap_rapq(aq->qid, aq->rapq_fbit);
351 	if (status.async)
352 		return AP_SM_WAIT_NONE;
353 	switch (status.response_code) {
354 	case AP_RESPONSE_NORMAL:
355 	case AP_RESPONSE_RESET_IN_PROGRESS:
356 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
357 		aq->interrupt = false;
358 		aq->rapq_fbit = 0;
359 		return AP_SM_WAIT_LOW_TIMEOUT;
360 	default:
361 		aq->dev_state = AP_DEV_STATE_ERROR;
362 		aq->last_err_rc = status.response_code;
363 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
364 			    __func__, status.response_code,
365 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
366 		return AP_SM_WAIT_NONE;
367 	}
368 }
369 
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* reset finished - try to enable interrupts if available */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* reset still running - poll again after a short wait */
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
412 
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		/* irq enablement may still be pending - check again later */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
453 
454 /**
455  * ap_sm_assoc_wait(): Test queue for completion of a pending
456  *		       association request.
457  * @aq: pointer to the AP queue
458  */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_gr2 info;

	status = ap_test_queue(aq->qid, 1, &info);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* response codes above BUSY are treated as fatal for the queue */
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* check bs bits */
	switch (info.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, info.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
506 
/*
 * AP state machine jump table
 *
 * First index is the current state machine state, second index the
 * triggering event (poll or timeout). Each entry is the action
 * function ap_sm_event() invokes for that combination.
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
540 
541 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
542 {
543 	if (aq->config && !aq->chkstop &&
544 	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
545 		return ap_jumptable[aq->sm_state][event](aq);
546 	else
547 		return AP_SM_WAIT_NONE;
548 }
549 
550 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
551 {
552 	enum ap_sm_wait wait;
553 
554 	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
555 		;
556 	return wait;
557 }
558 
559 /*
560  * AP queue related attributes.
561  */
562 static ssize_t request_count_show(struct device *dev,
563 				  struct device_attribute *attr,
564 				  char *buf)
565 {
566 	struct ap_queue *aq = to_ap_queue(dev);
567 	bool valid = false;
568 	u64 req_cnt;
569 
570 	spin_lock_bh(&aq->lock);
571 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
572 		req_cnt = aq->total_request_count;
573 		valid = true;
574 	}
575 	spin_unlock_bh(&aq->lock);
576 
577 	if (valid)
578 		return sysfs_emit(buf, "%llu\n", req_cnt);
579 	else
580 		return sysfs_emit(buf, "-\n");
581 }
582 
/* Writing anything to the request_count attribute resets the counter. */
static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}
595 
596 static DEVICE_ATTR_RW(request_count);
597 
598 static ssize_t requestq_count_show(struct device *dev,
599 				   struct device_attribute *attr, char *buf)
600 {
601 	struct ap_queue *aq = to_ap_queue(dev);
602 	unsigned int reqq_cnt = 0;
603 
604 	spin_lock_bh(&aq->lock);
605 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
606 		reqq_cnt = aq->requestq_count;
607 	spin_unlock_bh(&aq->lock);
608 	return sysfs_emit(buf, "%d\n", reqq_cnt);
609 }
610 
611 static DEVICE_ATTR_RO(requestq_count);
612 
613 static ssize_t pendingq_count_show(struct device *dev,
614 				   struct device_attribute *attr, char *buf)
615 {
616 	struct ap_queue *aq = to_ap_queue(dev);
617 	unsigned int penq_cnt = 0;
618 
619 	spin_lock_bh(&aq->lock);
620 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
621 		penq_cnt = aq->pendingq_count;
622 	spin_unlock_bh(&aq->lock);
623 	return sysfs_emit(buf, "%d\n", penq_cnt);
624 }
625 
626 static DEVICE_ATTR_RO(pendingq_count);
627 
628 static ssize_t reset_show(struct device *dev,
629 			  struct device_attribute *attr, char *buf)
630 {
631 	struct ap_queue *aq = to_ap_queue(dev);
632 	int rc = 0;
633 
634 	spin_lock_bh(&aq->lock);
635 	switch (aq->sm_state) {
636 	case AP_SM_STATE_RESET_START:
637 	case AP_SM_STATE_RESET_WAIT:
638 		rc = sysfs_emit(buf, "Reset in progress.\n");
639 		break;
640 	case AP_SM_STATE_WORKING:
641 	case AP_SM_STATE_QUEUE_FULL:
642 		rc = sysfs_emit(buf, "Reset Timer armed.\n");
643 		break;
644 	default:
645 		rc = sysfs_emit(buf, "No Reset Timer set.\n");
646 	}
647 	spin_unlock_bh(&aq->lock);
648 	return rc;
649 }
650 
/*
 * Writing to the reset attribute flushes all requests from the queue
 * and kicks off a fresh reset sequence via the state machine.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	/* drop all queued and pending requests before restarting */
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}
668 
669 static DEVICE_ATTR_RW(reset);
670 
671 static ssize_t interrupt_show(struct device *dev,
672 			      struct device_attribute *attr, char *buf)
673 {
674 	struct ap_queue *aq = to_ap_queue(dev);
675 	int rc = 0;
676 
677 	spin_lock_bh(&aq->lock);
678 	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
679 		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
680 	else if (aq->interrupt)
681 		rc = sysfs_emit(buf, "Interrupts enabled.\n");
682 	else
683 		rc = sysfs_emit(buf, "Interrupts disabled.\n");
684 	spin_unlock_bh(&aq->lock);
685 	return rc;
686 }
687 
688 static DEVICE_ATTR_RO(interrupt);
689 
690 static ssize_t config_show(struct device *dev,
691 			   struct device_attribute *attr, char *buf)
692 {
693 	struct ap_queue *aq = to_ap_queue(dev);
694 	int rc;
695 
696 	spin_lock_bh(&aq->lock);
697 	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
698 	spin_unlock_bh(&aq->lock);
699 	return rc;
700 }
701 
702 static DEVICE_ATTR_RO(config);
703 
704 static ssize_t chkstop_show(struct device *dev,
705 			    struct device_attribute *attr, char *buf)
706 {
707 	struct ap_queue *aq = to_ap_queue(dev);
708 	int rc;
709 
710 	spin_lock_bh(&aq->lock);
711 	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
712 	spin_unlock_bh(&aq->lock);
713 	return rc;
714 }
715 
716 static DEVICE_ATTR_RO(chkstop);
717 
718 static ssize_t ap_functions_show(struct device *dev,
719 				 struct device_attribute *attr, char *buf)
720 {
721 	struct ap_queue *aq = to_ap_queue(dev);
722 	struct ap_queue_status status;
723 	struct ap_tapq_gr2 info;
724 
725 	status = ap_test_queue(aq->qid, 1, &info);
726 	if (status.response_code > AP_RESPONSE_BUSY) {
727 		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
728 			   __func__, status.response_code,
729 			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
730 		return -EIO;
731 	}
732 
733 	return sysfs_emit(buf, "0x%08X\n", info.fac);
734 }
735 
736 static DEVICE_ATTR_RO(ap_functions);
737 
738 #ifdef CONFIG_ZCRYPT_DEBUG
/*
 * Debug attribute: show the queue device state and, if initialized,
 * the state machine state as human readable text.
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state - appended only for initialized devices */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
795 static DEVICE_ATTR_RO(states);
796 
797 static ssize_t last_err_rc_show(struct device *dev,
798 				struct device_attribute *attr, char *buf)
799 {
800 	struct ap_queue *aq = to_ap_queue(dev);
801 	int rc;
802 
803 	spin_lock_bh(&aq->lock);
804 	rc = aq->last_err_rc;
805 	spin_unlock_bh(&aq->lock);
806 
807 	switch (rc) {
808 	case AP_RESPONSE_NORMAL:
809 		return sysfs_emit(buf, "NORMAL\n");
810 	case AP_RESPONSE_Q_NOT_AVAIL:
811 		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
812 	case AP_RESPONSE_RESET_IN_PROGRESS:
813 		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
814 	case AP_RESPONSE_DECONFIGURED:
815 		return sysfs_emit(buf, "DECONFIGURED\n");
816 	case AP_RESPONSE_CHECKSTOPPED:
817 		return sysfs_emit(buf, "CHECKSTOPPED\n");
818 	case AP_RESPONSE_BUSY:
819 		return sysfs_emit(buf, "BUSY\n");
820 	case AP_RESPONSE_INVALID_ADDRESS:
821 		return sysfs_emit(buf, "INVALID_ADDRESS\n");
822 	case AP_RESPONSE_OTHERWISE_CHANGED:
823 		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
824 	case AP_RESPONSE_Q_FULL:
825 		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
826 	case AP_RESPONSE_INDEX_TOO_BIG:
827 		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
828 	case AP_RESPONSE_NO_FIRST_PART:
829 		return sysfs_emit(buf, "NO_FIRST_PART\n");
830 	case AP_RESPONSE_MESSAGE_TOO_BIG:
831 		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
832 	case AP_RESPONSE_REQ_FAC_NOT_INST:
833 		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
834 	default:
835 		return sysfs_emit(buf, "response code %d\n", rc);
836 	}
837 }
838 static DEVICE_ATTR_RO(last_err_rc);
839 #endif
840 
/* sysfs attributes common to all AP queue devices */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	/* extra debug attributes, only with CONFIG_ZCRYPT_DEBUG */
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

/* device type for AP queue devices; attaches the attributes above */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
870 
871 static ssize_t se_bind_show(struct device *dev,
872 			    struct device_attribute *attr, char *buf)
873 {
874 	struct ap_queue *aq = to_ap_queue(dev);
875 	struct ap_queue_status status;
876 	struct ap_tapq_gr2 info;
877 
878 	if (!ap_q_supports_bind(aq))
879 		return sysfs_emit(buf, "-\n");
880 
881 	status = ap_test_queue(aq->qid, 1, &info);
882 	if (status.response_code > AP_RESPONSE_BUSY) {
883 		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
884 			   __func__, status.response_code,
885 			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
886 		return -EIO;
887 	}
888 	switch (info.bs) {
889 	case AP_BS_Q_USABLE:
890 	case AP_BS_Q_USABLE_NO_SECURE_KEY:
891 		return sysfs_emit(buf, "bound\n");
892 	default:
893 		return sysfs_emit(buf, "unbound\n");
894 	}
895 }
896 
/*
 * Bind (write "1") or unbind (write "0") the queue for SE usage.
 * Bind issues a BAPQ, unbind flushes the queue and triggers a reset
 * with the F bit set.
 */
static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (value) {
		/* bind, do BAPQ */
		spin_lock_bh(&aq->lock);
		/* refuse while the queue has not yet reached IDLE */
		if (aq->sm_state < AP_SM_STATE_IDLE) {
			spin_unlock_bh(&aq->lock);
			return -EBUSY;
		}
		status = ap_bapq(aq->qid);
		spin_unlock_bh(&aq->lock);
		if (status.response_code) {
			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
				    __func__, status.response_code,
				    AP_QID_CARD(aq->qid),
				    AP_QID_QUEUE(aq->qid));
			return -EIO;
		}
	} else {
		/* unbind, set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->sm_state = AP_SM_STATE_RESET_START;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}

	return count;
}
943 
944 static DEVICE_ATTR_RW(se_bind);
945 
946 static ssize_t se_associate_show(struct device *dev,
947 				 struct device_attribute *attr, char *buf)
948 {
949 	struct ap_queue *aq = to_ap_queue(dev);
950 	struct ap_queue_status status;
951 	struct ap_tapq_gr2 info;
952 
953 	if (!ap_q_supports_assoc(aq))
954 		return sysfs_emit(buf, "-\n");
955 
956 	status = ap_test_queue(aq->qid, 1, &info);
957 	if (status.response_code > AP_RESPONSE_BUSY) {
958 		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
959 			   __func__, status.response_code,
960 			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
961 		return -EIO;
962 	}
963 
964 	switch (info.bs) {
965 	case AP_BS_Q_USABLE:
966 		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
967 			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
968 			return -EIO;
969 		}
970 		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
971 	case AP_BS_Q_USABLE_NO_SECURE_KEY:
972 		if (aq->assoc_idx != ASSOC_IDX_INVALID)
973 			return sysfs_emit(buf, "association pending\n");
974 		fallthrough;
975 	default:
976 		return sysfs_emit(buf, "unassociated\n");
977 	}
978 }
979 
/*
 * Trigger an asynchronous SE association request (AAPQ) with the
 * association index given as decimal value in buf. The queue must be
 * idle and not yet associated.
 */
static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	/* ASSOC_IDX_INVALID and above are reserved */
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	spin_lock_bh(&aq->lock);

	/* sm should be in idle state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		spin_unlock_bh(&aq->lock);
		return -EBUSY;
	}

	/* already associated or association pending ? */
	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
		spin_unlock_bh(&aq->lock);
		return -EINVAL;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/* request accepted - wait for completion via state machine */
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
		break;
	default:
		spin_unlock_bh(&aq->lock);
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return count;
}
1033 
1034 static DEVICE_ATTR_RW(se_associate);
1035 
/* optional sysfs attributes for SE secure binding capable queues */
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};
1050 
/*
 * Driver core release callback for AP queue devices: remove the queue
 * from the ap_queues hash and free the queue struct.
 */
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	/* unhash under the lock before the memory goes away */
	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}
1061 
1062 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
1063 {
1064 	struct ap_queue *aq;
1065 
1066 	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
1067 	if (!aq)
1068 		return NULL;
1069 	aq->ap_dev.device.release = ap_queue_device_release;
1070 	aq->ap_dev.device.type = &ap_queue_type;
1071 	aq->ap_dev.device_type = device_type;
1072 	// add optional SE secure binding attributes group
1073 	if (ap_sb_available() && is_prot_virt_guest())
1074 		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
1075 	aq->qid = qid;
1076 	aq->interrupt = false;
1077 	spin_lock_init(&aq->lock);
1078 	INIT_LIST_HEAD(&aq->pendingq);
1079 	INIT_LIST_HEAD(&aq->requestq);
1080 	timer_setup(&aq->timeout, ap_request_timeout, 0);
1081 
1082 	return aq;
1083 }
1084 
/**
 * ap_queue_init_reply(): Register the reply message buffer for a queue.
 * @aq: The AP queue
 * @reply: The ap_message to use as reply buffer for this queue
 *
 * Stores the reply buffer pointer and kicks the queue state machine
 * with a POLL event (under the queue lock) so that replies can be
 * fetched.
 */
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
1094 
1095 /**
1096  * ap_queue_message(): Queue a request to an AP device.
1097  * @aq: The AP device to queue the message to
1098  * @ap_msg: The message that is to be added
1099  */
1100 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1101 {
1102 	int rc = 0;
1103 
1104 	/* msg needs to have a valid receive-callback */
1105 	BUG_ON(!ap_msg->receive);
1106 
1107 	spin_lock_bh(&aq->lock);
1108 
1109 	/* only allow to queue new messages if device state is ok */
1110 	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
1111 		list_add_tail(&ap_msg->list, &aq->requestq);
1112 		aq->requestq_count++;
1113 		aq->total_request_count++;
1114 		atomic64_inc(&aq->card->total_request_count);
1115 	} else {
1116 		rc = -ENODEV;
1117 	}
1118 
1119 	/* Send/receive as many request from the queue as possible. */
1120 	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
1121 
1122 	spin_unlock_bh(&aq->lock);
1123 
1124 	return rc;
1125 }
1126 EXPORT_SYMBOL(ap_queue_message);
1127 
1128 /**
1129  * ap_cancel_message(): Cancel a crypto request.
1130  * @aq: The AP device that has the message queued
1131  * @ap_msg: The message that is to be removed
1132  *
1133  * Cancel a crypto request. This is done by removing the request
1134  * from the device pending or request queue. Note that the
1135  * request stays on the AP queue. When it finishes the message
1136  * reply will be discarded because the psmid can't be found.
1137  */
1138 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1139 {
1140 	struct ap_message *tmp;
1141 
1142 	spin_lock_bh(&aq->lock);
1143 	if (!list_empty(&ap_msg->list)) {
1144 		list_for_each_entry(tmp, &aq->pendingq, list)
1145 			if (tmp->psmid == ap_msg->psmid) {
1146 				aq->pendingq_count--;
1147 				goto found;
1148 			}
1149 		aq->requestq_count--;
1150 found:
1151 		list_del_init(&ap_msg->list);
1152 	}
1153 	spin_unlock_bh(&aq->lock);
1154 }
1155 EXPORT_SYMBOL(ap_cancel_message);
1156 
1157 /**
1158  * __ap_flush_queue(): Flush requests.
1159  * @aq: Pointer to the AP queue
1160  *
1161  * Flush all requests from the request/pending queue of an AP device.
1162  */
1163 static void __ap_flush_queue(struct ap_queue *aq)
1164 {
1165 	struct ap_message *ap_msg, *next;
1166 
1167 	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1168 		list_del_init(&ap_msg->list);
1169 		aq->pendingq_count--;
1170 		ap_msg->rc = -EAGAIN;
1171 		ap_msg->receive(aq, ap_msg, NULL);
1172 	}
1173 	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1174 		list_del_init(&ap_msg->list);
1175 		aq->requestq_count--;
1176 		ap_msg->rc = -EAGAIN;
1177 		ap_msg->receive(aq, ap_msg, NULL);
1178 	}
1179 	aq->queue_count = 0;
1180 }
1181 
/**
 * ap_flush_queue(): Flush requests of an AP queue (locking variant).
 * @aq: The AP queue
 *
 * Takes the queue lock and flushes all pending and queued requests,
 * see __ap_flush_queue() for details.
 */
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
1189 
/**
 * ap_queue_prepare_remove(): Prepare an AP queue for removal.
 * @aq: The AP queue
 *
 * Flushes all queued messages, moves the device state to SHUTDOWN
 * (so ap_queue_message() rejects new messages with -ENODEV) and
 * synchronously disarms the request timeout timer. The timer is
 * stopped outside the queue lock to avoid deadlocking against a
 * concurrently running timer callback.
 */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}
1200 
/**
 * ap_queue_remove(): Final step of removing an AP queue.
 * @aq: The AP queue
 *
 * Expects ap_queue_prepare_remove() to have run before, see the
 * comment below for details.
 */
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}
1214 
/**
 * ap_queue_init_state(): (Re-)Initialize the state machine of an AP queue.
 * @aq: The AP queue
 *
 * Sets the device state to OPERATING, restarts the state machine at
 * RESET_START, clears the last error response code, invalidates the
 * association index and triggers a POLL event to get the state
 * machine going.
 */
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1226