xref: /openbmc/linux/drivers/s390/crypto/ap_queue.c (revision f97769fd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on an AP queue via ap_aqic() and maps the
 * returned AP response code to an errno: 0 on success, -EOPNOTSUPP
 * if the queue is not available for interrupt registration, and
 * -EBUSY if the queue is busy or a reset is still in progress.
 */
static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, ind);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}

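/**
 * ap_send(): Send a message to an AP queue and map the result to an errno.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns 0 on success, -EBUSY if the queue is full or a reset is in
 * progress, -EINVAL if a required facility is not installed, and
 * -ENODEV if the queue is gone.
 */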
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

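/**
 * ap_recv(): Receive a reply from an AP queue and map the result to an errno.
 * @qid: The AP queue number
 * @psmid: Pointer that receives the program supplied message identifier
 * @msg: Buffer for the reply message
 * @length: Size of the reply buffer
 *
 * Returns 0 on success, -EINVAL if no reply buffer is given, -ENOENT if
 * the queue is empty, -EBUSY if no reply is pending yet or a reset is in
 * progress, and -ENODEV if the queue is gone.
 */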
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	if (msg == NULL)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/* State machine definitions and helpers */

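/*
 * ap_sm_nop(): No-operation handler for state/event combinations that
 * require no action. Always returns AP_SM_WAIT_NONE.
 */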
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the ap_queue_status returned by the dequeue (DQAP) operation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	status = ap_dqap(aq->qid, &aq->reply->psmid,
			 aq->reply->msg, aq->reply->len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count--;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			ap_msg->receive(aq, ap_msg, aq->reply);
			break;
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_INTERRUPT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->sm_state = AP_SM_STATE_BORKED;
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * or AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;
	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(aq->qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count++;
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return AP_SM_WAIT_INTERRUPT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->sm_state = AP_SM_STATE_BORKED;
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent (smaller) of the wait values returned by
 * ap_sm_read() and ap_sm_write()
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: pointer to the AP queue
 *
 * Submit the Reset command to an AP queue.
 * Returns AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = AP_INTR_DISABLED;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_BUSY:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->sm_state = AP_SM_STATE_BORKED;
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT, or AP_SM_WAIT_NONE
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->sm_state = AP_SM_STATE_BORKED;
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT, or AP_SM_WAIT_NONE
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = AP_INTR_ENABLED;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->sm_state = AP_SM_STATE_BORKED;
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_REMOVE] = {
		[AP_SM_EVENT_POLL] = ap_sm_nop,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_UNBOUND] = {
		[AP_SM_EVENT_POLL] = ap_sm_nop,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_BORKED] = {
		[AP_SM_EVENT_POLL] = ap_sm_nop,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
};

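/**
 * ap_sm_event(): Drive the AP queue state machine for one event.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Looks up the handler for the current state and the given event in
 * ap_jumptable and invokes it. Called with the queue lock held.
 *
 * Returns the wait hint of the invoked handler.
 */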
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	return ap_jumptable[aq->sm_state][event](aq);
}

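/**
 * ap_sm_event_loop(): Drive the AP queue state machine until it no
 *	longer asks to be run again.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Calls ap_sm_event() repeatedly as long as it returns AP_SM_WAIT_AGAIN.
 *
 * Returns the first wait hint other than AP_SM_WAIT_AGAIN.
 */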
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	req_cnt = aq->total_request_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt == AP_INTR_ENABLED)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

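/*
 * ap_queue_device_release(): Release function for an AP queue device.
 * Removes the queue from the ap_queues hash and frees the ap_queue
 * structure. Invoked by the driver core when the last reference to
 * the device is dropped.
 */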
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

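/**
 * ap_queue_create(): Allocate and initialize an ap_queue structure.
 * @qid: The AP queue number (card and queue index)
 * @device_type: The AP device type
 *
 * The new queue starts in state AP_SM_STATE_UNBOUND with interrupts
 * disabled and empty request/pending lists.
 *
 * Returns a pointer to the new ap_queue, or NULL if the allocation failed.
 */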
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->sm_state = AP_SM_STATE_UNBOUND;
	aq->interrupt = AP_INTR_DISABLED;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

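/**
 * ap_queue_init_reply(): Set up the reply message buffer of an AP queue.
 * @aq: The AP queue
 * @reply: The ap_message to use as reply buffer for this queue
 *
 * Installs the reply buffer and polls the queue once so the state
 * machine can make progress.
 */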
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	/* For asynchronous message handling a valid receive callback
	 * is required.
	 */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);
	/* Queue the message. */
	list_add_tail(&ap_msg->list, &aq->requestq);
	aq->requestq_count++;
	aq->total_request_count++;
	atomic64_inc(&aq->card->total_request_count);
	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_message);
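
/*
 * Illustrative sketch (not part of the original file): a message driver
 * typically fills in an ap_message with a unique psmid and a receive
 * callback before calling ap_queue_message(). The names below are made
 * up for illustration only; the zcrypt message type drivers are the
 * real users of this interface.
 *
 *	static void example_receive(struct ap_queue *aq,
 *				    struct ap_message *msg,
 *				    struct ap_message *reply)
 *	{
 *		// Called from ap_sm_recv() with the matching reply, or
 *		// with reply == NULL and msg->rc set (e.g. -EINVAL or
 *		// -EAGAIN) when the request failed or was flushed.
 *	}
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.psmid = some_unique_id;
 *	ap_msg.msg = request_buffer;
 *	ap_msg.len = request_length;
 *	ap_msg.receive = example_receive;
 *	ap_queue_message(aq, &ap_msg);
 */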

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 * Each flushed request is completed by setting its rc field to -EAGAIN
 * and invoking its receive callback with a NULL reply.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

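/**
 * ap_flush_queue(): Flush requests from an AP queue.
 * @aq: Pointer to the AP queue
 *
 * Locked wrapper around __ap_flush_queue().
 */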
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

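/**
 * ap_queue_prepare_remove(): Prepare an AP queue for removal.
 * @aq: Pointer to the AP queue
 *
 * Flushes all pending and queued requests, moves the state machine to
 * AP_SM_STATE_REMOVE so that no new messages can be queued, and stops
 * the request timeout timer.
 */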
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* set REMOVE state to prevent new messages from being queued */
	aq->sm_state = AP_SM_STATE_REMOVE;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

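/**
 * ap_queue_remove(): Finish removal of an AP queue.
 * @aq: Pointer to the AP queue
 *
 * Counterpart to ap_queue_prepare_remove(); see the comment in the
 * function body for details.
 */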
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the state is
	 * AP_SM_STATE_REMOVE. Now reset with zeroize which also
	 * clears the irq registration and move the state
	 * to AP_SM_STATE_UNBOUND to signal that this queue
	 * is currently not used by any driver.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->sm_state = AP_SM_STATE_UNBOUND;
	spin_unlock_bh(&aq->lock);
}

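/**
 * ap_queue_init_state(): (Re-)Initialize the state machine of an AP queue.
 * @aq: Pointer to the AP queue
 *
 * Puts the state machine into AP_SM_STATE_RESET_START and polls the
 * queue once so that the reset gets submitted.
 */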
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);