xref: /openbmc/linux/drivers/s390/crypto/ap_queue.c (revision 1f012283)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). Based on the return
 * value the caller waits a while and then tests the AP queue to check
 * whether interrupts have been switched on (see ap_test_queue()).
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

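	/* request interruptions on the bus-wide AP interrupt subclass */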
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, ind);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP means the send is incomplete because a
 * segment boundary was reached; the NQAP is then repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
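	/* 'special' requests set an extra bit next to the APQN passed to NQAP */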
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	if (msg == NULL)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);
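
/*
 * A minimal usage sketch for the two exported helpers above (illustrative
 * only; card, queue, the buffers and the psmid value are made up here):
 *
 *	unsigned long long psmid = 0x0102030405060708ULL;
 *	ap_qid_t qid = AP_MKQID(card, queue);
 *	int rc;
 *
 *	rc = ap_send(qid, psmid, reqbuf, reqlen);
 *	if (rc)
 *		return rc;
 *	do {
 *		rc = ap_recv(qid, &psmid, repbuf, repbuflen);
 *	} while (rc == -ENOENT || rc == -EBUSY);
 *
 * The in-kernel crypto device drivers do not poll like this but use
 * ap_queue_message() below, which integrates with the queue state machine,
 * timeouts and interrupt support.
 */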

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the ap_queue_status as returned by the last dqap invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * Loop on DQAP until the response code and resgr0 indicate that
	 * the msg has been received completely. As the very same buffer
	 * is used, the msg is overwritten with each invocation. That's
	 * intended; in such a case the receiver of the msg is informed
	 * with a msg rc code of EMSGSIZE.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
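		/*
		 * A reply was dequeued: drop the number of requests on
		 * the queue by one, but keep queue_count at least 1 as
		 * long as the hardware reports the queue as not empty.
		 */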
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
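	/*
	 * enum ap_sm_wait is ordered from most urgent (AP_SM_WAIT_AGAIN)
	 * to least urgent (AP_SM_WAIT_NONE), so min() yields the more
	 * urgent of the read and write wait requests.
	 */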
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

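/*
 * A fresh queue starts out in AP_SM_STATE_RESET_START (see
 * ap_queue_init_state()). Poll events then typically move it through
 * RESET_WAIT and, if adapter interrupts are available, SETIRQ_WAIT
 * before it settles in IDLE or WORKING. A timeout event in the WORKING
 * or QUEUE_FULL state triggers a new reset.
 *
 * ap_sm_event() and ap_sm_event_loop() expect aq->lock to be held by
 * the caller, see for example ap_queue_message() or reset_store() below.
 */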
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
	else
		return scnprintf(buf, PAGE_SIZE, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [FULL]\n");
			break;
		default:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow queueing new messages if the device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
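
/*
 * A minimal sketch of how a request is queued (illustrative only;
 * my_receive_cb and the buffer handling are hypothetical, the field
 * names follow struct ap_message in ap_bus.h):
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.msg = kmalloc(bufsize, GFP_KERNEL);
 *	ap_msg.bufsize = bufsize;
 *	ap_msg.len = reqlen;		(request already built in ap_msg.msg)
 *	ap_msg.psmid = some_unique_id;
 *	ap_msg.receive = my_receive_cb;	(mandatory, see the BUG_ON above)
 *	rc = ap_queue_message(aq, &ap_msg);
 *
 * The receive callback is invoked with the reply once a message with the
 * matching psmid has been read from the queue (see ap_sm_recv()), or with
 * a NULL reply and ap_msg.rc set when the request failed or was flushed.
 */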

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
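	/*
	 * Call del_timer_sync() only after the lock has been dropped:
	 * the timer callback (ap_request_timeout(), see ap_bus.c) takes
	 * aq->lock itself.
	 */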
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state
	 * is SHUTDOWN. Now reset and zeroize the queue (ZAPQ),
	 * which also clears the irq registration, and move the
	 * device state to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
922