xref: /openbmc/linux/drivers/s390/crypto/ap_queue.c (revision e825b29a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright IBM Corp. 2016
4  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5  *
6  * Adjunct processor bus, queue related code.
7  */
8 
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
15 
16 #include "ap_bus.h"
17 #include "ap_debug.h"
18 
19 static void __ap_flush_queue(struct ap_queue *aq);
20 
21 /**
22  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
23  * @aq: The AP queue
24  * @ind: the notification indicator byte
25  *
26  * Enables interruption on the AP queue via ap_aqic(). Whether interrupts
27  * have actually been switched on is verified later by the state machine
28  * (see ap_sm_setirq_wait()), which tests the queue with TAPQ.
29  */
30 static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
31 {
32 	struct ap_queue_status status;
33 	struct ap_qirq_ctrl qirqctrl = { 0 };
34 
35 	qirqctrl.ir = 1;
36 	qirqctrl.isc = AP_ISC;
37 	status = ap_aqic(aq->qid, qirqctrl, ind);
38 	switch (status.response_code) {
39 	case AP_RESPONSE_NORMAL:
40 	case AP_RESPONSE_OTHERWISE_CHANGED:
41 		return 0;
42 	case AP_RESPONSE_Q_NOT_AVAIL:
43 	case AP_RESPONSE_DECONFIGURED:
44 	case AP_RESPONSE_CHECKSTOPPED:
45 	case AP_RESPONSE_INVALID_ADDRESS:
46 		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
47 		       AP_QID_CARD(aq->qid),
48 		       AP_QID_QUEUE(aq->qid));
49 		return -EOPNOTSUPP;
50 	case AP_RESPONSE_RESET_IN_PROGRESS:
51 	case AP_RESPONSE_BUSY:
52 	default:
53 		return -EBUSY;
54 	}
55 }
56 
57 /**
58  * __ap_send(): Send message to adjunct processor queue.
59  * @qid: The AP queue number
60  * @psmid: The program supplied message identifier
61  * @msg: The message text
62  * @length: The message length
63  * @special: Special Bit
64  *
65  * Returns AP queue status structure.
66  * Condition code 1 on NQAP can't happen because the L bit is 1.
67  * Condition code 2 on NQAP means the send is incomplete because a
68  * segment boundary was reached; ap_nqap() then repeats the NQAP.
69  */
70 static inline struct ap_queue_status
71 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
72 	  int special)
73 {
74 	if (special)
75 		qid |= 0x400000UL;
76 	return ap_nqap(qid, psmid, msg, length);
77 }
78 
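/**
 * ap_send(): Send a message to an AP queue synchronously.
 * @qid: the AP queue number
 * @psmid: the program supplied message identifier
 * @msg: the request message
 * @length: the request message length
 *
 * Thin wrapper around __ap_send() mapping the AP response code to an
 * errno: 0 on success, -EBUSY if the queue is full or a reset is in
 * progress, -EINVAL if a required facility is not installed, and
 * -ENODEV if the device is gone.
 */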
79 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
80 {
81 	struct ap_queue_status status;
82 
83 	status = __ap_send(qid, psmid, msg, length, 0);
84 	switch (status.response_code) {
85 	case AP_RESPONSE_NORMAL:
86 		return 0;
87 	case AP_RESPONSE_Q_FULL:
88 	case AP_RESPONSE_RESET_IN_PROGRESS:
89 		return -EBUSY;
90 	case AP_RESPONSE_REQ_FAC_NOT_INST:
91 		return -EINVAL;
92 	default:	/* Device is gone. */
93 		return -ENODEV;
94 	}
95 }
96 EXPORT_SYMBOL(ap_send);
97 
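/**
 * ap_recv(): Receive a reply message from an AP queue synchronously.
 * @qid: the AP queue number
 * @psmid: on return, the program supplied message id of the reply
 * @msg: buffer for the reply message
 * @length: size of the reply buffer
 *
 * Returns 0 on success, -EINVAL for a NULL buffer, -ENOENT if the
 * queue is empty, -EBUSY if a reply is pending but not yet available
 * or a reset is in progress, and -ENODEV if the device is gone.
 */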
98 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
99 {
100 	struct ap_queue_status status;
101 
102 	if (msg == NULL)
103 		return -EINVAL;
104 	status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
105 	switch (status.response_code) {
106 	case AP_RESPONSE_NORMAL:
107 		return 0;
108 	case AP_RESPONSE_NO_PENDING_REPLY:
109 		if (status.queue_empty)
110 			return -ENOENT;
111 		return -EBUSY;
112 	case AP_RESPONSE_RESET_IN_PROGRESS:
113 		return -EBUSY;
114 	default:
115 		return -ENODEV;
116 	}
117 }
118 EXPORT_SYMBOL(ap_recv);
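/*
 * Editorial usage sketch (not part of the driver): the exported ap_send()
 * and ap_recv() helpers allow a simple synchronous request/reply cycle.
 * AP_MKQID() comes from ap_bus.h; reqbuf, reqlen, replybuf, replybufsize
 * and the polling strategy are placeholders chosen for illustration only.
 *
 *	ap_qid_t qid = AP_MKQID(card, queue);
 *	unsigned long long psmid = 0x0102030405060708ULL;
 *	int rc;
 *
 *	rc = ap_send(qid, psmid, reqbuf, reqlen);
 *	if (rc)
 *		return rc;
 *	do {
 *		rc = ap_recv(qid, &psmid, replybuf, replybufsize);
 *	} while (rc == -ENOENT || rc == -EBUSY);
 */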
119 
120 /* State machine definitions and helpers */
121 
122 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
123 {
124 	return AP_SM_WAIT_NONE;
125 }
126 
127 /**
128  * ap_sm_recv(): Receive pending reply messages from an AP queue but do
129  *	not change the state of the device.
130  * @aq: pointer to the AP queue
131  *
132  * Returns the ap_queue_status of the last DQAP invocation.
133  */
134 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
135 {
136 	struct ap_queue_status status;
137 	struct ap_message *ap_msg;
138 	bool found = false;
139 	size_t reslen;
140 	unsigned long resgr0 = 0;
141 	int parts = 0;
142 
143 	/*
144 	 * DQAP loop until the response code and resgr0 indicate that the
145 	 * msg has been received completely. As the very same buffer is used
146 	 * for every invocation, a multi-part msg overwrites the previous
147 	 * parts. That is intended; the receiver of the msg is informed via
148 	 * an ap_msg rc of -EMSGSIZE in such a case.
149 	 */
150 	do {
151 		status = ap_dqap(aq->qid, &aq->reply->psmid,
152 				 aq->reply->msg, aq->reply->bufsize,
153 				 &reslen, &resgr0);
154 		parts++;
155 	} while (status.response_code == 0xFF && resgr0 != 0);
156 
157 	switch (status.response_code) {
158 	case AP_RESPONSE_NORMAL:
159 		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
160 		if (aq->queue_count > 0)
161 			mod_timer(&aq->timeout,
162 				  jiffies + aq->request_timeout);
163 		list_for_each_entry(ap_msg, &aq->pendingq, list) {
164 			if (ap_msg->psmid != aq->reply->psmid)
165 				continue;
166 			list_del_init(&ap_msg->list);
167 			aq->pendingq_count--;
168 			if (parts > 1) {
169 				ap_msg->rc = -EMSGSIZE;
170 				ap_msg->receive(aq, ap_msg, NULL);
171 			} else {
172 				ap_msg->receive(aq, ap_msg, aq->reply);
173 			}
174 			found = true;
175 			break;
176 		}
177 		if (!found) {
178 			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
179 				    __func__, aq->reply->psmid,
180 				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
181 		}
182 		fallthrough;
183 	case AP_RESPONSE_NO_PENDING_REPLY:
184 		if (!status.queue_empty || aq->queue_count <= 0)
185 			break;
186 		/* The card shouldn't forget requests but who knows. */
187 		aq->queue_count = 0;
188 		list_splice_init(&aq->pendingq, &aq->requestq);
189 		aq->requestq_count += aq->pendingq_count;
190 		aq->pendingq_count = 0;
191 		break;
192 	default:
193 		break;
194 	}
195 	return status;
196 }
197 
198 /**
199  * ap_sm_read(): Receive pending reply messages from an AP queue.
200  * @aq: pointer to the AP queue
201  *
202  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_INTERRUPT
203  */
204 static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
205 {
206 	struct ap_queue_status status;
207 
208 	if (!aq->reply)
209 		return AP_SM_WAIT_NONE;
210 	status = ap_sm_recv(aq);
211 	switch (status.response_code) {
212 	case AP_RESPONSE_NORMAL:
213 		if (aq->queue_count > 0) {
214 			aq->sm_state = AP_SM_STATE_WORKING;
215 			return AP_SM_WAIT_AGAIN;
216 		}
217 		aq->sm_state = AP_SM_STATE_IDLE;
218 		return AP_SM_WAIT_NONE;
219 	case AP_RESPONSE_NO_PENDING_REPLY:
220 		if (aq->queue_count > 0)
221 			return aq->interrupt ?
222 				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
223 		aq->sm_state = AP_SM_STATE_IDLE;
224 		return AP_SM_WAIT_NONE;
225 	default:
226 		aq->dev_state = AP_DEV_STATE_ERROR;
227 		aq->last_err_rc = status.response_code;
228 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
229 			    __func__, status.response_code,
230 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
231 		return AP_SM_WAIT_NONE;
232 	}
233 }
234 
235 /**
236  * ap_sm_write(): Send messages from the request queue to an AP queue.
237  * @aq: pointer to the AP queue
238  *
239  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_INTERRUPT
240  */
241 static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
242 {
243 	struct ap_queue_status status;
244 	struct ap_message *ap_msg;
245 	ap_qid_t qid = aq->qid;
246 
247 	if (aq->requestq_count <= 0)
248 		return AP_SM_WAIT_NONE;
249 	/* Start the next request on the queue. */
250 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
251 #ifdef CONFIG_ZCRYPT_DEBUG
252 	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
253 		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
254 			    __func__, ap_msg->fi.cmd);
255 		qid = 0xFF00;
256 	}
257 #endif
258 	status = __ap_send(qid, ap_msg->psmid,
259 			   ap_msg->msg, ap_msg->len,
260 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
261 	switch (status.response_code) {
262 	case AP_RESPONSE_NORMAL:
263 		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
264 		if (aq->queue_count == 1)
265 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
266 		list_move_tail(&ap_msg->list, &aq->pendingq);
267 		aq->requestq_count--;
268 		aq->pendingq_count++;
269 		if (aq->queue_count < aq->card->queue_depth) {
270 			aq->sm_state = AP_SM_STATE_WORKING;
271 			return AP_SM_WAIT_AGAIN;
272 		}
273 		fallthrough;
274 	case AP_RESPONSE_Q_FULL:
275 		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
276 		return aq->interrupt ?
277 			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
278 	case AP_RESPONSE_RESET_IN_PROGRESS:
279 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
280 		return AP_SM_WAIT_TIMEOUT;
281 	case AP_RESPONSE_INVALID_DOMAIN:
282 		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
283 		fallthrough;
284 	case AP_RESPONSE_MESSAGE_TOO_BIG:
285 	case AP_RESPONSE_REQ_FAC_NOT_INST:
286 		list_del_init(&ap_msg->list);
287 		aq->requestq_count--;
288 		ap_msg->rc = -EINVAL;
289 		ap_msg->receive(aq, ap_msg, NULL);
290 		return AP_SM_WAIT_AGAIN;
291 	default:
292 		aq->dev_state = AP_DEV_STATE_ERROR;
293 		aq->last_err_rc = status.response_code;
294 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
295 			    __func__, status.response_code,
296 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
297 		return AP_SM_WAIT_NONE;
298 	}
299 }
300 
301 /**
302  * ap_sm_read_write(): Send and receive messages to/from an AP queue.
303  * @aq: pointer to the AP queue
304  *
305  * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_INTERRUPT
306  */
307 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
308 {
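	/*
	 * The ap_sm_wait enum is ordered from most to least urgent, so
	 * min() returns the tighter polling requirement of the read and
	 * write paths.
	 */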
309 	return min(ap_sm_read(aq), ap_sm_write(aq));
310 }
311 
312 /**
313  * ap_sm_reset(): Reset an AP queue.
314  * @aq: The AP queue
315  *
316  * Submit the Reset command to an AP queue.
317  */
318 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
319 {
320 	struct ap_queue_status status;
321 
322 	status = ap_rapq(aq->qid);
323 	switch (status.response_code) {
324 	case AP_RESPONSE_NORMAL:
325 	case AP_RESPONSE_RESET_IN_PROGRESS:
326 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
327 		aq->interrupt = false;
328 		return AP_SM_WAIT_TIMEOUT;
329 	default:
330 		aq->dev_state = AP_DEV_STATE_ERROR;
331 		aq->last_err_rc = status.response_code;
332 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
333 			    __func__, status.response_code,
334 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
335 		return AP_SM_WAIT_NONE;
336 	}
337 }
338 
339 /**
340  * ap_sm_reset_wait(): Test queue for completion of the reset operation
341  * @aq: pointer to the AP queue
342  *
343  * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
344  */
345 static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
346 {
347 	struct ap_queue_status status;
348 	void *lsi_ptr;
349 
350 	if (aq->queue_count > 0 && aq->reply)
351 		/* Try to read a completed message and get the status */
352 		status = ap_sm_recv(aq);
353 	else
354 		/* Get the status with TAPQ */
355 		status = ap_tapq(aq->qid, NULL);
356 
357 	switch (status.response_code) {
358 	case AP_RESPONSE_NORMAL:
359 		lsi_ptr = ap_airq_ptr();
360 		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
361 			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
362 		else
363 			aq->sm_state = (aq->queue_count > 0) ?
364 				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
365 		return AP_SM_WAIT_AGAIN;
366 	case AP_RESPONSE_BUSY:
367 	case AP_RESPONSE_RESET_IN_PROGRESS:
368 		return AP_SM_WAIT_TIMEOUT;
369 	case AP_RESPONSE_Q_NOT_AVAIL:
370 	case AP_RESPONSE_DECONFIGURED:
371 	case AP_RESPONSE_CHECKSTOPPED:
372 	default:
373 		aq->dev_state = AP_DEV_STATE_ERROR;
374 		aq->last_err_rc = status.response_code;
375 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
376 			    __func__, status.response_code,
377 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
378 		return AP_SM_WAIT_NONE;
379 	}
380 }
381 
382 /**
383  * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
384  * @aq: pointer to the AP queue
385  *
386  * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
387  */
388 static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
389 {
390 	struct ap_queue_status status;
391 
392 	if (aq->queue_count > 0 && aq->reply)
393 		/* Try to read a completed message and get the status */
394 		status = ap_sm_recv(aq);
395 	else
396 		/* Get the status with TAPQ */
397 		status = ap_tapq(aq->qid, NULL);
398 
399 	if (status.irq_enabled == 1) {
400 		/* Irqs are now enabled */
401 		aq->interrupt = true;
402 		aq->sm_state = (aq->queue_count > 0) ?
403 			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
404 	}
405 
406 	switch (status.response_code) {
407 	case AP_RESPONSE_NORMAL:
408 		if (aq->queue_count > 0)
409 			return AP_SM_WAIT_AGAIN;
410 		fallthrough;
411 	case AP_RESPONSE_NO_PENDING_REPLY:
412 		return AP_SM_WAIT_TIMEOUT;
413 	default:
414 		aq->dev_state = AP_DEV_STATE_ERROR;
415 		aq->last_err_rc = status.response_code;
416 		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
417 			    __func__, status.response_code,
418 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
419 		return AP_SM_WAIT_NONE;
420 	}
421 }
422 
423 /*
424  * AP state machine jump table
425  */
426 static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
427 	[AP_SM_STATE_RESET_START] = {
428 		[AP_SM_EVENT_POLL] = ap_sm_reset,
429 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
430 	},
431 	[AP_SM_STATE_RESET_WAIT] = {
432 		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
433 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
434 	},
435 	[AP_SM_STATE_SETIRQ_WAIT] = {
436 		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
437 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
438 	},
439 	[AP_SM_STATE_IDLE] = {
440 		[AP_SM_EVENT_POLL] = ap_sm_write,
441 		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
442 	},
443 	[AP_SM_STATE_WORKING] = {
444 		[AP_SM_EVENT_POLL] = ap_sm_read_write,
445 		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
446 	},
447 	[AP_SM_STATE_QUEUE_FULL] = {
448 		[AP_SM_EVENT_POLL] = ap_sm_read,
449 		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
450 	},
451 };
452 
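/**
 * ap_sm_event(): Drive one state machine transition of an AP queue.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Looks up the handler for the current sm_state/event pair in the jump
 * table and returns its wait hint. Queues still in the UNINITIATED
 * device state are left untouched. Called with the queue lock held.
 */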
453 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
454 {
455 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
456 		return ap_jumptable[aq->sm_state][event](aq);
457 	else
458 		return AP_SM_WAIT_NONE;
459 }
460 
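/**
 * ap_sm_event_loop(): Run state machine events until progress stalls.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Repeats ap_sm_event() for as long as it requests an immediate retry
 * (AP_SM_WAIT_AGAIN) and returns the final wait hint.
 */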
461 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
462 {
463 	enum ap_sm_wait wait;
464 
465 	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
466 		;
467 	return wait;
468 }
469 
470 /*
471  * AP queue related attributes.
472  */
473 static ssize_t request_count_show(struct device *dev,
474 				  struct device_attribute *attr,
475 				  char *buf)
476 {
477 	struct ap_queue *aq = to_ap_queue(dev);
478 	bool valid = false;
479 	u64 req_cnt;
480 
481 	spin_lock_bh(&aq->lock);
482 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
483 		req_cnt = aq->total_request_count;
484 		valid = true;
485 	}
486 	spin_unlock_bh(&aq->lock);
487 
488 	if (valid)
489 		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
490 	else
491 		return scnprintf(buf, PAGE_SIZE, "-\n");
492 }
493 
494 static ssize_t request_count_store(struct device *dev,
495 				   struct device_attribute *attr,
496 				   const char *buf, size_t count)
497 {
498 	struct ap_queue *aq = to_ap_queue(dev);
499 
500 	spin_lock_bh(&aq->lock);
501 	aq->total_request_count = 0;
502 	spin_unlock_bh(&aq->lock);
503 
504 	return count;
505 }
506 
507 static DEVICE_ATTR_RW(request_count);
508 
509 static ssize_t requestq_count_show(struct device *dev,
510 				   struct device_attribute *attr, char *buf)
511 {
512 	struct ap_queue *aq = to_ap_queue(dev);
513 	unsigned int reqq_cnt = 0;
514 
515 	spin_lock_bh(&aq->lock);
516 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
517 		reqq_cnt = aq->requestq_count;
518 	spin_unlock_bh(&aq->lock);
519 	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
520 }
521 
522 static DEVICE_ATTR_RO(requestq_count);
523 
524 static ssize_t pendingq_count_show(struct device *dev,
525 				   struct device_attribute *attr, char *buf)
526 {
527 	struct ap_queue *aq = to_ap_queue(dev);
528 	unsigned int penq_cnt = 0;
529 
530 	spin_lock_bh(&aq->lock);
531 	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
532 		penq_cnt = aq->pendingq_count;
533 	spin_unlock_bh(&aq->lock);
534 	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
535 }
536 
537 static DEVICE_ATTR_RO(pendingq_count);
538 
539 static ssize_t reset_show(struct device *dev,
540 			  struct device_attribute *attr, char *buf)
541 {
542 	struct ap_queue *aq = to_ap_queue(dev);
543 	int rc = 0;
544 
545 	spin_lock_bh(&aq->lock);
546 	switch (aq->sm_state) {
547 	case AP_SM_STATE_RESET_START:
548 	case AP_SM_STATE_RESET_WAIT:
549 		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
550 		break;
551 	case AP_SM_STATE_WORKING:
552 	case AP_SM_STATE_QUEUE_FULL:
553 		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
554 		break;
555 	default:
556 		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
557 	}
558 	spin_unlock_bh(&aq->lock);
559 	return rc;
560 }
561 
562 static ssize_t reset_store(struct device *dev,
563 			   struct device_attribute *attr,
564 			   const char *buf, size_t count)
565 {
566 	struct ap_queue *aq = to_ap_queue(dev);
567 
568 	spin_lock_bh(&aq->lock);
569 	__ap_flush_queue(aq);
570 	aq->sm_state = AP_SM_STATE_RESET_START;
571 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
572 	spin_unlock_bh(&aq->lock);
573 
574 	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
575 	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
576 
577 	return count;
578 }
579 
580 static DEVICE_ATTR_RW(reset);
581 
582 static ssize_t interrupt_show(struct device *dev,
583 			      struct device_attribute *attr, char *buf)
584 {
585 	struct ap_queue *aq = to_ap_queue(dev);
586 	int rc = 0;
587 
588 	spin_lock_bh(&aq->lock);
589 	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
590 		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
591 	else if (aq->interrupt)
592 		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
593 	else
594 		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
595 	spin_unlock_bh(&aq->lock);
596 	return rc;
597 }
598 
599 static DEVICE_ATTR_RO(interrupt);
600 
601 static ssize_t config_show(struct device *dev,
602 			     struct device_attribute *attr, char *buf)
603 {
604 	struct ap_queue *aq = to_ap_queue(dev);
605 	int rc;
606 
607 	spin_lock_bh(&aq->lock);
608 	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
609 	spin_unlock_bh(&aq->lock);
610 	return rc;
611 }
612 
613 static DEVICE_ATTR_RO(config);
614 
615 #ifdef CONFIG_ZCRYPT_DEBUG
616 static ssize_t states_show(struct device *dev,
617 			   struct device_attribute *attr, char *buf)
618 {
619 	struct ap_queue *aq = to_ap_queue(dev);
620 	int rc = 0;
621 
622 	spin_lock_bh(&aq->lock);
623 	/* queue device state */
624 	switch (aq->dev_state) {
625 	case AP_DEV_STATE_UNINITIATED:
626 		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
627 		break;
628 	case AP_DEV_STATE_OPERATING:
629 		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
630 		break;
631 	case AP_DEV_STATE_SHUTDOWN:
632 		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
633 		break;
634 	case AP_DEV_STATE_ERROR:
635 		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
636 		break;
637 	default:
638 		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
639 	}
640 	/* state machine state */
641 	if (aq->dev_state) {
642 		switch (aq->sm_state) {
643 		case AP_SM_STATE_RESET_START:
644 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
645 					" [RESET_START]\n");
646 			break;
647 		case AP_SM_STATE_RESET_WAIT:
648 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
649 					" [RESET_WAIT]\n");
650 			break;
651 		case AP_SM_STATE_SETIRQ_WAIT:
652 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
653 					" [SETIRQ_WAIT]\n");
654 			break;
655 		case AP_SM_STATE_IDLE:
656 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
657 					" [IDLE]\n");
658 			break;
659 		case AP_SM_STATE_WORKING:
660 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
661 					" [WORKING]\n");
662 			break;
663 		case AP_SM_STATE_QUEUE_FULL:
664 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
665 					" [FULL]\n");
666 			break;
667 		default:
668 			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
669 					" [UNKNOWN]\n");
670 		}
671 	}
672 	spin_unlock_bh(&aq->lock);
673 
674 	return rc;
675 }
676 static DEVICE_ATTR_RO(states);
677 
678 static ssize_t last_err_rc_show(struct device *dev,
679 				struct device_attribute *attr, char *buf)
680 {
681 	struct ap_queue *aq = to_ap_queue(dev);
682 	int rc;
683 
684 	spin_lock_bh(&aq->lock);
685 	rc = aq->last_err_rc;
686 	spin_unlock_bh(&aq->lock);
687 
688 	switch (rc) {
689 	case AP_RESPONSE_NORMAL:
690 		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
691 	case AP_RESPONSE_Q_NOT_AVAIL:
692 		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
693 	case AP_RESPONSE_RESET_IN_PROGRESS:
694 		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
695 	case AP_RESPONSE_DECONFIGURED:
696 		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
697 	case AP_RESPONSE_CHECKSTOPPED:
698 		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
699 	case AP_RESPONSE_BUSY:
700 		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
701 	case AP_RESPONSE_INVALID_ADDRESS:
702 		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
703 	case AP_RESPONSE_OTHERWISE_CHANGED:
704 		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
705 	case AP_RESPONSE_Q_FULL:
706 		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
707 	case AP_RESPONSE_INDEX_TOO_BIG:
708 		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
709 	case AP_RESPONSE_NO_FIRST_PART:
710 		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
711 	case AP_RESPONSE_MESSAGE_TOO_BIG:
712 		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
713 	case AP_RESPONSE_REQ_FAC_NOT_INST:
714 		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
715 	default:
716 		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
717 	}
718 }
719 static DEVICE_ATTR_RO(last_err_rc);
720 #endif
721 
722 static struct attribute *ap_queue_dev_attrs[] = {
723 	&dev_attr_request_count.attr,
724 	&dev_attr_requestq_count.attr,
725 	&dev_attr_pendingq_count.attr,
726 	&dev_attr_reset.attr,
727 	&dev_attr_interrupt.attr,
728 	&dev_attr_config.attr,
729 #ifdef CONFIG_ZCRYPT_DEBUG
730 	&dev_attr_states.attr,
731 	&dev_attr_last_err_rc.attr,
732 #endif
733 	NULL
734 };
735 
736 static struct attribute_group ap_queue_dev_attr_group = {
737 	.attrs = ap_queue_dev_attrs
738 };
739 
740 static const struct attribute_group *ap_queue_dev_attr_groups[] = {
741 	&ap_queue_dev_attr_group,
742 	NULL
743 };
744 
745 static struct device_type ap_queue_type = {
746 	.name = "ap_queue",
747 	.groups = ap_queue_dev_attr_groups,
748 };
749 
750 static void ap_queue_device_release(struct device *dev)
751 {
752 	struct ap_queue *aq = to_ap_queue(dev);
753 
754 	spin_lock_bh(&ap_queues_lock);
755 	hash_del(&aq->hnode);
756 	spin_unlock_bh(&ap_queues_lock);
757 
758 	kfree(aq);
759 }
760 
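/**
 * ap_queue_create(): Allocate and initialize a new ap_queue object.
 * @qid: the AP queue number (card and queue index)
 * @device_type: the AP device type of the owning card
 *
 * Returns the new ap_queue on success, NULL if the allocation failed.
 * The queue starts without a reply buffer and with interrupts disabled;
 * ap_queue_init_reply() and ap_queue_init_state() complete the setup.
 */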
761 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
762 {
763 	struct ap_queue *aq;
764 
765 	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
766 	if (!aq)
767 		return NULL;
768 	aq->ap_dev.device.release = ap_queue_device_release;
769 	aq->ap_dev.device.type = &ap_queue_type;
770 	aq->ap_dev.device_type = device_type;
771 	aq->qid = qid;
772 	aq->interrupt = false;
773 	spin_lock_init(&aq->lock);
774 	INIT_LIST_HEAD(&aq->pendingq);
775 	INIT_LIST_HEAD(&aq->requestq);
776 	timer_setup(&aq->timeout, ap_request_timeout, 0);
777 
778 	return aq;
779 }
780 
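/**
 * ap_queue_init_reply(): Attach the reply buffer to an AP queue.
 * @aq: pointer to the AP queue
 * @reply: the ap_message providing the reply buffer
 *
 * Once the buffer is in place the state machine is polled so that
 * pending replies can be fetched.
 */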
781 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
782 {
783 	aq->reply = reply;
784 
785 	spin_lock_bh(&aq->lock);
786 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
787 	spin_unlock_bh(&aq->lock);
788 }
789 EXPORT_SYMBOL(ap_queue_init_reply);
790 
791 /**
792  * ap_queue_message(): Queue a request to an AP device.
793  * @aq: The AP device to queue the message to
794  * @ap_msg: The message that is to be added
795  */
796 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
797 {
798 	int rc = 0;
799 
800 	/* msg needs to have a valid receive-callback */
801 	BUG_ON(!ap_msg->receive);
802 
803 	spin_lock_bh(&aq->lock);
804 
805 	/* only allow queueing new messages if the device state is ok */
806 	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
807 		list_add_tail(&ap_msg->list, &aq->requestq);
808 		aq->requestq_count++;
809 		aq->total_request_count++;
810 		atomic64_inc(&aq->card->total_request_count);
811 	} else
812 		rc = -ENODEV;
813 
814 	/* Send/receive as many requests from the queue as possible. */
815 	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
816 
817 	spin_unlock_bh(&aq->lock);
818 
819 	return rc;
820 }
821 EXPORT_SYMBOL(ap_queue_message);
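/*
 * Editorial usage sketch (not part of the driver): a message provider,
 * such as a zcrypt message type module, fills an ap_message and queues
 * it on an ap_queue. The names reqbuf, reqlen, unique_psmid and
 * my_receive_cb are placeholders for illustration only.
 *
 *	static void my_receive_cb(struct ap_queue *aq,
 *				  struct ap_message *msg,
 *				  struct ap_message *reply)
 *	{
 *		(check msg->rc, then copy data out of reply->msg)
 *	}
 *
 *	struct ap_message ap_msg;
 *	int rc;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.msg = reqbuf;
 *	ap_msg.len = reqlen;
 *	ap_msg.psmid = unique_psmid;
 *	ap_msg.receive = my_receive_cb;
 *	rc = ap_queue_message(aq, &ap_msg);
 */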
822 
823 /**
824  * ap_cancel_message(): Cancel a crypto request.
825  * @aq: The AP device that has the message queued
826  * @ap_msg: The message that is to be removed
827  *
828  * Cancel a crypto request. This is done by removing the request
829  * Cancel a crypto request. This is done by removing the request
830  * from the device's pending or request queue. Note that the
831  * request stays on the AP queue; when it finishes, the reply
832  * will be discarded because its psmid can't be found.
833 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
834 {
835 	struct ap_message *tmp;
836 
837 	spin_lock_bh(&aq->lock);
838 	if (!list_empty(&ap_msg->list)) {
839 		list_for_each_entry(tmp, &aq->pendingq, list)
840 			if (tmp->psmid == ap_msg->psmid) {
841 				aq->pendingq_count--;
842 				goto found;
843 			}
844 		aq->requestq_count--;
845 found:
846 		list_del_init(&ap_msg->list);
847 	}
848 	spin_unlock_bh(&aq->lock);
849 }
850 EXPORT_SYMBOL(ap_cancel_message);
851 
852 /**
853  * __ap_flush_queue(): Flush requests.
854  * @aq: Pointer to the AP queue
855  *
856  * Flush all requests from the request/pending queue of an AP device.
857  */
858 static void __ap_flush_queue(struct ap_queue *aq)
859 {
860 	struct ap_message *ap_msg, *next;
861 
862 	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
863 		list_del_init(&ap_msg->list);
864 		aq->pendingq_count--;
865 		ap_msg->rc = -EAGAIN;
866 		ap_msg->receive(aq, ap_msg, NULL);
867 	}
868 	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
869 		list_del_init(&ap_msg->list);
870 		aq->requestq_count--;
871 		ap_msg->rc = -EAGAIN;
872 		ap_msg->receive(aq, ap_msg, NULL);
873 	}
874 	aq->queue_count = 0;
875 }
876 
877 void ap_flush_queue(struct ap_queue *aq)
878 {
879 	spin_lock_bh(&aq->lock);
880 	__ap_flush_queue(aq);
881 	spin_unlock_bh(&aq->lock);
882 }
883 EXPORT_SYMBOL(ap_flush_queue);
884 
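/**
 * ap_queue_prepare_remove(): Prepare an AP queue for removal.
 * @aq: pointer to the AP queue
 *
 * Flushes all outstanding requests, moves the device state to SHUTDOWN
 * and stops the request timeout timer.
 */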
885 void ap_queue_prepare_remove(struct ap_queue *aq)
886 {
887 	spin_lock_bh(&aq->lock);
888 	/* flush queue */
889 	__ap_flush_queue(aq);
890 	/* move queue device state to SHUTDOWN in progress */
891 	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
892 	spin_unlock_bh(&aq->lock);
893 	del_timer_sync(&aq->timeout);
894 }
895 
896 void ap_queue_remove(struct ap_queue *aq)
897 {
898 	/*
899 	 * All messages have been flushed and the device state is
900 	 * SHUTDOWN. Now reset with zero (ZAPQ), which also clears the
901 	 * irq registration, and move the device state to the initial
902 	 * value AP_DEV_STATE_UNINITIATED.
903 	 */
904 	spin_lock_bh(&aq->lock);
905 	ap_zapq(aq->qid);
906 	aq->dev_state = AP_DEV_STATE_UNINITIATED;
907 	spin_unlock_bh(&aq->lock);
908 }
909 
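/**
 * ap_queue_init_state(): Bring a fresh AP queue into operation.
 * @aq: pointer to the AP queue
 *
 * Moves the device state to OPERATING, starts the state machine with a
 * queue reset and triggers the first poll.
 */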
910 void ap_queue_init_state(struct ap_queue *aq)
911 {
912 	spin_lock_bh(&aq->lock);
913 	aq->dev_state = AP_DEV_STATE_OPERATING;
914 	aq->sm_state = AP_SM_STATE_RESET_START;
915 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
916 	spin_unlock_bh(&aq->lock);
917 }
918 EXPORT_SYMBOL(ap_queue_init_state);
919