xref: /openbmc/linux/drivers/s390/crypto/ap_bus.c (revision b6dcefde)
1 /*
2  * linux/drivers/s390/crypto/ap_bus.c
3  *
4  * Copyright (C) 2006 IBM Corporation
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
7  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
8  *	      Felix Beck <felix.beck@de.ibm.com>
9  *
10  * Adjunct processor bus.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26 
27 #define KMSG_COMPONENT "ap"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/notifier.h>
37 #include <linux/kthread.h>
38 #include <linux/mutex.h>
39 #include <asm/reset.h>
40 #include <asm/airq.h>
41 #include <asm/atomic.h>
42 #include <asm/system.h>
43 #include <asm/isc.h>
44 #include <linux/hrtimer.h>
45 #include <linux/ktime.h>
46 
47 #include "ap_bus.h"
48 
49 /* Some prototypes. */
50 static void ap_scan_bus(struct work_struct *);
51 static void ap_poll_all(unsigned long);
52 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
53 static int ap_poll_thread_start(void);
54 static void ap_poll_thread_stop(void);
55 static void ap_request_timeout(unsigned long);
56 static inline void ap_schedule_poll_timer(void);
57 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
58 static int ap_device_remove(struct device *dev);
59 static int ap_device_probe(struct device *dev);
60 static void ap_interrupt_handler(void *unused1, void *unused2);
61 static void ap_reset(struct ap_device *ap_dev);
62 static void ap_config_timeout(unsigned long ptr);
63 static int ap_select_domain(void);
64 
65 /*
66  * Module description.
67  */
68 MODULE_AUTHOR("IBM Corporation");
69 MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
70 		   "Copyright 2006 IBM Corporation");
71 MODULE_LICENSE("GPL");
72 
73 /*
74  * Module parameter
75  */
76 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
77 module_param_named(domain, ap_domain_index, int, 0000);
78 MODULE_PARM_DESC(domain, "domain index for ap devices");
79 EXPORT_SYMBOL(ap_domain_index);
80 
81 static int ap_thread_flag = 0;
82 module_param_named(poll_thread, ap_thread_flag, int, 0000);
83 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
84 
85 static struct device *ap_root_device = NULL;
86 static DEFINE_SPINLOCK(ap_device_list_lock);
87 static LIST_HEAD(ap_device_list);
88 
89 /*
90  * Workqueue & timer for bus rescan.
91  */
92 static struct workqueue_struct *ap_work_queue;
93 static struct timer_list ap_config_timer;
94 static int ap_config_time = AP_CONFIG_TIME;
95 static DECLARE_WORK(ap_config_work, ap_scan_bus);
96 
97 /*
98  * Tasklet & timer for AP request polling and interrupts
99  */
100 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
101 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
102 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
103 static struct task_struct *ap_poll_kthread = NULL;
104 static DEFINE_MUTEX(ap_poll_thread_mutex);
105 static DEFINE_SPINLOCK(ap_poll_timer_lock);
106 static void *ap_interrupt_indicator;
107 static struct hrtimer ap_poll_timer;
108 /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
109  * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
110 static unsigned long long poll_timeout = 250000;
111 
112 /* Suspend flag */
113 static int ap_suspend_flag;
114 /* Flag to check if domain was set through module parameter domain=. This is
115  * important when supsend and resume is done in a z/VM environment where the
116  * domain might change. */
117 static int user_set_domain = 0;
118 static struct bus_type ap_bus_type;
119 
120 /**
121  * ap_using_interrupts() - Returns non-zero if interrupt support is
122  * available.
123  */
124 static inline int ap_using_interrupts(void)
125 {
126 	return ap_interrupt_indicator != NULL;
127 }
128 
129 /**
130  * ap_intructions_available() - Test if AP instructions are available.
131  *
132  * Returns 0 if the AP instructions are installed.
133  */
134 static inline int ap_instructions_available(void)
135 {
136 	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
137 	register unsigned long reg1 asm ("1") = -ENODEV;
138 	register unsigned long reg2 asm ("2") = 0UL;
139 
140 	asm volatile(
141 		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
142 		"0: la    %1,0\n"
143 		"1:\n"
144 		EX_TABLE(0b, 1b)
145 		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
146 	return reg1;
147 }
148 
149 /**
150  * ap_interrupts_available(): Test if AP interrupts are available.
151  *
152  * Returns 1 if AP interrupts are available.
153  */
154 static int ap_interrupts_available(void)
155 {
156 	unsigned long long facility_bits[2];
157 
158 	if (stfle(facility_bits, 2) <= 1)
159 		return 0;
160 	if (!(facility_bits[0] & (1ULL << 61)) ||
161 	    !(facility_bits[1] & (1ULL << 62)))
162 		return 0;
163 	return 1;
164 }
165 
166 /**
167  * ap_test_queue(): Test adjunct processor queue.
168  * @qid: The AP queue number
169  * @queue_depth: Pointer to queue depth value
170  * @device_type: Pointer to device type value
171  *
172  * Returns AP queue status structure.
173  */
174 static inline struct ap_queue_status
175 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
176 {
177 	register unsigned long reg0 asm ("0") = qid;
178 	register struct ap_queue_status reg1 asm ("1");
179 	register unsigned long reg2 asm ("2") = 0UL;
180 
181 	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
182 		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
183 	*device_type = (int) (reg2 >> 24);
184 	*queue_depth = (int) (reg2 & 0xff);
185 	return reg1;
186 }
187 
188 /**
189  * ap_reset_queue(): Reset adjunct processor queue.
190  * @qid: The AP queue number
191  *
192  * Returns AP queue status structure.
193  */
194 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
195 {
196 	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
197 	register struct ap_queue_status reg1 asm ("1");
198 	register unsigned long reg2 asm ("2") = 0UL;
199 
200 	asm volatile(
201 		".long 0xb2af0000"		/* PQAP(RAPQ) */
202 		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
203 	return reg1;
204 }
205 
206 #ifdef CONFIG_64BIT
207 /**
208  * ap_queue_interruption_control(): Enable interruption for a specific AP.
209  * @qid: The AP queue number
210  * @ind: The notification indicator byte
211  *
212  * Returns AP queue status.
213  */
214 static inline struct ap_queue_status
215 ap_queue_interruption_control(ap_qid_t qid, void *ind)
216 {
217 	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
218 	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
219 	register struct ap_queue_status reg1_out asm ("1");
220 	register void *reg2 asm ("2") = ind;
221 	asm volatile(
222 		".long 0xb2af0000"		/* PQAP(RAPQ) */
223 		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
224 		:
225 		: "cc" );
226 	return reg1_out;
227 }
228 #endif
229 
230 /**
231  * ap_queue_enable_interruption(): Enable interruption on an AP.
232  * @qid: The AP queue number
233  * @ind: the notification indicator byte
234  *
235  * Enables interruption on AP queue via ap_queue_interruption_control(). Based
236  * on the return value it waits a while and tests the AP queue if interrupts
237  * have been switched on using ap_test_queue().
238  */
239 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
240 {
241 #ifdef CONFIG_64BIT
242 	struct ap_queue_status status;
243 	int t_depth, t_device_type, rc, i;
244 
245 	rc = -EBUSY;
246 	status = ap_queue_interruption_control(qid, ind);
247 
248 	for (i = 0; i < AP_MAX_RESET; i++) {
249 		switch (status.response_code) {
250 		case AP_RESPONSE_NORMAL:
251 			if (status.int_enabled)
252 				return 0;
253 			break;
254 		case AP_RESPONSE_RESET_IN_PROGRESS:
255 		case AP_RESPONSE_BUSY:
256 			break;
257 		case AP_RESPONSE_Q_NOT_AVAIL:
258 		case AP_RESPONSE_DECONFIGURED:
259 		case AP_RESPONSE_CHECKSTOPPED:
260 		case AP_RESPONSE_INVALID_ADDRESS:
261 			return -ENODEV;
262 		case AP_RESPONSE_OTHERWISE_CHANGED:
263 			if (status.int_enabled)
264 				return 0;
265 			break;
266 		default:
267 			break;
268 		}
269 		if (i < AP_MAX_RESET - 1) {
270 			udelay(5);
271 			status = ap_test_queue(qid, &t_depth, &t_device_type);
272 		}
273 	}
274 	return rc;
275 #else
276 	return -EINVAL;
277 #endif
278 }
279 
280 /**
281  * __ap_send(): Send message to adjunct processor queue.
282  * @qid: The AP queue number
283  * @psmid: The program supplied message identifier
284  * @msg: The message text
285  * @length: The message length
286  * @special: Special Bit
287  *
288  * Returns AP queue status structure.
289  * Condition code 1 on NQAP can't happen because the L bit is 1.
290  * Condition code 2 on NQAP also means the send is incomplete,
291  * because a segment boundary was reached. The NQAP is repeated.
292  */
293 static inline struct ap_queue_status
294 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
295 	  unsigned int special)
296 {
297 	typedef struct { char _[length]; } msgblock;
298 	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
299 	register struct ap_queue_status reg1 asm ("1");
300 	register unsigned long reg2 asm ("2") = (unsigned long) msg;
301 	register unsigned long reg3 asm ("3") = (unsigned long) length;
302 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
303 	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
304 
305 	if (special == 1)
306 		reg0 |= 0x400000UL;
307 
308 	asm volatile (
309 		"0: .long 0xb2ad0042\n"		/* DQAP */
310 		"   brc   2,0b"
311 		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
312 		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
313 		: "cc" );
314 	return reg1;
315 }
316 
317 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
318 {
319 	struct ap_queue_status status;
320 
321 	status = __ap_send(qid, psmid, msg, length, 0);
322 	switch (status.response_code) {
323 	case AP_RESPONSE_NORMAL:
324 		return 0;
325 	case AP_RESPONSE_Q_FULL:
326 	case AP_RESPONSE_RESET_IN_PROGRESS:
327 		return -EBUSY;
328 	case AP_RESPONSE_REQ_FAC_NOT_INST:
329 		return -EINVAL;
330 	default:	/* Device is gone. */
331 		return -ENODEV;
332 	}
333 }
334 EXPORT_SYMBOL(ap_send);
335 
336 /**
337  * __ap_recv(): Receive message from adjunct processor queue.
338  * @qid: The AP queue number
339  * @psmid: Pointer to program supplied message identifier
340  * @msg: The message text
341  * @length: The message length
342  *
343  * Returns AP queue status structure.
344  * Condition code 1 on DQAP means the receive has taken place
345  * but only partially.	The response is incomplete, hence the
346  * DQAP is repeated.
347  * Condition code 2 on DQAP also means the receive is incomplete,
348  * this time because a segment boundary was reached. Again, the
349  * DQAP is repeated.
350  * Note that gpr2 is used by the DQAP instruction to keep track of
351  * any 'residual' length, in case the instruction gets interrupted.
352  * Hence it gets zeroed before the instruction.
353  */
354 static inline struct ap_queue_status
355 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
356 {
357 	typedef struct { char _[length]; } msgblock;
358 	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
359 	register struct ap_queue_status reg1 asm ("1");
360 	register unsigned long reg2 asm("2") = 0UL;
361 	register unsigned long reg4 asm("4") = (unsigned long) msg;
362 	register unsigned long reg5 asm("5") = (unsigned long) length;
363 	register unsigned long reg6 asm("6") = 0UL;
364 	register unsigned long reg7 asm("7") = 0UL;
365 
366 
367 	asm volatile(
368 		"0: .long 0xb2ae0064\n"
369 		"   brc   6,0b\n"
370 		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
371 		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
372 		"=m" (*(msgblock *) msg) : : "cc" );
373 	*psmid = (((unsigned long long) reg6) << 32) + reg7;
374 	return reg1;
375 }
376 
377 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
378 {
379 	struct ap_queue_status status;
380 
381 	status = __ap_recv(qid, psmid, msg, length);
382 	switch (status.response_code) {
383 	case AP_RESPONSE_NORMAL:
384 		return 0;
385 	case AP_RESPONSE_NO_PENDING_REPLY:
386 		if (status.queue_empty)
387 			return -ENOENT;
388 		return -EBUSY;
389 	case AP_RESPONSE_RESET_IN_PROGRESS:
390 		return -EBUSY;
391 	default:
392 		return -ENODEV;
393 	}
394 }
395 EXPORT_SYMBOL(ap_recv);
396 
397 /**
398  * ap_query_queue(): Check if an AP queue is available.
399  * @qid: The AP queue number
400  * @queue_depth: Pointer to queue depth value
401  * @device_type: Pointer to device type value
402  *
403  * The test is repeated for AP_MAX_RESET times.
404  */
405 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
406 {
407 	struct ap_queue_status status;
408 	int t_depth, t_device_type, rc, i;
409 
410 	rc = -EBUSY;
411 	for (i = 0; i < AP_MAX_RESET; i++) {
412 		status = ap_test_queue(qid, &t_depth, &t_device_type);
413 		switch (status.response_code) {
414 		case AP_RESPONSE_NORMAL:
415 			*queue_depth = t_depth + 1;
416 			*device_type = t_device_type;
417 			rc = 0;
418 			break;
419 		case AP_RESPONSE_Q_NOT_AVAIL:
420 			rc = -ENODEV;
421 			break;
422 		case AP_RESPONSE_RESET_IN_PROGRESS:
423 			break;
424 		case AP_RESPONSE_DECONFIGURED:
425 			rc = -ENODEV;
426 			break;
427 		case AP_RESPONSE_CHECKSTOPPED:
428 			rc = -ENODEV;
429 			break;
430 		case AP_RESPONSE_INVALID_ADDRESS:
431 			rc = -ENODEV;
432 			break;
433 		case AP_RESPONSE_OTHERWISE_CHANGED:
434 			break;
435 		case AP_RESPONSE_BUSY:
436 			break;
437 		default:
438 			BUG();
439 		}
440 		if (rc != -EBUSY)
441 			break;
442 		if (i < AP_MAX_RESET - 1)
443 			udelay(5);
444 	}
445 	return rc;
446 }
447 
448 /**
449  * ap_init_queue(): Reset an AP queue.
450  * @qid: The AP queue number
451  *
452  * Reset an AP queue and wait for it to become available again.
453  */
454 static int ap_init_queue(ap_qid_t qid)
455 {
456 	struct ap_queue_status status;
457 	int rc, dummy, i;
458 
459 	rc = -ENODEV;
460 	status = ap_reset_queue(qid);
461 	for (i = 0; i < AP_MAX_RESET; i++) {
462 		switch (status.response_code) {
463 		case AP_RESPONSE_NORMAL:
464 			if (status.queue_empty)
465 				rc = 0;
466 			break;
467 		case AP_RESPONSE_Q_NOT_AVAIL:
468 		case AP_RESPONSE_DECONFIGURED:
469 		case AP_RESPONSE_CHECKSTOPPED:
470 			i = AP_MAX_RESET;	/* return with -ENODEV */
471 			break;
472 		case AP_RESPONSE_RESET_IN_PROGRESS:
473 			rc = -EBUSY;
474 		case AP_RESPONSE_BUSY:
475 		default:
476 			break;
477 		}
478 		if (rc != -ENODEV && rc != -EBUSY)
479 			break;
480 		if (i < AP_MAX_RESET - 1) {
481 			udelay(5);
482 			status = ap_test_queue(qid, &dummy, &dummy);
483 		}
484 	}
485 	if (rc == 0 && ap_using_interrupts()) {
486 		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
487 		/* If interruption mode is supported by the machine,
488 		* but an AP can not be enabled for interruption then
489 		* the AP will be discarded.    */
490 		if (rc)
491 			pr_err("Registering adapter interrupts for "
492 			       "AP %d failed\n", AP_QID_DEVICE(qid));
493 	}
494 	return rc;
495 }
496 
497 /**
498  * ap_increase_queue_count(): Arm request timeout.
499  * @ap_dev: Pointer to an AP device.
500  *
501  * Arm request timeout if an AP device was idle and a new request is submitted.
502  */
503 static void ap_increase_queue_count(struct ap_device *ap_dev)
504 {
505 	int timeout = ap_dev->drv->request_timeout;
506 
507 	ap_dev->queue_count++;
508 	if (ap_dev->queue_count == 1) {
509 		mod_timer(&ap_dev->timeout, jiffies + timeout);
510 		ap_dev->reset = AP_RESET_ARMED;
511 	}
512 }
513 
514 /**
515  * ap_decrease_queue_count(): Decrease queue count.
516  * @ap_dev: Pointer to an AP device.
517  *
518  * If AP device is still alive, re-schedule request timeout if there are still
519  * pending requests.
520  */
521 static void ap_decrease_queue_count(struct ap_device *ap_dev)
522 {
523 	int timeout = ap_dev->drv->request_timeout;
524 
525 	ap_dev->queue_count--;
526 	if (ap_dev->queue_count > 0)
527 		mod_timer(&ap_dev->timeout, jiffies + timeout);
528 	else
529 		/*
530 		 * The timeout timer should to be disabled now - since
531 		 * del_timer_sync() is very expensive, we just tell via the
532 		 * reset flag to ignore the pending timeout timer.
533 		 */
534 		ap_dev->reset = AP_RESET_IGNORE;
535 }
536 
537 /*
538  * AP device related attributes.
539  */
540 static ssize_t ap_hwtype_show(struct device *dev,
541 			      struct device_attribute *attr, char *buf)
542 {
543 	struct ap_device *ap_dev = to_ap_dev(dev);
544 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
545 }
546 
547 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
548 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
549 			     char *buf)
550 {
551 	struct ap_device *ap_dev = to_ap_dev(dev);
552 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
553 }
554 
555 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
556 static ssize_t ap_request_count_show(struct device *dev,
557 				     struct device_attribute *attr,
558 				     char *buf)
559 {
560 	struct ap_device *ap_dev = to_ap_dev(dev);
561 	int rc;
562 
563 	spin_lock_bh(&ap_dev->lock);
564 	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
565 	spin_unlock_bh(&ap_dev->lock);
566 	return rc;
567 }
568 
569 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
570 
571 static ssize_t ap_modalias_show(struct device *dev,
572 				struct device_attribute *attr, char *buf)
573 {
574 	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
575 }
576 
577 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
578 
579 static struct attribute *ap_dev_attrs[] = {
580 	&dev_attr_hwtype.attr,
581 	&dev_attr_depth.attr,
582 	&dev_attr_request_count.attr,
583 	&dev_attr_modalias.attr,
584 	NULL
585 };
586 static struct attribute_group ap_dev_attr_group = {
587 	.attrs = ap_dev_attrs
588 };
589 
590 /**
591  * ap_bus_match()
592  * @dev: Pointer to device
593  * @drv: Pointer to device_driver
594  *
595  * AP bus driver registration/unregistration.
596  */
597 static int ap_bus_match(struct device *dev, struct device_driver *drv)
598 {
599 	struct ap_device *ap_dev = to_ap_dev(dev);
600 	struct ap_driver *ap_drv = to_ap_drv(drv);
601 	struct ap_device_id *id;
602 
603 	/*
604 	 * Compare device type of the device with the list of
605 	 * supported types of the device_driver.
606 	 */
607 	for (id = ap_drv->ids; id->match_flags; id++) {
608 		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
609 		    (id->dev_type != ap_dev->device_type))
610 			continue;
611 		return 1;
612 	}
613 	return 0;
614 }
615 
616 /**
617  * ap_uevent(): Uevent function for AP devices.
618  * @dev: Pointer to device
619  * @env: Pointer to kobj_uevent_env
620  *
621  * It sets up a single environment variable DEV_TYPE which contains the
622  * hardware device type.
623  */
624 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
625 {
626 	struct ap_device *ap_dev = to_ap_dev(dev);
627 	int retval = 0;
628 
629 	if (!ap_dev)
630 		return -ENODEV;
631 
632 	/* Set up DEV_TYPE environment variable. */
633 	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
634 	if (retval)
635 		return retval;
636 
637 	/* Add MODALIAS= */
638 	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
639 
640 	return retval;
641 }
642 
643 static int ap_bus_suspend(struct device *dev, pm_message_t state)
644 {
645 	struct ap_device *ap_dev = to_ap_dev(dev);
646 	unsigned long flags;
647 
648 	if (!ap_suspend_flag) {
649 		ap_suspend_flag = 1;
650 
651 		/* Disable scanning for devices, thus we do not want to scan
652 		 * for them after removing.
653 		 */
654 		del_timer_sync(&ap_config_timer);
655 		if (ap_work_queue != NULL) {
656 			destroy_workqueue(ap_work_queue);
657 			ap_work_queue = NULL;
658 		}
659 
660 		tasklet_disable(&ap_tasklet);
661 	}
662 	/* Poll on the device until all requests are finished. */
663 	do {
664 		flags = 0;
665 		spin_lock_bh(&ap_dev->lock);
666 		__ap_poll_device(ap_dev, &flags);
667 		spin_unlock_bh(&ap_dev->lock);
668 	} while ((flags & 1) || (flags & 2));
669 
670 	spin_lock_bh(&ap_dev->lock);
671 	ap_dev->unregistered = 1;
672 	spin_unlock_bh(&ap_dev->lock);
673 
674 	return 0;
675 }
676 
677 static int ap_bus_resume(struct device *dev)
678 {
679 	int rc = 0;
680 	struct ap_device *ap_dev = to_ap_dev(dev);
681 
682 	if (ap_suspend_flag) {
683 		ap_suspend_flag = 0;
684 		if (!ap_interrupts_available())
685 			ap_interrupt_indicator = NULL;
686 		if (!user_set_domain) {
687 			ap_domain_index = -1;
688 			ap_select_domain();
689 		}
690 		init_timer(&ap_config_timer);
691 		ap_config_timer.function = ap_config_timeout;
692 		ap_config_timer.data = 0;
693 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
694 		add_timer(&ap_config_timer);
695 		ap_work_queue = create_singlethread_workqueue("kapwork");
696 		if (!ap_work_queue)
697 			return -ENOMEM;
698 		tasklet_enable(&ap_tasklet);
699 		if (!ap_using_interrupts())
700 			ap_schedule_poll_timer();
701 		else
702 			tasklet_schedule(&ap_tasklet);
703 		if (ap_thread_flag)
704 			rc = ap_poll_thread_start();
705 	}
706 	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
707 		spin_lock_bh(&ap_dev->lock);
708 		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
709 				       ap_domain_index);
710 		spin_unlock_bh(&ap_dev->lock);
711 	}
712 	queue_work(ap_work_queue, &ap_config_work);
713 
714 	return rc;
715 }
716 
717 static struct bus_type ap_bus_type = {
718 	.name = "ap",
719 	.match = &ap_bus_match,
720 	.uevent = &ap_uevent,
721 	.suspend = ap_bus_suspend,
722 	.resume = ap_bus_resume
723 };
724 
725 static int ap_device_probe(struct device *dev)
726 {
727 	struct ap_device *ap_dev = to_ap_dev(dev);
728 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
729 	int rc;
730 
731 	ap_dev->drv = ap_drv;
732 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
733 	if (!rc) {
734 		spin_lock_bh(&ap_device_list_lock);
735 		list_add(&ap_dev->list, &ap_device_list);
736 		spin_unlock_bh(&ap_device_list_lock);
737 	}
738 	return rc;
739 }
740 
741 /**
742  * __ap_flush_queue(): Flush requests.
743  * @ap_dev: Pointer to the AP device
744  *
745  * Flush all requests from the request/pending queue of an AP device.
746  */
747 static void __ap_flush_queue(struct ap_device *ap_dev)
748 {
749 	struct ap_message *ap_msg, *next;
750 
751 	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
752 		list_del_init(&ap_msg->list);
753 		ap_dev->pendingq_count--;
754 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
755 	}
756 	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
757 		list_del_init(&ap_msg->list);
758 		ap_dev->requestq_count--;
759 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
760 	}
761 }
762 
763 void ap_flush_queue(struct ap_device *ap_dev)
764 {
765 	spin_lock_bh(&ap_dev->lock);
766 	__ap_flush_queue(ap_dev);
767 	spin_unlock_bh(&ap_dev->lock);
768 }
769 EXPORT_SYMBOL(ap_flush_queue);
770 
771 static int ap_device_remove(struct device *dev)
772 {
773 	struct ap_device *ap_dev = to_ap_dev(dev);
774 	struct ap_driver *ap_drv = ap_dev->drv;
775 
776 	ap_flush_queue(ap_dev);
777 	del_timer_sync(&ap_dev->timeout);
778 	spin_lock_bh(&ap_device_list_lock);
779 	list_del_init(&ap_dev->list);
780 	spin_unlock_bh(&ap_device_list_lock);
781 	if (ap_drv->remove)
782 		ap_drv->remove(ap_dev);
783 	spin_lock_bh(&ap_dev->lock);
784 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
785 	spin_unlock_bh(&ap_dev->lock);
786 	return 0;
787 }
788 
789 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
790 		       char *name)
791 {
792 	struct device_driver *drv = &ap_drv->driver;
793 
794 	drv->bus = &ap_bus_type;
795 	drv->probe = ap_device_probe;
796 	drv->remove = ap_device_remove;
797 	drv->owner = owner;
798 	drv->name = name;
799 	return driver_register(drv);
800 }
801 EXPORT_SYMBOL(ap_driver_register);
802 
803 void ap_driver_unregister(struct ap_driver *ap_drv)
804 {
805 	driver_unregister(&ap_drv->driver);
806 }
807 EXPORT_SYMBOL(ap_driver_unregister);
808 
809 /*
810  * AP bus attributes.
811  */
812 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
813 {
814 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
815 }
816 
817 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
818 
819 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
820 {
821 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
822 }
823 
824 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
825 {
826 	return snprintf(buf, PAGE_SIZE, "%d\n",
827 			ap_using_interrupts() ? 1 : 0);
828 }
829 
830 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
831 
832 static ssize_t ap_config_time_store(struct bus_type *bus,
833 				    const char *buf, size_t count)
834 {
835 	int time;
836 
837 	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
838 		return -EINVAL;
839 	ap_config_time = time;
840 	if (!timer_pending(&ap_config_timer) ||
841 	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
842 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
843 		add_timer(&ap_config_timer);
844 	}
845 	return count;
846 }
847 
848 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
849 
850 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
851 {
852 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
853 }
854 
855 static ssize_t ap_poll_thread_store(struct bus_type *bus,
856 				    const char *buf, size_t count)
857 {
858 	int flag, rc;
859 
860 	if (sscanf(buf, "%d\n", &flag) != 1)
861 		return -EINVAL;
862 	if (flag) {
863 		rc = ap_poll_thread_start();
864 		if (rc)
865 			return rc;
866 	}
867 	else
868 		ap_poll_thread_stop();
869 	return count;
870 }
871 
872 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
873 
874 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
875 {
876 	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
877 }
878 
879 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
880 				  size_t count)
881 {
882 	unsigned long long time;
883 	ktime_t hr_time;
884 
885 	/* 120 seconds = maximum poll interval */
886 	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
887 	    time > 120000000000ULL)
888 		return -EINVAL;
889 	poll_timeout = time;
890 	hr_time = ktime_set(0, poll_timeout);
891 
892 	if (!hrtimer_is_queued(&ap_poll_timer) ||
893 	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
894 		hrtimer_set_expires(&ap_poll_timer, hr_time);
895 		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
896 	}
897 	return count;
898 }
899 
900 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
901 
902 static struct bus_attribute *const ap_bus_attrs[] = {
903 	&bus_attr_ap_domain,
904 	&bus_attr_config_time,
905 	&bus_attr_poll_thread,
906 	&bus_attr_ap_interrupts,
907 	&bus_attr_poll_timeout,
908 	NULL,
909 };
910 
911 /**
912  * ap_select_domain(): Select an AP domain.
913  *
914  * Pick one of the 16 AP domains.
915  */
916 static int ap_select_domain(void)
917 {
918 	int queue_depth, device_type, count, max_count, best_domain;
919 	int rc, i, j;
920 
921 	/*
922 	 * We want to use a single domain. Either the one specified with
923 	 * the "domain=" parameter or the domain with the maximum number
924 	 * of devices.
925 	 */
926 	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
927 		/* Domain has already been selected. */
928 		return 0;
929 	best_domain = -1;
930 	max_count = 0;
931 	for (i = 0; i < AP_DOMAINS; i++) {
932 		count = 0;
933 		for (j = 0; j < AP_DEVICES; j++) {
934 			ap_qid_t qid = AP_MKQID(j, i);
935 			rc = ap_query_queue(qid, &queue_depth, &device_type);
936 			if (rc)
937 				continue;
938 			count++;
939 		}
940 		if (count > max_count) {
941 			max_count = count;
942 			best_domain = i;
943 		}
944 	}
945 	if (best_domain >= 0){
946 		ap_domain_index = best_domain;
947 		return 0;
948 	}
949 	return -ENODEV;
950 }
951 
952 /**
953  * ap_probe_device_type(): Find the device type of an AP.
954  * @ap_dev: pointer to the AP device.
955  *
956  * Find the device type if query queue returned a device type of 0.
957  */
958 static int ap_probe_device_type(struct ap_device *ap_dev)
959 {
960 	static unsigned char msg[] = {
961 		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
962 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
963 		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
964 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
965 		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
966 		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
967 		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
968 		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
969 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
970 		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
971 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
972 		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
973 		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
974 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
975 		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
976 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
977 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
978 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
979 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
980 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
981 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
982 		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
983 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
984 		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
985 		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
986 		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
987 		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
988 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
989 		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
990 		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
991 		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
992 		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
993 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
994 		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
995 		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
996 		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
997 		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
998 		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
999 		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1000 		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1001 		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1002 		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1003 		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1004 		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1005 		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1006 	};
1007 	struct ap_queue_status status;
1008 	unsigned long long psmid;
1009 	char *reply;
1010 	int rc, i;
1011 
1012 	reply = (void *) get_zeroed_page(GFP_KERNEL);
1013 	if (!reply) {
1014 		rc = -ENOMEM;
1015 		goto out;
1016 	}
1017 
1018 	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1019 			   msg, sizeof(msg), 0);
1020 	if (status.response_code != AP_RESPONSE_NORMAL) {
1021 		rc = -ENODEV;
1022 		goto out_free;
1023 	}
1024 
1025 	/* Wait for the test message to complete. */
1026 	for (i = 0; i < 6; i++) {
1027 		mdelay(300);
1028 		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1029 		if (status.response_code == AP_RESPONSE_NORMAL &&
1030 		    psmid == 0x0102030405060708ULL)
1031 			break;
1032 	}
1033 	if (i < 6) {
1034 		/* Got an answer. */
1035 		if (reply[0] == 0x00 && reply[1] == 0x86)
1036 			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1037 		else
1038 			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1039 		rc = 0;
1040 	} else
1041 		rc = -ENODEV;
1042 
1043 out_free:
1044 	free_page((unsigned long) reply);
1045 out:
1046 	return rc;
1047 }
1048 
1049 static void ap_interrupt_handler(void *unused1, void *unused2)
1050 {
1051 	tasklet_schedule(&ap_tasklet);
1052 }
1053 
1054 /**
1055  * __ap_scan_bus(): Scan the AP bus.
1056  * @dev: Pointer to device
1057  * @data: Pointer to data
1058  *
1059  * Scan the AP bus for new devices.
1060  */
1061 static int __ap_scan_bus(struct device *dev, void *data)
1062 {
1063 	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1064 }
1065 
1066 static void ap_device_release(struct device *dev)
1067 {
1068 	struct ap_device *ap_dev = to_ap_dev(dev);
1069 
1070 	kfree(ap_dev);
1071 }
1072 
1073 static void ap_scan_bus(struct work_struct *unused)
1074 {
1075 	struct ap_device *ap_dev;
1076 	struct device *dev;
1077 	ap_qid_t qid;
1078 	int queue_depth, device_type;
1079 	int rc, i;
1080 
1081 	if (ap_select_domain() != 0)
1082 		return;
1083 	for (i = 0; i < AP_DEVICES; i++) {
1084 		qid = AP_MKQID(i, ap_domain_index);
1085 		dev = bus_find_device(&ap_bus_type, NULL,
1086 				      (void *)(unsigned long)qid,
1087 				      __ap_scan_bus);
1088 		rc = ap_query_queue(qid, &queue_depth, &device_type);
1089 		if (dev) {
1090 			if (rc == -EBUSY) {
1091 				set_current_state(TASK_UNINTERRUPTIBLE);
1092 				schedule_timeout(AP_RESET_TIMEOUT);
1093 				rc = ap_query_queue(qid, &queue_depth,
1094 						    &device_type);
1095 			}
1096 			ap_dev = to_ap_dev(dev);
1097 			spin_lock_bh(&ap_dev->lock);
1098 			if (rc || ap_dev->unregistered) {
1099 				spin_unlock_bh(&ap_dev->lock);
1100 				if (ap_dev->unregistered)
1101 					i--;
1102 				device_unregister(dev);
1103 				put_device(dev);
1104 				continue;
1105 			}
1106 			spin_unlock_bh(&ap_dev->lock);
1107 			put_device(dev);
1108 			continue;
1109 		}
1110 		if (rc)
1111 			continue;
1112 		rc = ap_init_queue(qid);
1113 		if (rc)
1114 			continue;
1115 		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1116 		if (!ap_dev)
1117 			break;
1118 		ap_dev->qid = qid;
1119 		ap_dev->queue_depth = queue_depth;
1120 		ap_dev->unregistered = 1;
1121 		spin_lock_init(&ap_dev->lock);
1122 		INIT_LIST_HEAD(&ap_dev->pendingq);
1123 		INIT_LIST_HEAD(&ap_dev->requestq);
1124 		INIT_LIST_HEAD(&ap_dev->list);
1125 		setup_timer(&ap_dev->timeout, ap_request_timeout,
1126 			    (unsigned long) ap_dev);
1127 		if (device_type == 0)
1128 			ap_probe_device_type(ap_dev);
1129 		else
1130 			ap_dev->device_type = device_type;
1131 
1132 		ap_dev->device.bus = &ap_bus_type;
1133 		ap_dev->device.parent = ap_root_device;
1134 		if (dev_set_name(&ap_dev->device, "card%02x",
1135 				 AP_QID_DEVICE(ap_dev->qid))) {
1136 			kfree(ap_dev);
1137 			continue;
1138 		}
1139 		ap_dev->device.release = ap_device_release;
1140 		rc = device_register(&ap_dev->device);
1141 		if (rc) {
1142 			put_device(&ap_dev->device);
1143 			continue;
1144 		}
1145 		/* Add device attributes. */
1146 		rc = sysfs_create_group(&ap_dev->device.kobj,
1147 					&ap_dev_attr_group);
1148 		if (!rc) {
1149 			spin_lock_bh(&ap_dev->lock);
1150 			ap_dev->unregistered = 0;
1151 			spin_unlock_bh(&ap_dev->lock);
1152 		}
1153 		else
1154 			device_unregister(&ap_dev->device);
1155 	}
1156 }
1157 
1158 static void
1159 ap_config_timeout(unsigned long ptr)
1160 {
1161 	queue_work(ap_work_queue, &ap_config_work);
1162 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1163 	add_timer(&ap_config_timer);
1164 }
1165 
1166 /**
1167  * ap_schedule_poll_timer(): Schedule poll timer.
1168  *
1169  * Set up the timer to run the poll tasklet
1170  */
1171 static inline void ap_schedule_poll_timer(void)
1172 {
1173 	ktime_t hr_time;
1174 
1175 	spin_lock_bh(&ap_poll_timer_lock);
1176 	if (ap_using_interrupts() || ap_suspend_flag)
1177 		goto out;
1178 	if (hrtimer_is_queued(&ap_poll_timer))
1179 		goto out;
1180 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1181 		hr_time = ktime_set(0, poll_timeout);
1182 		hrtimer_forward_now(&ap_poll_timer, hr_time);
1183 		hrtimer_restart(&ap_poll_timer);
1184 	}
1185 out:
1186 	spin_unlock_bh(&ap_poll_timer_lock);
1187 }
1188 
1189 /**
1190  * ap_poll_read(): Receive pending reply messages from an AP device.
1191  * @ap_dev: pointer to the AP device
1192  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1193  *	   required, bit 2^1 is set if the poll timer needs to get armed
1194  *
1195  * Returns 0 if the device is still present, -ENODEV if not.
1196  */
1197 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1198 {
1199 	struct ap_queue_status status;
1200 	struct ap_message *ap_msg;
1201 
1202 	if (ap_dev->queue_count <= 0)
1203 		return 0;
1204 	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1205 			   ap_dev->reply->message, ap_dev->reply->length);
1206 	switch (status.response_code) {
1207 	case AP_RESPONSE_NORMAL:
1208 		atomic_dec(&ap_poll_requests);
1209 		ap_decrease_queue_count(ap_dev);
1210 		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1211 			if (ap_msg->psmid != ap_dev->reply->psmid)
1212 				continue;
1213 			list_del_init(&ap_msg->list);
1214 			ap_dev->pendingq_count--;
1215 			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
1216 			break;
1217 		}
1218 		if (ap_dev->queue_count > 0)
1219 			*flags |= 1;
1220 		break;
1221 	case AP_RESPONSE_NO_PENDING_REPLY:
1222 		if (status.queue_empty) {
1223 			/* The card shouldn't forget requests but who knows. */
1224 			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1225 			ap_dev->queue_count = 0;
1226 			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1227 			ap_dev->requestq_count += ap_dev->pendingq_count;
1228 			ap_dev->pendingq_count = 0;
1229 		} else
1230 			*flags |= 2;
1231 		break;
1232 	default:
1233 		return -ENODEV;
1234 	}
1235 	return 0;
1236 }
1237 
1238 /**
1239  * ap_poll_write(): Send messages from the request queue to an AP device.
1240  * @ap_dev: pointer to the AP device
1241  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1242  *	   required, bit 2^1 is set if the poll timer needs to get armed
1243  *
1244  * Returns 0 if the device is still present, -ENODEV if not.
1245  */
1246 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1247 {
1248 	struct ap_queue_status status;
1249 	struct ap_message *ap_msg;
1250 
1251 	if (ap_dev->requestq_count <= 0 ||
1252 	    ap_dev->queue_count >= ap_dev->queue_depth)
1253 		return 0;
1254 	/* Start the next request on the queue. */
1255 	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1256 	status = __ap_send(ap_dev->qid, ap_msg->psmid,
1257 			   ap_msg->message, ap_msg->length, ap_msg->special);
1258 	switch (status.response_code) {
1259 	case AP_RESPONSE_NORMAL:
1260 		atomic_inc(&ap_poll_requests);
1261 		ap_increase_queue_count(ap_dev);
1262 		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1263 		ap_dev->requestq_count--;
1264 		ap_dev->pendingq_count++;
1265 		if (ap_dev->queue_count < ap_dev->queue_depth &&
1266 		    ap_dev->requestq_count > 0)
1267 			*flags |= 1;
1268 		*flags |= 2;
1269 		break;
1270 	case AP_RESPONSE_Q_FULL:
1271 	case AP_RESPONSE_RESET_IN_PROGRESS:
1272 		*flags |= 2;
1273 		break;
1274 	case AP_RESPONSE_MESSAGE_TOO_BIG:
1275 	case AP_RESPONSE_REQ_FAC_NOT_INST:
1276 		return -EINVAL;
1277 	default:
1278 		return -ENODEV;
1279 	}
1280 	return 0;
1281 }
1282 
1283 /**
1284  * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1285  * @ap_dev: pointer to the bus device
1286  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1287  *	   required, bit 2^1 is set if the poll timer needs to get armed
1288  *
1289  * Poll AP device for pending replies and send new messages. If either
1290  * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1291  * Returns 0.
1292  */
1293 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1294 {
1295 	int rc;
1296 
1297 	rc = ap_poll_read(ap_dev, flags);
1298 	if (rc)
1299 		return rc;
1300 	return ap_poll_write(ap_dev, flags);
1301 }
1302 
1303 /**
1304  * __ap_queue_message(): Queue a message to a device.
1305  * @ap_dev: pointer to the AP device
1306  * @ap_msg: the message to be queued
1307  *
1308  * Queue a message to a device. Returns 0 if successful.
1309  */
1310 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1311 {
1312 	struct ap_queue_status status;
1313 
1314 	if (list_empty(&ap_dev->requestq) &&
1315 	    ap_dev->queue_count < ap_dev->queue_depth) {
1316 		status = __ap_send(ap_dev->qid, ap_msg->psmid,
1317 				   ap_msg->message, ap_msg->length,
1318 				   ap_msg->special);
1319 		switch (status.response_code) {
1320 		case AP_RESPONSE_NORMAL:
1321 			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1322 			atomic_inc(&ap_poll_requests);
1323 			ap_dev->pendingq_count++;
1324 			ap_increase_queue_count(ap_dev);
1325 			ap_dev->total_request_count++;
1326 			break;
1327 		case AP_RESPONSE_Q_FULL:
1328 		case AP_RESPONSE_RESET_IN_PROGRESS:
1329 			list_add_tail(&ap_msg->list, &ap_dev->requestq);
1330 			ap_dev->requestq_count++;
1331 			ap_dev->total_request_count++;
1332 			return -EBUSY;
1333 		case AP_RESPONSE_REQ_FAC_NOT_INST:
1334 		case AP_RESPONSE_MESSAGE_TOO_BIG:
1335 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1336 			return -EINVAL;
1337 		default:	/* Device is gone. */
1338 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1339 			return -ENODEV;
1340 		}
1341 	} else {
1342 		list_add_tail(&ap_msg->list, &ap_dev->requestq);
1343 		ap_dev->requestq_count++;
1344 		ap_dev->total_request_count++;
1345 		return -EBUSY;
1346 	}
1347 	ap_schedule_poll_timer();
1348 	return 0;
1349 }
1350 
1351 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1352 {
1353 	unsigned long flags;
1354 	int rc;
1355 
1356 	spin_lock_bh(&ap_dev->lock);
1357 	if (!ap_dev->unregistered) {
1358 		/* Make room on the queue by polling for finished requests. */
1359 		rc = ap_poll_queue(ap_dev, &flags);
1360 		if (!rc)
1361 			rc = __ap_queue_message(ap_dev, ap_msg);
1362 		if (!rc)
1363 			wake_up(&ap_poll_wait);
1364 		if (rc == -ENODEV)
1365 			ap_dev->unregistered = 1;
1366 	} else {
1367 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1368 		rc = -ENODEV;
1369 	}
1370 	spin_unlock_bh(&ap_dev->lock);
1371 	if (rc == -ENODEV)
1372 		device_unregister(&ap_dev->device);
1373 }
1374 EXPORT_SYMBOL(ap_queue_message);
1375 
1376 /**
1377  * ap_cancel_message(): Cancel a crypto request.
1378  * @ap_dev: The AP device that has the message queued
1379  * @ap_msg: The message that is to be removed
1380  *
1381  * Cancel a crypto request. This is done by removing the request
1382  * from the device pending or request queue. Note that the
1383  * request stays on the AP queue. When it finishes the message
1384  * reply will be discarded because the psmid can't be found.
1385  */
1386 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1387 {
1388 	struct ap_message *tmp;
1389 
1390 	spin_lock_bh(&ap_dev->lock);
1391 	if (!list_empty(&ap_msg->list)) {
1392 		list_for_each_entry(tmp, &ap_dev->pendingq, list)
1393 			if (tmp->psmid == ap_msg->psmid) {
1394 				ap_dev->pendingq_count--;
1395 				goto found;
1396 			}
1397 		ap_dev->requestq_count--;
1398 	found:
1399 		list_del_init(&ap_msg->list);
1400 	}
1401 	spin_unlock_bh(&ap_dev->lock);
1402 }
1403 EXPORT_SYMBOL(ap_cancel_message);
1404 
1405 /**
1406  * ap_poll_timeout(): AP receive polling for finished AP requests.
1407  * @unused: Unused pointer.
1408  *
1409  * Schedules the AP tasklet using a high resolution timer.
1410  */
1411 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1412 {
1413 	tasklet_schedule(&ap_tasklet);
1414 	return HRTIMER_NORESTART;
1415 }
1416 
1417 /**
1418  * ap_reset(): Reset a not responding AP device.
1419  * @ap_dev: Pointer to the AP device
1420  *
1421  * Reset a not responding AP device and move all requests from the
1422  * pending queue to the request queue.
1423  */
1424 static void ap_reset(struct ap_device *ap_dev)
1425 {
1426 	int rc;
1427 
1428 	ap_dev->reset = AP_RESET_IGNORE;
1429 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1430 	ap_dev->queue_count = 0;
1431 	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1432 	ap_dev->requestq_count += ap_dev->pendingq_count;
1433 	ap_dev->pendingq_count = 0;
1434 	rc = ap_init_queue(ap_dev->qid);
1435 	if (rc == -ENODEV)
1436 		ap_dev->unregistered = 1;
1437 }
1438 
1439 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1440 {
1441 	if (!ap_dev->unregistered) {
1442 		if (ap_poll_queue(ap_dev, flags))
1443 			ap_dev->unregistered = 1;
1444 		if (ap_dev->reset == AP_RESET_DO)
1445 			ap_reset(ap_dev);
1446 	}
1447 	return 0;
1448 }
1449 
1450 /**
1451  * ap_poll_all(): Poll all AP devices.
1452  * @dummy: Unused variable
1453  *
1454  * Poll all AP devices on the bus in a round robin fashion. Continue
1455  * polling until bit 2^0 of the control flags is not set. If bit 2^1
1456  * of the control flags has been set arm the poll timer.
1457  */
1458 static void ap_poll_all(unsigned long dummy)
1459 {
1460 	unsigned long flags;
1461 	struct ap_device *ap_dev;
1462 
1463 	/* Reset the indicator if interrupts are used. Thus new interrupts can
1464 	 * be received. Doing it in the beginning of the tasklet is therefor
1465 	 * important that no requests on any AP get lost.
1466 	 */
1467 	if (ap_using_interrupts())
1468 		xchg((u8 *)ap_interrupt_indicator, 0);
1469 	do {
1470 		flags = 0;
1471 		spin_lock(&ap_device_list_lock);
1472 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1473 			spin_lock(&ap_dev->lock);
1474 			__ap_poll_device(ap_dev, &flags);
1475 			spin_unlock(&ap_dev->lock);
1476 		}
1477 		spin_unlock(&ap_device_list_lock);
1478 	} while (flags & 1);
1479 	if (flags & 2)
1480 		ap_schedule_poll_timer();
1481 }
1482 
1483 /**
1484  * ap_poll_thread(): Thread that polls for finished requests.
1485  * @data: Unused pointer
1486  *
1487  * AP bus poll thread. The purpose of this thread is to poll for
1488  * finished requests in a loop if there is a "free" cpu - that is
1489  * a cpu that doesn't have anything better to do. The polling stops
1490  * as soon as there is another task or if all messages have been
1491  * delivered.
1492  */
1493 static int ap_poll_thread(void *data)
1494 {
1495 	DECLARE_WAITQUEUE(wait, current);
1496 	unsigned long flags;
1497 	int requests;
1498 	struct ap_device *ap_dev;
1499 
1500 	set_user_nice(current, 19);
1501 	while (1) {
1502 		if (ap_suspend_flag)
1503 			return 0;
1504 		if (need_resched()) {
1505 			schedule();
1506 			continue;
1507 		}
1508 		add_wait_queue(&ap_poll_wait, &wait);
1509 		set_current_state(TASK_INTERRUPTIBLE);
1510 		if (kthread_should_stop())
1511 			break;
1512 		requests = atomic_read(&ap_poll_requests);
1513 		if (requests <= 0)
1514 			schedule();
1515 		set_current_state(TASK_RUNNING);
1516 		remove_wait_queue(&ap_poll_wait, &wait);
1517 
1518 		flags = 0;
1519 		spin_lock_bh(&ap_device_list_lock);
1520 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1521 			spin_lock(&ap_dev->lock);
1522 			__ap_poll_device(ap_dev, &flags);
1523 			spin_unlock(&ap_dev->lock);
1524 		}
1525 		spin_unlock_bh(&ap_device_list_lock);
1526 	}
1527 	set_current_state(TASK_RUNNING);
1528 	remove_wait_queue(&ap_poll_wait, &wait);
1529 	return 0;
1530 }
1531 
1532 static int ap_poll_thread_start(void)
1533 {
1534 	int rc;
1535 
1536 	if (ap_using_interrupts() || ap_suspend_flag)
1537 		return 0;
1538 	mutex_lock(&ap_poll_thread_mutex);
1539 	if (!ap_poll_kthread) {
1540 		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1541 		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1542 		if (rc)
1543 			ap_poll_kthread = NULL;
1544 	}
1545 	else
1546 		rc = 0;
1547 	mutex_unlock(&ap_poll_thread_mutex);
1548 	return rc;
1549 }
1550 
1551 static void ap_poll_thread_stop(void)
1552 {
1553 	mutex_lock(&ap_poll_thread_mutex);
1554 	if (ap_poll_kthread) {
1555 		kthread_stop(ap_poll_kthread);
1556 		ap_poll_kthread = NULL;
1557 	}
1558 	mutex_unlock(&ap_poll_thread_mutex);
1559 }
1560 
1561 /**
1562  * ap_request_timeout(): Handling of request timeouts
1563  * @data: Holds the AP device.
1564  *
1565  * Handles request timeouts.
1566  */
1567 static void ap_request_timeout(unsigned long data)
1568 {
1569 	struct ap_device *ap_dev = (struct ap_device *) data;
1570 
1571 	if (ap_dev->reset == AP_RESET_ARMED) {
1572 		ap_dev->reset = AP_RESET_DO;
1573 
1574 		if (ap_using_interrupts())
1575 			tasklet_schedule(&ap_tasklet);
1576 	}
1577 }
1578 
1579 static void ap_reset_domain(void)
1580 {
1581 	int i;
1582 
1583 	if (ap_domain_index != -1)
1584 		for (i = 0; i < AP_DEVICES; i++)
1585 			ap_reset_queue(AP_MKQID(i, ap_domain_index));
1586 }
1587 
1588 static void ap_reset_all(void)
1589 {
1590 	int i, j;
1591 
1592 	for (i = 0; i < AP_DOMAINS; i++)
1593 		for (j = 0; j < AP_DEVICES; j++)
1594 			ap_reset_queue(AP_MKQID(j, i));
1595 }
1596 
1597 static struct reset_call ap_reset_call = {
1598 	.fn = ap_reset_all,
1599 };
1600 
1601 /**
1602  * ap_module_init(): The module initialization code.
1603  *
1604  * Initializes the module.
1605  */
1606 int __init ap_module_init(void)
1607 {
1608 	int rc, i;
1609 
1610 	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1611 		pr_warning("%d is not a valid cryptographic domain\n",
1612 			   ap_domain_index);
1613 		return -EINVAL;
1614 	}
1615 	/* In resume callback we need to know if the user had set the domain.
1616 	 * If so, we can not just reset it.
1617 	 */
1618 	if (ap_domain_index >= 0)
1619 		user_set_domain = 1;
1620 
1621 	if (ap_instructions_available() != 0) {
1622 		pr_warning("The hardware system does not support "
1623 			   "AP instructions\n");
1624 		return -ENODEV;
1625 	}
1626 	if (ap_interrupts_available()) {
1627 		isc_register(AP_ISC);
1628 		ap_interrupt_indicator = s390_register_adapter_interrupt(
1629 			&ap_interrupt_handler, NULL, AP_ISC);
1630 		if (IS_ERR(ap_interrupt_indicator)) {
1631 			ap_interrupt_indicator = NULL;
1632 			isc_unregister(AP_ISC);
1633 		}
1634 	}
1635 
1636 	register_reset_call(&ap_reset_call);
1637 
1638 	/* Create /sys/bus/ap. */
1639 	rc = bus_register(&ap_bus_type);
1640 	if (rc)
1641 		goto out;
1642 	for (i = 0; ap_bus_attrs[i]; i++) {
1643 		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1644 		if (rc)
1645 			goto out_bus;
1646 	}
1647 
1648 	/* Create /sys/devices/ap. */
1649 	ap_root_device = root_device_register("ap");
1650 	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1651 	if (rc)
1652 		goto out_bus;
1653 
1654 	ap_work_queue = create_singlethread_workqueue("kapwork");
1655 	if (!ap_work_queue) {
1656 		rc = -ENOMEM;
1657 		goto out_root;
1658 	}
1659 
1660 	if (ap_select_domain() == 0)
1661 		ap_scan_bus(NULL);
1662 
1663 	/* Setup the AP bus rescan timer. */
1664 	init_timer(&ap_config_timer);
1665 	ap_config_timer.function = ap_config_timeout;
1666 	ap_config_timer.data = 0;
1667 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1668 	add_timer(&ap_config_timer);
1669 
1670 	/* Setup the high resultion poll timer.
1671 	 * If we are running under z/VM adjust polling to z/VM polling rate.
1672 	 */
1673 	if (MACHINE_IS_VM)
1674 		poll_timeout = 1500000;
1675 	spin_lock_init(&ap_poll_timer_lock);
1676 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1677 	ap_poll_timer.function = ap_poll_timeout;
1678 
1679 	/* Start the low priority AP bus poll thread. */
1680 	if (ap_thread_flag) {
1681 		rc = ap_poll_thread_start();
1682 		if (rc)
1683 			goto out_work;
1684 	}
1685 
1686 	return 0;
1687 
1688 out_work:
1689 	del_timer_sync(&ap_config_timer);
1690 	hrtimer_cancel(&ap_poll_timer);
1691 	destroy_workqueue(ap_work_queue);
1692 out_root:
1693 	root_device_unregister(ap_root_device);
1694 out_bus:
1695 	while (i--)
1696 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1697 	bus_unregister(&ap_bus_type);
1698 out:
1699 	unregister_reset_call(&ap_reset_call);
1700 	if (ap_using_interrupts()) {
1701 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1702 		isc_unregister(AP_ISC);
1703 	}
1704 	return rc;
1705 }
1706 
1707 static int __ap_match_all(struct device *dev, void *data)
1708 {
1709 	return 1;
1710 }
1711 
1712 /**
1713  * ap_modules_exit(): The module termination code
1714  *
1715  * Terminates the module.
1716  */
1717 void ap_module_exit(void)
1718 {
1719 	int i;
1720 	struct device *dev;
1721 
1722 	ap_reset_domain();
1723 	ap_poll_thread_stop();
1724 	del_timer_sync(&ap_config_timer);
1725 	hrtimer_cancel(&ap_poll_timer);
1726 	destroy_workqueue(ap_work_queue);
1727 	tasklet_kill(&ap_tasklet);
1728 	root_device_unregister(ap_root_device);
1729 	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1730 		    __ap_match_all)))
1731 	{
1732 		device_unregister(dev);
1733 		put_device(dev);
1734 	}
1735 	for (i = 0; ap_bus_attrs[i]; i++)
1736 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1737 	bus_unregister(&ap_bus_type);
1738 	unregister_reset_call(&ap_reset_call);
1739 	if (ap_using_interrupts()) {
1740 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1741 		isc_unregister(AP_ISC);
1742 	}
1743 }
1744 
1745 #ifndef CONFIG_ZCRYPT_MONOLITHIC
1746 module_init(ap_module_init);
1747 module_exit(ap_module_exit);
1748 #endif
1749