1 /*
2  * linux/drivers/s390/crypto/ap_bus.c
3  *
4  * Copyright (C) 2006 IBM Corporation
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
7  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
8  *	      Felix Beck <felix.beck@de.ibm.com>
9  *
10  * Adjunct processor bus.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25  */
26 
27 #define KMSG_COMPONENT "ap"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/slab.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/mutex.h>
40 #include <asm/reset.h>
41 #include <asm/airq.h>
42 #include <asm/atomic.h>
43 #include <asm/system.h>
44 #include <asm/isc.h>
45 #include <linux/hrtimer.h>
46 #include <linux/ktime.h>
47 
48 #include "ap_bus.h"
49 
50 /* Some prototypes. */
51 static void ap_scan_bus(struct work_struct *);
52 static void ap_poll_all(unsigned long);
53 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
54 static int ap_poll_thread_start(void);
55 static void ap_poll_thread_stop(void);
56 static void ap_request_timeout(unsigned long);
57 static inline void ap_schedule_poll_timer(void);
58 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
59 static int ap_device_remove(struct device *dev);
60 static int ap_device_probe(struct device *dev);
61 static void ap_interrupt_handler(void *unused1, void *unused2);
62 static void ap_reset(struct ap_device *ap_dev);
63 static void ap_config_timeout(unsigned long ptr);
64 static int ap_select_domain(void);
65 
66 /*
67  * Module description.
68  */
69 MODULE_AUTHOR("IBM Corporation");
70 MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
71 		   "Copyright 2006 IBM Corporation");
72 MODULE_LICENSE("GPL");
73 
74 /*
75  * Module parameter
76  */
77 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
78 module_param_named(domain, ap_domain_index, int, 0000);
79 MODULE_PARM_DESC(domain, "domain index for ap devices");
80 EXPORT_SYMBOL(ap_domain_index);
81 
82 static int ap_thread_flag = 0;
83 module_param_named(poll_thread, ap_thread_flag, int, 0000);
84 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
85 
86 static struct device *ap_root_device = NULL;
87 static DEFINE_SPINLOCK(ap_device_list_lock);
88 static LIST_HEAD(ap_device_list);
89 
90 /*
91  * Workqueue & timer for bus rescan.
92  */
93 static struct workqueue_struct *ap_work_queue;
94 static struct timer_list ap_config_timer;
95 static int ap_config_time = AP_CONFIG_TIME;
96 static DECLARE_WORK(ap_config_work, ap_scan_bus);
97 
98 /*
99  * Tasklet & timer for AP request polling and interrupts
100  */
101 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
102 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
103 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
104 static struct task_struct *ap_poll_kthread = NULL;
105 static DEFINE_MUTEX(ap_poll_thread_mutex);
106 static DEFINE_SPINLOCK(ap_poll_timer_lock);
107 static void *ap_interrupt_indicator;
108 static struct hrtimer ap_poll_timer;
109 /* In LPAR, poll with 4kHz frequency, i.e. every 250000 nanoseconds.
110  * Under z/VM, change to 1500000 nanoseconds to match the z/VM polling rate. */
111 static unsigned long long poll_timeout = 250000;
112 
113 /* Suspend flag */
114 static int ap_suspend_flag;
115 /* Flag to note whether the domain was set through the module parameter
116  * domain=. This is important when suspend and resume are done in a z/VM
117  * environment where the domain might change. */
118 static int user_set_domain = 0;
119 static struct bus_type ap_bus_type;
120 
121 /**
122  * ap_using_interrupts(): Returns non-zero if interrupt support is
123  * available.
124  */
125 static inline int ap_using_interrupts(void)
126 {
127 	return ap_interrupt_indicator != NULL;
128 }
129 
130 /**
131  * ap_instructions_available(): Test if AP instructions are available.
132  *
133  * Returns 0 if the AP instructions are installed.
134  */
135 static inline int ap_instructions_available(void)
136 {
137 	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
138 	register unsigned long reg1 asm ("1") = -ENODEV;
139 	register unsigned long reg2 asm ("2") = 0UL;
140 
141 	asm volatile(
142 		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
143 		"0: la    %1,0\n"
144 		"1:\n"
145 		EX_TABLE(0b, 1b)
146 		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
147 	return reg1;
148 }
149 
150 /**
151  * ap_interrupts_available(): Test if AP interrupts are available.
152  *
153  * Returns 1 if AP interrupts are available.
154  */
155 static int ap_interrupts_available(void)
156 {
157 	unsigned long long facility_bits[2];
158 
159 	if (stfle(facility_bits, 2) <= 1)
160 		return 0;
161 	if (!(facility_bits[0] & (1ULL << 61)) ||
162 	    !(facility_bits[1] & (1ULL << 62)))
163 		return 0;
164 	return 1;
165 }
166 
167 /**
168  * ap_test_queue(): Test adjunct processor queue.
169  * @qid: The AP queue number
170  * @queue_depth: Pointer to queue depth value
171  * @device_type: Pointer to device type value
172  *
173  * Returns AP queue status structure.
174  */
175 static inline struct ap_queue_status
176 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
177 {
178 	register unsigned long reg0 asm ("0") = qid;
179 	register struct ap_queue_status reg1 asm ("1");
180 	register unsigned long reg2 asm ("2") = 0UL;
181 
182 	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
183 		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
184 	*device_type = (int) (reg2 >> 24);
185 	*queue_depth = (int) (reg2 & 0xff);
186 	return reg1;
187 }
188 
189 /**
190  * ap_reset_queue(): Reset adjunct processor queue.
191  * @qid: The AP queue number
192  *
193  * Returns AP queue status structure.
194  */
195 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
196 {
197 	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
198 	register struct ap_queue_status reg1 asm ("1");
199 	register unsigned long reg2 asm ("2") = 0UL;
200 
201 	asm volatile(
202 		".long 0xb2af0000"		/* PQAP(RAPQ) */
203 		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
204 	return reg1;
205 }
206 
207 #ifdef CONFIG_64BIT
208 /**
209  * ap_queue_interruption_control(): Enable interruption for a specific AP.
210  * @qid: The AP queue number
211  * @ind: The notification indicator byte
212  *
213  * Returns AP queue status.
214  */
215 static inline struct ap_queue_status
216 ap_queue_interruption_control(ap_qid_t qid, void *ind)
217 {
218 	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
219 	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
220 	register struct ap_queue_status reg1_out asm ("1");
221 	register void *reg2 asm ("2") = ind;
222 	asm volatile(
223 		".long 0xb2af0000"		/* PQAP(AQIC) */
224 		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
225 		:
226 		: "cc" );
227 	return reg1_out;
228 }
229 #endif
230 
231 /**
232  * ap_queue_enable_interruption(): Enable interruption on an AP.
233  * @qid: The AP queue number
234  * @ind: the notification indicator byte
235  *
236  * Enables interruption on AP queue via ap_queue_interruption_control(). Based
237  * on the return value it waits a while and tests the AP queue if interrupts
238  * have been switched on using ap_test_queue().
239  */
240 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
241 {
242 #ifdef CONFIG_64BIT
243 	struct ap_queue_status status;
244 	int t_depth, t_device_type, rc, i;
245 
246 	rc = -EBUSY;
247 	status = ap_queue_interruption_control(qid, ind);
248 
249 	for (i = 0; i < AP_MAX_RESET; i++) {
250 		switch (status.response_code) {
251 		case AP_RESPONSE_NORMAL:
252 			if (status.int_enabled)
253 				return 0;
254 			break;
255 		case AP_RESPONSE_RESET_IN_PROGRESS:
256 		case AP_RESPONSE_BUSY:
257 			break;
258 		case AP_RESPONSE_Q_NOT_AVAIL:
259 		case AP_RESPONSE_DECONFIGURED:
260 		case AP_RESPONSE_CHECKSTOPPED:
261 		case AP_RESPONSE_INVALID_ADDRESS:
262 			return -ENODEV;
263 		case AP_RESPONSE_OTHERWISE_CHANGED:
264 			if (status.int_enabled)
265 				return 0;
266 			break;
267 		default:
268 			break;
269 		}
270 		if (i < AP_MAX_RESET - 1) {
271 			udelay(5);
272 			status = ap_test_queue(qid, &t_depth, &t_device_type);
273 		}
274 	}
275 	return rc;
276 #else
277 	return -EINVAL;
278 #endif
279 }
280 
281 /**
282  * __ap_send(): Send message to adjunct processor queue.
283  * @qid: The AP queue number
284  * @psmid: The program supplied message identifier
285  * @msg: The message text
286  * @length: The message length
287  * @special: Special Bit
288  *
289  * Returns AP queue status structure.
290  * Condition code 1 on NQAP can't happen because the L bit is 1.
291  * Condition code 2 on NQAP also means the send is incomplete,
292  * because a segment boundary was reached. The NQAP is repeated.
293  */
294 static inline struct ap_queue_status
295 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
296 	  unsigned int special)
297 {
298 	typedef struct { char _[length]; } msgblock;
299 	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
300 	register struct ap_queue_status reg1 asm ("1");
301 	register unsigned long reg2 asm ("2") = (unsigned long) msg;
302 	register unsigned long reg3 asm ("3") = (unsigned long) length;
303 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
304 	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
305 
306 	if (special == 1)
307 		reg0 |= 0x400000UL;
308 
309 	asm volatile (
310 		"0: .long 0xb2ad0042\n"		/* NQAP */
311 		"   brc   2,0b"
312 		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
313 		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
314 		: "cc" );
315 	return reg1;
316 }
317 
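/**
 * ap_send(): Send a message to an adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Thin wrapper around __ap_send() that converts the AP queue status
 * into an errno value: 0 on success, -EBUSY if the queue is full or a
 * reset is in progress, -EINVAL if a required facility is not
 * installed, -ENODEV otherwise.
 */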
318 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
319 {
320 	struct ap_queue_status status;
321 
322 	status = __ap_send(qid, psmid, msg, length, 0);
323 	switch (status.response_code) {
324 	case AP_RESPONSE_NORMAL:
325 		return 0;
326 	case AP_RESPONSE_Q_FULL:
327 	case AP_RESPONSE_RESET_IN_PROGRESS:
328 		return -EBUSY;
329 	case AP_RESPONSE_REQ_FAC_NOT_INST:
330 		return -EINVAL;
331 	default:	/* Device is gone. */
332 		return -ENODEV;
333 	}
334 }
335 EXPORT_SYMBOL(ap_send);
336 
337 /**
338  * __ap_recv(): Receive message from adjunct processor queue.
339  * @qid: The AP queue number
340  * @psmid: Pointer to program supplied message identifier
341  * @msg: The message text
342  * @length: The message length
343  *
344  * Returns AP queue status structure.
345  * Condition code 1 on DQAP means the receive has taken place
346  * but only partially.	The response is incomplete, hence the
347  * DQAP is repeated.
348  * Condition code 2 on DQAP also means the receive is incomplete,
349  * this time because a segment boundary was reached. Again, the
350  * DQAP is repeated.
351  * Note that gpr2 is used by the DQAP instruction to keep track of
352  * any 'residual' length, in case the instruction gets interrupted.
353  * Hence it gets zeroed before the instruction.
354  */
355 static inline struct ap_queue_status
356 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
357 {
358 	typedef struct { char _[length]; } msgblock;
359 	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
360 	register struct ap_queue_status reg1 asm ("1");
361 	register unsigned long reg2 asm("2") = 0UL;
362 	register unsigned long reg4 asm("4") = (unsigned long) msg;
363 	register unsigned long reg5 asm("5") = (unsigned long) length;
364 	register unsigned long reg6 asm("6") = 0UL;
365 	register unsigned long reg7 asm("7") = 0UL;
366 
367 
368 	asm volatile(
369 		"0: .long 0xb2ae0064\n"
370 		"   brc   6,0b\n"
371 		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
372 		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
373 		"=m" (*(msgblock *) msg) : : "cc" );
374 	*psmid = (((unsigned long long) reg6) << 32) + reg7;
375 	return reg1;
376 }
377 
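/**
 * ap_recv(): Receive a message from an adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Thin wrapper around __ap_recv() that converts the AP queue status
 * into an errno value: 0 on success, -ENOENT if the queue is empty,
 * -EBUSY if no reply is pending yet or a reset is in progress,
 * -ENODEV otherwise.
 */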
378 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
379 {
380 	struct ap_queue_status status;
381 
382 	status = __ap_recv(qid, psmid, msg, length);
383 	switch (status.response_code) {
384 	case AP_RESPONSE_NORMAL:
385 		return 0;
386 	case AP_RESPONSE_NO_PENDING_REPLY:
387 		if (status.queue_empty)
388 			return -ENOENT;
389 		return -EBUSY;
390 	case AP_RESPONSE_RESET_IN_PROGRESS:
391 		return -EBUSY;
392 	default:
393 		return -ENODEV;
394 	}
395 }
396 EXPORT_SYMBOL(ap_recv);
397 
398 /**
399  * ap_query_queue(): Check if an AP queue is available.
400  * @qid: The AP queue number
401  * @queue_depth: Pointer to queue depth value
402  * @device_type: Pointer to device type value
403  *
404  * The test is repeated for AP_MAX_RESET times.
405  */
406 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
407 {
408 	struct ap_queue_status status;
409 	int t_depth, t_device_type, rc, i;
410 
411 	rc = -EBUSY;
412 	for (i = 0; i < AP_MAX_RESET; i++) {
413 		status = ap_test_queue(qid, &t_depth, &t_device_type);
414 		switch (status.response_code) {
415 		case AP_RESPONSE_NORMAL:
416 			*queue_depth = t_depth + 1;
417 			*device_type = t_device_type;
418 			rc = 0;
419 			break;
420 		case AP_RESPONSE_Q_NOT_AVAIL:
421 			rc = -ENODEV;
422 			break;
423 		case AP_RESPONSE_RESET_IN_PROGRESS:
424 			break;
425 		case AP_RESPONSE_DECONFIGURED:
426 			rc = -ENODEV;
427 			break;
428 		case AP_RESPONSE_CHECKSTOPPED:
429 			rc = -ENODEV;
430 			break;
431 		case AP_RESPONSE_INVALID_ADDRESS:
432 			rc = -ENODEV;
433 			break;
434 		case AP_RESPONSE_OTHERWISE_CHANGED:
435 			break;
436 		case AP_RESPONSE_BUSY:
437 			break;
438 		default:
439 			BUG();
440 		}
441 		if (rc != -EBUSY)
442 			break;
443 		if (i < AP_MAX_RESET - 1)
444 			udelay(5);
445 	}
446 	return rc;
447 }
448 
449 /**
450  * ap_init_queue(): Reset an AP queue.
451  * @qid: The AP queue number
452  *
453  * Reset an AP queue and wait for it to become available again.
454  */
455 static int ap_init_queue(ap_qid_t qid)
456 {
457 	struct ap_queue_status status;
458 	int rc, dummy, i;
459 
460 	rc = -ENODEV;
461 	status = ap_reset_queue(qid);
462 	for (i = 0; i < AP_MAX_RESET; i++) {
463 		switch (status.response_code) {
464 		case AP_RESPONSE_NORMAL:
465 			if (status.queue_empty)
466 				rc = 0;
467 			break;
468 		case AP_RESPONSE_Q_NOT_AVAIL:
469 		case AP_RESPONSE_DECONFIGURED:
470 		case AP_RESPONSE_CHECKSTOPPED:
471 			i = AP_MAX_RESET;	/* return with -ENODEV */
472 			break;
473 		case AP_RESPONSE_RESET_IN_PROGRESS:
474 			rc = -EBUSY;
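			/* fall through */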
475 		case AP_RESPONSE_BUSY:
476 		default:
477 			break;
478 		}
479 		if (rc != -ENODEV && rc != -EBUSY)
480 			break;
481 		if (i < AP_MAX_RESET - 1) {
482 			udelay(5);
483 			status = ap_test_queue(qid, &dummy, &dummy);
484 		}
485 	}
486 	if (rc == 0 && ap_using_interrupts()) {
487 		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
488 		/* If interruption mode is supported by the machine,
489 		 * but an AP cannot be enabled for interruption then
490 		 * the AP will be discarded. */
491 		if (rc)
492 			pr_err("Registering adapter interrupts for "
493 			       "AP %d failed\n", AP_QID_DEVICE(qid));
494 	}
495 	return rc;
496 }
497 
498 /**
499  * ap_increase_queue_count(): Arm request timeout.
500  * @ap_dev: Pointer to an AP device.
501  *
502  * Arm request timeout if an AP device was idle and a new request is submitted.
503  */
504 static void ap_increase_queue_count(struct ap_device *ap_dev)
505 {
506 	int timeout = ap_dev->drv->request_timeout;
507 
508 	ap_dev->queue_count++;
509 	if (ap_dev->queue_count == 1) {
510 		mod_timer(&ap_dev->timeout, jiffies + timeout);
511 		ap_dev->reset = AP_RESET_ARMED;
512 	}
513 }
514 
515 /**
516  * ap_decrease_queue_count(): Decrease queue count.
517  * @ap_dev: Pointer to an AP device.
518  *
519  * If AP device is still alive, re-schedule request timeout if there are still
520  * pending requests.
521  */
522 static void ap_decrease_queue_count(struct ap_device *ap_dev)
523 {
524 	int timeout = ap_dev->drv->request_timeout;
525 
526 	ap_dev->queue_count--;
527 	if (ap_dev->queue_count > 0)
528 		mod_timer(&ap_dev->timeout, jiffies + timeout);
529 	else
530 		/*
531 		 * The timeout timer should be disabled now. Since
532 		 * del_timer_sync() is very expensive, we just tell via the
533 		 * reset flag to ignore the pending timeout timer.
534 		 */
535 		ap_dev->reset = AP_RESET_IGNORE;
536 }
537 
538 /*
539  * AP device related attributes.
540  */
541 static ssize_t ap_hwtype_show(struct device *dev,
542 			      struct device_attribute *attr, char *buf)
543 {
544 	struct ap_device *ap_dev = to_ap_dev(dev);
545 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
546 }
547 
548 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
549 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
550 			     char *buf)
551 {
552 	struct ap_device *ap_dev = to_ap_dev(dev);
553 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
554 }
555 
556 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
557 static ssize_t ap_request_count_show(struct device *dev,
558 				     struct device_attribute *attr,
559 				     char *buf)
560 {
561 	struct ap_device *ap_dev = to_ap_dev(dev);
562 	int rc;
563 
564 	spin_lock_bh(&ap_dev->lock);
565 	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
566 	spin_unlock_bh(&ap_dev->lock);
567 	return rc;
568 }
569 
570 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
571 
572 static ssize_t ap_modalias_show(struct device *dev,
573 				struct device_attribute *attr, char *buf)
574 {
575 	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
576 }
577 
578 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
579 
580 static struct attribute *ap_dev_attrs[] = {
581 	&dev_attr_hwtype.attr,
582 	&dev_attr_depth.attr,
583 	&dev_attr_request_count.attr,
584 	&dev_attr_modalias.attr,
585 	NULL
586 };
587 static struct attribute_group ap_dev_attr_group = {
588 	.attrs = ap_dev_attrs
589 };
590 
591 /**
592  * ap_bus_match(): Match an AP device against an AP driver.
593  * @dev: Pointer to device
594  * @drv: Pointer to device_driver
595  *
596  * Returns 1 if the device type of @dev is in the id table of @drv.
597  */
598 static int ap_bus_match(struct device *dev, struct device_driver *drv)
599 {
600 	struct ap_device *ap_dev = to_ap_dev(dev);
601 	struct ap_driver *ap_drv = to_ap_drv(drv);
602 	struct ap_device_id *id;
603 
604 	/*
605 	 * Compare device type of the device with the list of
606 	 * supported types of the device_driver.
607 	 */
608 	for (id = ap_drv->ids; id->match_flags; id++) {
609 		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
610 		    (id->dev_type != ap_dev->device_type))
611 			continue;
612 		return 1;
613 	}
614 	return 0;
615 }
616 
617 /**
618  * ap_uevent(): Uevent function for AP devices.
619  * @dev: Pointer to device
620  * @env: Pointer to kobj_uevent_env
621  *
622  * It sets up a single environment variable DEV_TYPE which contains the
623  * hardware device type.
624  */
625 static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
626 {
627 	struct ap_device *ap_dev = to_ap_dev(dev);
628 	int retval = 0;
629 
630 	if (!ap_dev)
631 		return -ENODEV;
632 
633 	/* Set up DEV_TYPE environment variable. */
634 	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
635 	if (retval)
636 		return retval;
637 
638 	/* Add MODALIAS= */
639 	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
640 
641 	return retval;
642 }
643 
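/**
 * ap_bus_suspend(): Suspend callback for AP devices.
 * @dev: Pointer to device
 * @state: Target power state (unused)
 *
 * On the first invocation the bus rescan timer, the work queue and the
 * poll tasklet are stopped. The device is then polled until all
 * outstanding requests are finished and finally marked as unregistered.
 */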
644 static int ap_bus_suspend(struct device *dev, pm_message_t state)
645 {
646 	struct ap_device *ap_dev = to_ap_dev(dev);
647 	unsigned long flags;
648 
649 	if (!ap_suspend_flag) {
650 		ap_suspend_flag = 1;
651 
652 		/* Disable scanning for devices; we do not want to scan
653 		 * for them again after removing them.
654 		 */
655 		del_timer_sync(&ap_config_timer);
656 		if (ap_work_queue != NULL) {
657 			destroy_workqueue(ap_work_queue);
658 			ap_work_queue = NULL;
659 		}
660 
661 		tasklet_disable(&ap_tasklet);
662 	}
663 	/* Poll on the device until all requests are finished. */
664 	do {
665 		flags = 0;
666 		spin_lock_bh(&ap_dev->lock);
667 		__ap_poll_device(ap_dev, &flags);
668 		spin_unlock_bh(&ap_dev->lock);
669 	} while ((flags & 1) || (flags & 2));
670 
671 	spin_lock_bh(&ap_dev->lock);
672 	ap_dev->unregistered = 1;
673 	spin_unlock_bh(&ap_dev->lock);
674 
675 	return 0;
676 }
677 
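/**
 * ap_bus_resume(): Resume callback for AP devices.
 * @dev: Pointer to device
 *
 * On the first invocation the AP domain is re-selected (unless it was
 * fixed by the domain= module parameter) and the rescan timer, work
 * queue, tasklet and, if configured, the poll thread are restarted.
 * The queue id of the device is adjusted to the current domain and a
 * bus rescan is scheduled.
 */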
678 static int ap_bus_resume(struct device *dev)
679 {
680 	int rc = 0;
681 	struct ap_device *ap_dev = to_ap_dev(dev);
682 
683 	if (ap_suspend_flag) {
684 		ap_suspend_flag = 0;
685 		if (!ap_interrupts_available())
686 			ap_interrupt_indicator = NULL;
687 		if (!user_set_domain) {
688 			ap_domain_index = -1;
689 			ap_select_domain();
690 		}
691 		init_timer(&ap_config_timer);
692 		ap_config_timer.function = ap_config_timeout;
693 		ap_config_timer.data = 0;
694 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
695 		add_timer(&ap_config_timer);
696 		ap_work_queue = create_singlethread_workqueue("kapwork");
697 		if (!ap_work_queue)
698 			return -ENOMEM;
699 		tasklet_enable(&ap_tasklet);
700 		if (!ap_using_interrupts())
701 			ap_schedule_poll_timer();
702 		else
703 			tasklet_schedule(&ap_tasklet);
704 		if (ap_thread_flag)
705 			rc = ap_poll_thread_start();
706 	}
707 	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
708 		spin_lock_bh(&ap_dev->lock);
709 		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
710 				       ap_domain_index);
711 		spin_unlock_bh(&ap_dev->lock);
712 	}
713 	queue_work(ap_work_queue, &ap_config_work);
714 
715 	return rc;
716 }
717 
718 static struct bus_type ap_bus_type = {
719 	.name = "ap",
720 	.match = &ap_bus_match,
721 	.uevent = &ap_uevent,
722 	.suspend = ap_bus_suspend,
723 	.resume = ap_bus_resume
724 };
725 
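/**
 * ap_device_probe(): Bus probe callback for AP devices.
 * @dev: Pointer to device
 *
 * Calls the probe function of the matching AP driver and, on success,
 * adds the device to the global ap_device_list.
 */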
726 static int ap_device_probe(struct device *dev)
727 {
728 	struct ap_device *ap_dev = to_ap_dev(dev);
729 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
730 	int rc;
731 
732 	ap_dev->drv = ap_drv;
733 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
734 	if (!rc) {
735 		spin_lock_bh(&ap_device_list_lock);
736 		list_add(&ap_dev->list, &ap_device_list);
737 		spin_unlock_bh(&ap_device_list_lock);
738 	}
739 	return rc;
740 }
741 
742 /**
743  * __ap_flush_queue(): Flush requests.
744  * @ap_dev: Pointer to the AP device
745  *
746  * Flush all requests from the request/pending queue of an AP device.
747  */
748 static void __ap_flush_queue(struct ap_device *ap_dev)
749 {
750 	struct ap_message *ap_msg, *next;
751 
752 	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
753 		list_del_init(&ap_msg->list);
754 		ap_dev->pendingq_count--;
755 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
756 	}
757 	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
758 		list_del_init(&ap_msg->list);
759 		ap_dev->requestq_count--;
760 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
761 	}
762 }
763 
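/**
 * ap_flush_queue(): Flush all requests of an AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Locked wrapper around __ap_flush_queue().
 */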
764 void ap_flush_queue(struct ap_device *ap_dev)
765 {
766 	spin_lock_bh(&ap_dev->lock);
767 	__ap_flush_queue(ap_dev);
768 	spin_unlock_bh(&ap_dev->lock);
769 }
770 EXPORT_SYMBOL(ap_flush_queue);
771 
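/**
 * ap_device_remove(): Bus remove callback for AP devices.
 * @dev: Pointer to device
 *
 * Flushes all queued requests, stops the request timeout timer,
 * removes the device from ap_device_list, calls the remove function
 * of the AP driver and drops the device's outstanding requests from
 * the global poll request counter.
 */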
772 static int ap_device_remove(struct device *dev)
773 {
774 	struct ap_device *ap_dev = to_ap_dev(dev);
775 	struct ap_driver *ap_drv = ap_dev->drv;
776 
777 	ap_flush_queue(ap_dev);
778 	del_timer_sync(&ap_dev->timeout);
779 	spin_lock_bh(&ap_device_list_lock);
780 	list_del_init(&ap_dev->list);
781 	spin_unlock_bh(&ap_device_list_lock);
782 	if (ap_drv->remove)
783 		ap_drv->remove(ap_dev);
784 	spin_lock_bh(&ap_dev->lock);
785 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
786 	spin_unlock_bh(&ap_dev->lock);
787 	return 0;
788 }
789 
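/**
 * ap_driver_register(): Register an AP driver with the AP bus.
 * @ap_drv: Pointer to the AP driver structure
 * @owner: Owning module, usually THIS_MODULE
 * @name: Name of the driver
 *
 * Fills in the embedded struct device_driver (bus, probe, remove,
 * owner, name) and registers it with the driver core.
 *
 * A minimal sketch of a caller (all names are illustrative only):
 *
 *	static struct ap_driver my_ap_driver = {
 *		.ids	= my_ap_device_ids,
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	rc = ap_driver_register(&my_ap_driver, THIS_MODULE, "my_ap_drv");
 */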
790 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
791 		       char *name)
792 {
793 	struct device_driver *drv = &ap_drv->driver;
794 
795 	drv->bus = &ap_bus_type;
796 	drv->probe = ap_device_probe;
797 	drv->remove = ap_device_remove;
798 	drv->owner = owner;
799 	drv->name = name;
800 	return driver_register(drv);
801 }
802 EXPORT_SYMBOL(ap_driver_register);
803 
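/**
 * ap_driver_unregister(): Unregister an AP driver from the AP bus.
 * @ap_drv: Pointer to the AP driver structure
 */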
804 void ap_driver_unregister(struct ap_driver *ap_drv)
805 {
806 	driver_unregister(&ap_drv->driver);
807 }
808 EXPORT_SYMBOL(ap_driver_unregister);
809 
810 /*
811  * AP bus attributes.
812  */
813 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
814 {
815 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
816 }
817 
818 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
819 
820 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
821 {
822 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
823 }
824 
825 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
826 {
827 	return snprintf(buf, PAGE_SIZE, "%d\n",
828 			ap_using_interrupts() ? 1 : 0);
829 }
830 
831 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
832 
833 static ssize_t ap_config_time_store(struct bus_type *bus,
834 				    const char *buf, size_t count)
835 {
836 	int time;
837 
838 	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
839 		return -EINVAL;
840 	ap_config_time = time;
841 	if (!timer_pending(&ap_config_timer) ||
842 	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
843 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
844 		add_timer(&ap_config_timer);
845 	}
846 	return count;
847 }
848 
849 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
850 
851 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
852 {
853 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
854 }
855 
856 static ssize_t ap_poll_thread_store(struct bus_type *bus,
857 				    const char *buf, size_t count)
858 {
859 	int flag, rc;
860 
861 	if (sscanf(buf, "%d\n", &flag) != 1)
862 		return -EINVAL;
863 	if (flag) {
864 		rc = ap_poll_thread_start();
865 		if (rc)
866 			return rc;
867 	}
868 	else
869 		ap_poll_thread_stop();
870 	return count;
871 }
872 
873 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
874 
875 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
876 {
877 	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
878 }
879 
880 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
881 				  size_t count)
882 {
883 	unsigned long long time;
884 	ktime_t hr_time;
885 
886 	/* 120 seconds = maximum poll interval */
887 	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
888 	    time > 120000000000ULL)
889 		return -EINVAL;
890 	poll_timeout = time;
891 	hr_time = ktime_set(0, poll_timeout);
892 
893 	if (!hrtimer_is_queued(&ap_poll_timer) ||
894 	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
895 		hrtimer_set_expires(&ap_poll_timer, hr_time);
896 		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
897 	}
898 	return count;
899 }
900 
901 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
902 
903 static struct bus_attribute *const ap_bus_attrs[] = {
904 	&bus_attr_ap_domain,
905 	&bus_attr_config_time,
906 	&bus_attr_poll_thread,
907 	&bus_attr_ap_interrupts,
908 	&bus_attr_poll_timeout,
909 	NULL,
910 };
911 
912 /**
913  * ap_select_domain(): Select an AP domain.
914  *
915  * Pick one of the 16 AP domains.
916  */
917 static int ap_select_domain(void)
918 {
919 	int queue_depth, device_type, count, max_count, best_domain;
920 	int rc, i, j;
921 
922 	/*
923 	 * We want to use a single domain. Either the one specified with
924 	 * the "domain=" parameter or the domain with the maximum number
925 	 * of devices.
926 	 */
927 	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
928 		/* Domain has already been selected. */
929 		return 0;
930 	best_domain = -1;
931 	max_count = 0;
932 	for (i = 0; i < AP_DOMAINS; i++) {
933 		count = 0;
934 		for (j = 0; j < AP_DEVICES; j++) {
935 			ap_qid_t qid = AP_MKQID(j, i);
936 			rc = ap_query_queue(qid, &queue_depth, &device_type);
937 			if (rc)
938 				continue;
939 			count++;
940 		}
941 		if (count > max_count) {
942 			max_count = count;
943 			best_domain = i;
944 		}
945 	}
946 	if (best_domain >= 0) {
947 		ap_domain_index = best_domain;
948 		return 0;
949 	}
950 	return -ENODEV;
951 }
952 
953 /**
954  * ap_probe_device_type(): Find the device type of an AP.
955  * @ap_dev: pointer to the AP device.
956  *
957  * Find the device type if query queue returned a device type of 0.
958  */
959 static int ap_probe_device_type(struct ap_device *ap_dev)
960 {
961 	static unsigned char msg[] = {
962 		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
963 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
964 		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
965 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
966 		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
967 		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
968 		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
969 		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
970 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
971 		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
972 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
973 		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
974 		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
975 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
976 		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
977 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
978 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
979 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
980 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
981 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
982 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
983 		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
984 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
985 		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
986 		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
987 		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
988 		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
989 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
990 		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
991 		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
992 		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
993 		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
994 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
995 		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
996 		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
997 		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
998 		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
999 		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1000 		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1001 		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1002 		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1003 		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1004 		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1005 		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1006 		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1007 	};
1008 	struct ap_queue_status status;
1009 	unsigned long long psmid;
1010 	char *reply;
1011 	int rc, i;
1012 
1013 	reply = (void *) get_zeroed_page(GFP_KERNEL);
1014 	if (!reply) {
1015 		rc = -ENOMEM;
1016 		goto out;
1017 	}
1018 
1019 	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1020 			   msg, sizeof(msg), 0);
1021 	if (status.response_code != AP_RESPONSE_NORMAL) {
1022 		rc = -ENODEV;
1023 		goto out_free;
1024 	}
1025 
1026 	/* Wait for the test message to complete. */
1027 	for (i = 0; i < 6; i++) {
1028 		mdelay(300);
1029 		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1030 		if (status.response_code == AP_RESPONSE_NORMAL &&
1031 		    psmid == 0x0102030405060708ULL)
1032 			break;
1033 	}
1034 	if (i < 6) {
1035 		/* Got an answer. */
1036 		if (reply[0] == 0x00 && reply[1] == 0x86)
1037 			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1038 		else
1039 			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1040 		rc = 0;
1041 	} else
1042 		rc = -ENODEV;
1043 
1044 out_free:
1045 	free_page((unsigned long) reply);
1046 out:
1047 	return rc;
1048 }
1049 
1050 static void ap_interrupt_handler(void *unused1, void *unused2)
1051 {
1052 	tasklet_schedule(&ap_tasklet);
1053 }
1054 
1055 /**
1056  * __ap_scan_bus(): Device match helper for the bus scan.
1057  * @dev: Pointer to device
1058  * @data: AP queue id to look for, cast to a pointer
1059  *
1060  * Returns 1 if the qid of @dev equals the qid passed in @data.
1061  */
1062 static int __ap_scan_bus(struct device *dev, void *data)
1063 {
1064 	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1065 }
1066 
1067 static void ap_device_release(struct device *dev)
1068 {
1069 	struct ap_device *ap_dev = to_ap_dev(dev);
1070 
1071 	kfree(ap_dev);
1072 }
1073 
1074 static void ap_scan_bus(struct work_struct *unused)
1075 {
1076 	struct ap_device *ap_dev;
1077 	struct device *dev;
1078 	ap_qid_t qid;
1079 	int queue_depth, device_type;
1080 	int rc, i;
1081 
1082 	if (ap_select_domain() != 0)
1083 		return;
1084 	for (i = 0; i < AP_DEVICES; i++) {
1085 		qid = AP_MKQID(i, ap_domain_index);
1086 		dev = bus_find_device(&ap_bus_type, NULL,
1087 				      (void *)(unsigned long)qid,
1088 				      __ap_scan_bus);
1089 		rc = ap_query_queue(qid, &queue_depth, &device_type);
1090 		if (dev) {
1091 			if (rc == -EBUSY) {
1092 				set_current_state(TASK_UNINTERRUPTIBLE);
1093 				schedule_timeout(AP_RESET_TIMEOUT);
1094 				rc = ap_query_queue(qid, &queue_depth,
1095 						    &device_type);
1096 			}
1097 			ap_dev = to_ap_dev(dev);
1098 			spin_lock_bh(&ap_dev->lock);
1099 			if (rc || ap_dev->unregistered) {
1100 				spin_unlock_bh(&ap_dev->lock);
1101 				if (ap_dev->unregistered)
1102 					i--;
1103 				device_unregister(dev);
1104 				put_device(dev);
1105 				continue;
1106 			}
1107 			spin_unlock_bh(&ap_dev->lock);
1108 			put_device(dev);
1109 			continue;
1110 		}
1111 		if (rc)
1112 			continue;
1113 		rc = ap_init_queue(qid);
1114 		if (rc)
1115 			continue;
1116 		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1117 		if (!ap_dev)
1118 			break;
1119 		ap_dev->qid = qid;
1120 		ap_dev->queue_depth = queue_depth;
1121 		ap_dev->unregistered = 1;
1122 		spin_lock_init(&ap_dev->lock);
1123 		INIT_LIST_HEAD(&ap_dev->pendingq);
1124 		INIT_LIST_HEAD(&ap_dev->requestq);
1125 		INIT_LIST_HEAD(&ap_dev->list);
1126 		setup_timer(&ap_dev->timeout, ap_request_timeout,
1127 			    (unsigned long) ap_dev);
1128 		if (device_type == 0)
1129 			ap_probe_device_type(ap_dev);
1130 		else
1131 			ap_dev->device_type = device_type;
1132 
1133 		ap_dev->device.bus = &ap_bus_type;
1134 		ap_dev->device.parent = ap_root_device;
1135 		if (dev_set_name(&ap_dev->device, "card%02x",
1136 				 AP_QID_DEVICE(ap_dev->qid))) {
1137 			kfree(ap_dev);
1138 			continue;
1139 		}
1140 		ap_dev->device.release = ap_device_release;
1141 		rc = device_register(&ap_dev->device);
1142 		if (rc) {
1143 			put_device(&ap_dev->device);
1144 			continue;
1145 		}
1146 		/* Add device attributes. */
1147 		rc = sysfs_create_group(&ap_dev->device.kobj,
1148 					&ap_dev_attr_group);
1149 		if (!rc) {
1150 			spin_lock_bh(&ap_dev->lock);
1151 			ap_dev->unregistered = 0;
1152 			spin_unlock_bh(&ap_dev->lock);
1153 		}
1154 		else
1155 			device_unregister(&ap_dev->device);
1156 	}
1157 }
1158 
1159 static void
1160 ap_config_timeout(unsigned long ptr)
1161 {
1162 	queue_work(ap_work_queue, &ap_config_work);
1163 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1164 	add_timer(&ap_config_timer);
1165 }
1166 
1167 /**
1168  * ap_schedule_poll_timer(): Schedule poll timer.
1169  *
1170  * Set up the timer to run the poll tasklet
1171  */
1172 static inline void ap_schedule_poll_timer(void)
1173 {
1174 	ktime_t hr_time;
1175 
1176 	spin_lock_bh(&ap_poll_timer_lock);
1177 	if (ap_using_interrupts() || ap_suspend_flag)
1178 		goto out;
1179 	if (hrtimer_is_queued(&ap_poll_timer))
1180 		goto out;
1181 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1182 		hr_time = ktime_set(0, poll_timeout);
1183 		hrtimer_forward_now(&ap_poll_timer, hr_time);
1184 		hrtimer_restart(&ap_poll_timer);
1185 	}
1186 out:
1187 	spin_unlock_bh(&ap_poll_timer_lock);
1188 }
1189 
1190 /**
1191  * ap_poll_read(): Receive pending reply messages from an AP device.
1192  * @ap_dev: pointer to the AP device
1193  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1194  *	   required, bit 2^1 is set if the poll timer needs to get armed
1195  *
1196  * Returns 0 if the device is still present, -ENODEV if not.
1197  */
1198 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1199 {
1200 	struct ap_queue_status status;
1201 	struct ap_message *ap_msg;
1202 
1203 	if (ap_dev->queue_count <= 0)
1204 		return 0;
1205 	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1206 			   ap_dev->reply->message, ap_dev->reply->length);
1207 	switch (status.response_code) {
1208 	case AP_RESPONSE_NORMAL:
1209 		atomic_dec(&ap_poll_requests);
1210 		ap_decrease_queue_count(ap_dev);
1211 		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1212 			if (ap_msg->psmid != ap_dev->reply->psmid)
1213 				continue;
1214 			list_del_init(&ap_msg->list);
1215 			ap_dev->pendingq_count--;
1216 			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
1217 			break;
1218 		}
1219 		if (ap_dev->queue_count > 0)
1220 			*flags |= 1;
1221 		break;
1222 	case AP_RESPONSE_NO_PENDING_REPLY:
1223 		if (status.queue_empty) {
1224 			/* The card shouldn't forget requests but who knows. */
1225 			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1226 			ap_dev->queue_count = 0;
1227 			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1228 			ap_dev->requestq_count += ap_dev->pendingq_count;
1229 			ap_dev->pendingq_count = 0;
1230 		} else
1231 			*flags |= 2;
1232 		break;
1233 	default:
1234 		return -ENODEV;
1235 	}
1236 	return 0;
1237 }
1238 
1239 /**
1240  * ap_poll_write(): Send messages from the request queue to an AP device.
1241  * @ap_dev: pointer to the AP device
1242  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1243  *	   required, bit 2^1 is set if the poll timer needs to get armed
1244  *
1245  * Returns 0 if the device is still present, -ENODEV if not.
1246  */
1247 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1248 {
1249 	struct ap_queue_status status;
1250 	struct ap_message *ap_msg;
1251 
1252 	if (ap_dev->requestq_count <= 0 ||
1253 	    ap_dev->queue_count >= ap_dev->queue_depth)
1254 		return 0;
1255 	/* Start the next request on the queue. */
1256 	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1257 	status = __ap_send(ap_dev->qid, ap_msg->psmid,
1258 			   ap_msg->message, ap_msg->length, ap_msg->special);
1259 	switch (status.response_code) {
1260 	case AP_RESPONSE_NORMAL:
1261 		atomic_inc(&ap_poll_requests);
1262 		ap_increase_queue_count(ap_dev);
1263 		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1264 		ap_dev->requestq_count--;
1265 		ap_dev->pendingq_count++;
1266 		if (ap_dev->queue_count < ap_dev->queue_depth &&
1267 		    ap_dev->requestq_count > 0)
1268 			*flags |= 1;
1269 		*flags |= 2;
1270 		break;
1271 	case AP_RESPONSE_Q_FULL:
1272 	case AP_RESPONSE_RESET_IN_PROGRESS:
1273 		*flags |= 2;
1274 		break;
1275 	case AP_RESPONSE_MESSAGE_TOO_BIG:
1276 	case AP_RESPONSE_REQ_FAC_NOT_INST:
1277 		return -EINVAL;
1278 	default:
1279 		return -ENODEV;
1280 	}
1281 	return 0;
1282 }
1283 
1284 /**
1285  * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1286  * @ap_dev: pointer to the bus device
1287  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1288  *	   required, bit 2^1 is set if the poll timer needs to get armed
1289  *
1290  * Poll AP device for pending replies and send new messages. If either
1291  * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1292  * Returns 0.
1293  */
1294 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1295 {
1296 	int rc;
1297 
1298 	rc = ap_poll_read(ap_dev, flags);
1299 	if (rc)
1300 		return rc;
1301 	return ap_poll_write(ap_dev, flags);
1302 }
1303 
1304 /**
1305  * __ap_queue_message(): Queue a message to a device.
1306  * @ap_dev: pointer to the AP device
1307  * @ap_msg: the message to be queued
1308  *
1309  * Queue a message to a device. Returns 0 if successful.
1310  */
1311 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1312 {
1313 	struct ap_queue_status status;
1314 
1315 	if (list_empty(&ap_dev->requestq) &&
1316 	    ap_dev->queue_count < ap_dev->queue_depth) {
1317 		status = __ap_send(ap_dev->qid, ap_msg->psmid,
1318 				   ap_msg->message, ap_msg->length,
1319 				   ap_msg->special);
1320 		switch (status.response_code) {
1321 		case AP_RESPONSE_NORMAL:
1322 			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1323 			atomic_inc(&ap_poll_requests);
1324 			ap_dev->pendingq_count++;
1325 			ap_increase_queue_count(ap_dev);
1326 			ap_dev->total_request_count++;
1327 			break;
1328 		case AP_RESPONSE_Q_FULL:
1329 		case AP_RESPONSE_RESET_IN_PROGRESS:
1330 			list_add_tail(&ap_msg->list, &ap_dev->requestq);
1331 			ap_dev->requestq_count++;
1332 			ap_dev->total_request_count++;
1333 			return -EBUSY;
1334 		case AP_RESPONSE_REQ_FAC_NOT_INST:
1335 		case AP_RESPONSE_MESSAGE_TOO_BIG:
1336 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1337 			return -EINVAL;
1338 		default:	/* Device is gone. */
1339 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1340 			return -ENODEV;
1341 		}
1342 	} else {
1343 		list_add_tail(&ap_msg->list, &ap_dev->requestq);
1344 		ap_dev->requestq_count++;
1345 		ap_dev->total_request_count++;
1346 		return -EBUSY;
1347 	}
1348 	ap_schedule_poll_timer();
1349 	return 0;
1350 }
1351 
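/**
 * ap_queue_message(): Queue a request to an AP device.
 * @ap_dev: The AP device to queue the message to
 * @ap_msg: The message to be queued
 *
 * Polls the device to make room on the queue, then queues the message
 * via __ap_queue_message(). If the device turns out to be gone the
 * message is completed with -ENODEV and the device is unregistered.
 */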
1352 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1353 {
1354 	unsigned long flags;
1355 	int rc;
1356 
1357 	spin_lock_bh(&ap_dev->lock);
1358 	if (!ap_dev->unregistered) {
1359 		/* Make room on the queue by polling for finished requests. */
1360 		rc = ap_poll_queue(ap_dev, &flags);
1361 		if (!rc)
1362 			rc = __ap_queue_message(ap_dev, ap_msg);
1363 		if (!rc)
1364 			wake_up(&ap_poll_wait);
1365 		if (rc == -ENODEV)
1366 			ap_dev->unregistered = 1;
1367 	} else {
1368 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1369 		rc = -ENODEV;
1370 	}
1371 	spin_unlock_bh(&ap_dev->lock);
1372 	if (rc == -ENODEV)
1373 		device_unregister(&ap_dev->device);
1374 }
1375 EXPORT_SYMBOL(ap_queue_message);
1376 
1377 /**
1378  * ap_cancel_message(): Cancel a crypto request.
1379  * @ap_dev: The AP device that has the message queued
1380  * @ap_msg: The message that is to be removed
1381  *
1382  * Cancel a crypto request. This is done by removing the request
1383  * from the device pending or request queue. Note that the
1384  * request stays on the AP queue. When it finishes the message
1385  * reply will be discarded because the psmid can't be found.
1386  */
1387 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1388 {
1389 	struct ap_message *tmp;
1390 
1391 	spin_lock_bh(&ap_dev->lock);
1392 	if (!list_empty(&ap_msg->list)) {
1393 		list_for_each_entry(tmp, &ap_dev->pendingq, list)
1394 			if (tmp->psmid == ap_msg->psmid) {
1395 				ap_dev->pendingq_count--;
1396 				goto found;
1397 			}
1398 		ap_dev->requestq_count--;
1399 	found:
1400 		list_del_init(&ap_msg->list);
1401 	}
1402 	spin_unlock_bh(&ap_dev->lock);
1403 }
1404 EXPORT_SYMBOL(ap_cancel_message);
1405 
1406 /**
1407  * ap_poll_timeout(): AP receive polling for finished AP requests.
1408  * @unused: Unused pointer.
1409  *
1410  * Schedules the AP tasklet using a high resolution timer.
1411  */
1412 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1413 {
1414 	tasklet_schedule(&ap_tasklet);
1415 	return HRTIMER_NORESTART;
1416 }
1417 
1418 /**
1419  * ap_reset(): Reset a non-responding AP device.
1420  * @ap_dev: Pointer to the AP device
1421  *
1422  * Reset a non-responding AP device and move all requests from the
1423  * pending queue to the request queue.
1424  */
1425 static void ap_reset(struct ap_device *ap_dev)
1426 {
1427 	int rc;
1428 
1429 	ap_dev->reset = AP_RESET_IGNORE;
1430 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1431 	ap_dev->queue_count = 0;
1432 	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1433 	ap_dev->requestq_count += ap_dev->pendingq_count;
1434 	ap_dev->pendingq_count = 0;
1435 	rc = ap_init_queue(ap_dev->qid);
1436 	if (rc == -ENODEV)
1437 		ap_dev->unregistered = 1;
1438 }
1439 
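/**
 * __ap_poll_device(): Poll a single AP device.
 * @ap_dev: Pointer to the AP device
 * @flags: Pointer to control flags, passed on to ap_poll_queue()
 *
 * Must be called with the device lock held. Marks the device as
 * unregistered if polling fails and performs a pending reset.
 */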
1440 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1441 {
1442 	if (!ap_dev->unregistered) {
1443 		if (ap_poll_queue(ap_dev, flags))
1444 			ap_dev->unregistered = 1;
1445 		if (ap_dev->reset == AP_RESET_DO)
1446 			ap_reset(ap_dev);
1447 	}
1448 	return 0;
1449 }
1450 
1451 /**
1452  * ap_poll_all(): Poll all AP devices.
1453  * @dummy: Unused variable
1454  *
1455  * Poll all AP devices on the bus in a round robin fashion. Continue
1456  * polling until bit 2^0 of the control flags is not set. If bit 2^1
1457  * of the control flags has been set arm the poll timer.
1458  */
1459 static void ap_poll_all(unsigned long dummy)
1460 {
1461 	unsigned long flags;
1462 	struct ap_device *ap_dev;
1463 
1464 	/* Reset the indicator if interrupts are used. Thus new interrupts can
1465 	 * be received. Doing this at the beginning of the tasklet is
1466 	 * important so that no requests on any AP get lost.
1467 	 */
1468 	if (ap_using_interrupts())
1469 		xchg((u8 *)ap_interrupt_indicator, 0);
1470 	do {
1471 		flags = 0;
1472 		spin_lock(&ap_device_list_lock);
1473 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1474 			spin_lock(&ap_dev->lock);
1475 			__ap_poll_device(ap_dev, &flags);
1476 			spin_unlock(&ap_dev->lock);
1477 		}
1478 		spin_unlock(&ap_device_list_lock);
1479 	} while (flags & 1);
1480 	if (flags & 2)
1481 		ap_schedule_poll_timer();
1482 }
1483 
1484 /**
1485  * ap_poll_thread(): Thread that polls for finished requests.
1486  * @data: Unused pointer
1487  *
1488  * AP bus poll thread. The purpose of this thread is to poll for
1489  * finished requests in a loop if there is a "free" cpu - that is
1490  * a cpu that doesn't have anything better to do. The polling stops
1491  * as soon as there is another task or if all messages have been
1492  * delivered.
1493  */
1494 static int ap_poll_thread(void *data)
1495 {
1496 	DECLARE_WAITQUEUE(wait, current);
1497 	unsigned long flags;
1498 	int requests;
1499 	struct ap_device *ap_dev;
1500 
1501 	set_user_nice(current, 19);
1502 	while (1) {
1503 		if (ap_suspend_flag)
1504 			return 0;
1505 		if (need_resched()) {
1506 			schedule();
1507 			continue;
1508 		}
1509 		add_wait_queue(&ap_poll_wait, &wait);
1510 		set_current_state(TASK_INTERRUPTIBLE);
1511 		if (kthread_should_stop())
1512 			break;
1513 		requests = atomic_read(&ap_poll_requests);
1514 		if (requests <= 0)
1515 			schedule();
1516 		set_current_state(TASK_RUNNING);
1517 		remove_wait_queue(&ap_poll_wait, &wait);
1518 
1519 		flags = 0;
1520 		spin_lock_bh(&ap_device_list_lock);
1521 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1522 			spin_lock(&ap_dev->lock);
1523 			__ap_poll_device(ap_dev, &flags);
1524 			spin_unlock(&ap_dev->lock);
1525 		}
1526 		spin_unlock_bh(&ap_device_list_lock);
1527 	}
1528 	set_current_state(TASK_RUNNING);
1529 	remove_wait_queue(&ap_poll_wait, &wait);
1530 	return 0;
1531 }
1532 
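/*
 * Start the AP bus poll thread, unless adapter interrupts are used
 * or the bus is suspended.
 */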
1533 static int ap_poll_thread_start(void)
1534 {
1535 	int rc;
1536 
1537 	if (ap_using_interrupts() || ap_suspend_flag)
1538 		return 0;
1539 	mutex_lock(&ap_poll_thread_mutex);
1540 	if (!ap_poll_kthread) {
1541 		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1542 		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1543 		if (rc)
1544 			ap_poll_kthread = NULL;
1545 	}
1546 	else
1547 		rc = 0;
1548 	mutex_unlock(&ap_poll_thread_mutex);
1549 	return rc;
1550 }
1551 
1552 static void ap_poll_thread_stop(void)
1553 {
1554 	mutex_lock(&ap_poll_thread_mutex);
1555 	if (ap_poll_kthread) {
1556 		kthread_stop(ap_poll_kthread);
1557 		ap_poll_kthread = NULL;
1558 	}
1559 	mutex_unlock(&ap_poll_thread_mutex);
1560 }
1561 
1562 /**
1563  * ap_request_timeout(): Handling of request timeouts
1564  * @data: Holds the AP device.
1565  *
1566  * Handles request timeouts.
1567  */
1568 static void ap_request_timeout(unsigned long data)
1569 {
1570 	struct ap_device *ap_dev = (struct ap_device *) data;
1571 
1572 	if (ap_dev->reset == AP_RESET_ARMED) {
1573 		ap_dev->reset = AP_RESET_DO;
1574 
1575 		if (ap_using_interrupts())
1576 			tasklet_schedule(&ap_tasklet);
1577 	}
1578 }
1579 
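/*
 * Reset all AP queues of the currently selected domain.
 */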
1580 static void ap_reset_domain(void)
1581 {
1582 	int i;
1583 
1584 	if (ap_domain_index != -1)
1585 		for (i = 0; i < AP_DEVICES; i++)
1586 			ap_reset_queue(AP_MKQID(i, ap_domain_index));
1587 }
1588 
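/*
 * Reset all AP queues of all domains. Registered as a reset call via
 * ap_reset_call below.
 */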
1589 static void ap_reset_all(void)
1590 {
1591 	int i, j;
1592 
1593 	for (i = 0; i < AP_DOMAINS; i++)
1594 		for (j = 0; j < AP_DEVICES; j++)
1595 			ap_reset_queue(AP_MKQID(j, i));
1596 }
1597 
1598 static struct reset_call ap_reset_call = {
1599 	.fn = ap_reset_all,
1600 };
1601 
1602 /**
1603  * ap_module_init(): The module initialization code.
1604  *
1605  * Initializes the module.
1606  */
1607 int __init ap_module_init(void)
1608 {
1609 	int rc, i;
1610 
1611 	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1612 		pr_warning("%d is not a valid cryptographic domain\n",
1613 			   ap_domain_index);
1614 		return -EINVAL;
1615 	}
1616 	/* In the resume callback we need to know if the user had set the domain.
1617 	 * If so, we cannot just reset it.
1618 	 */
1619 	if (ap_domain_index >= 0)
1620 		user_set_domain = 1;
1621 
1622 	if (ap_instructions_available() != 0) {
1623 		pr_warning("The hardware system does not support "
1624 			   "AP instructions\n");
1625 		return -ENODEV;
1626 	}
1627 	if (ap_interrupts_available()) {
1628 		isc_register(AP_ISC);
1629 		ap_interrupt_indicator = s390_register_adapter_interrupt(
1630 			&ap_interrupt_handler, NULL, AP_ISC);
1631 		if (IS_ERR(ap_interrupt_indicator)) {
1632 			ap_interrupt_indicator = NULL;
1633 			isc_unregister(AP_ISC);
1634 		}
1635 	}
1636 
1637 	register_reset_call(&ap_reset_call);
1638 
1639 	/* Create /sys/bus/ap. */
1640 	rc = bus_register(&ap_bus_type);
1641 	if (rc)
1642 		goto out;
1643 	for (i = 0; ap_bus_attrs[i]; i++) {
1644 		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1645 		if (rc)
1646 			goto out_bus;
1647 	}
1648 
1649 	/* Create /sys/devices/ap. */
1650 	ap_root_device = root_device_register("ap");
1651 	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1652 	if (rc)
1653 		goto out_bus;
1654 
1655 	ap_work_queue = create_singlethread_workqueue("kapwork");
1656 	if (!ap_work_queue) {
1657 		rc = -ENOMEM;
1658 		goto out_root;
1659 	}
1660 
1661 	if (ap_select_domain() == 0)
1662 		ap_scan_bus(NULL);
1663 
1664 	/* Set up the AP bus rescan timer. */
1665 	init_timer(&ap_config_timer);
1666 	ap_config_timer.function = ap_config_timeout;
1667 	ap_config_timer.data = 0;
1668 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1669 	add_timer(&ap_config_timer);
1670 
1671 	/* Set up the high resolution poll timer.
1672 	 * If we are running under z/VM, adjust polling to the z/VM polling rate.
1673 	 */
1674 	if (MACHINE_IS_VM)
1675 		poll_timeout = 1500000;
1676 	spin_lock_init(&ap_poll_timer_lock);
1677 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1678 	ap_poll_timer.function = ap_poll_timeout;
1679 
1680 	/* Start the low priority AP bus poll thread. */
1681 	if (ap_thread_flag) {
1682 		rc = ap_poll_thread_start();
1683 		if (rc)
1684 			goto out_work;
1685 	}
1686 
1687 	return 0;
1688 
1689 out_work:
1690 	del_timer_sync(&ap_config_timer);
1691 	hrtimer_cancel(&ap_poll_timer);
1692 	destroy_workqueue(ap_work_queue);
1693 out_root:
1694 	root_device_unregister(ap_root_device);
1695 out_bus:
1696 	while (i--)
1697 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1698 	bus_unregister(&ap_bus_type);
1699 out:
1700 	unregister_reset_call(&ap_reset_call);
1701 	if (ap_using_interrupts()) {
1702 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1703 		isc_unregister(AP_ISC);
1704 	}
1705 	return rc;
1706 }
1707 
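/* bus_find_device() callback that matches every device; used on module
 * exit to unregister all remaining AP devices. */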
1708 static int __ap_match_all(struct device *dev, void *data)
1709 {
1710 	return 1;
1711 }
1712 
1713 /**
1714  * ap_module_exit(): The module termination code
1715  *
1716  * Terminates the module.
1717  */
1718 void ap_module_exit(void)
1719 {
1720 	int i;
1721 	struct device *dev;
1722 
1723 	ap_reset_domain();
1724 	ap_poll_thread_stop();
1725 	del_timer_sync(&ap_config_timer);
1726 	hrtimer_cancel(&ap_poll_timer);
1727 	destroy_workqueue(ap_work_queue);
1728 	tasklet_kill(&ap_tasklet);
1729 	root_device_unregister(ap_root_device);
1730 	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1731 		    __ap_match_all)))
1732 	{
1733 		device_unregister(dev);
1734 		put_device(dev);
1735 	}
1736 	for (i = 0; ap_bus_attrs[i]; i++)
1737 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1738 	bus_unregister(&ap_bus_type);
1739 	unregister_reset_call(&ap_reset_call);
1740 	if (ap_using_interrupts()) {
1741 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1742 		isc_unregister(AP_ISC);
1743 	}
1744 }
1745 
1746 #ifndef CONFIG_ZCRYPT_MONOLITHIC
1747 module_init(ap_module_init);
1748 module_exit(ap_module_exit);
1749 #endif
1750