xref: /openbmc/linux/drivers/s390/char/sclp.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3
#define SCLP_REQUEST_RETRY	3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	2
#define SCLP_RETRY_INTERVAL	5

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	__asm__ __volatile__(
		"   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"   ipm	  %0\n"
		"   srl	  %0,28"
		: "=&d" (cc)
		: "d" (command), "a" (__pa(sccb))
		: "cc", "memory" );
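	/* Condition code 2 means the SCLP is busy, condition code 3 means it
	 * is not operational; map these to errno values accordingly. */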
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	if (data) {
		spin_lock_irqsave(&sclp_lock, flags);
		sclp_running_state = sclp_running_state_idle;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	sclp_process_queue();
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

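	/* If another request is already active, leave this one on the queue;
	 * it will be started once the interface goes idle again. */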
	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	if (req->start_count <= SCLP_REQUEST_RETRY) {
		rc = service_call(req->command, req->sccb);
		req->start_count++;
	} else
		rc = -EIO;
	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed. */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
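	/* (req->list.prev pointing at the list head means the queue was empty
	 * before this request was added.) */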
	if (req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
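			/* Event type t corresponds to mask bit (32 - t), i.e.
			 * the masks are numbered from the most significant
			 * bit. */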
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
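	/* ext_params carries the 8-byte-aligned address of the finished SCCB
	 * in its upper bits and an event-buffer-pending indication in its
	 * low-order bits. */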
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_reading_state == sclp_reading_state_idle &&
	    sclp_activation_state == sclp_activation_state_active) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Return current Time-Of-Day clock. */
static inline u64
sclp_get_clock(void)
{
	u64 result;

	asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
	return result;
}

/* Convert interval in jiffies to TOD ticks. */
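/* (One second corresponds to roughly 2^32 TOD clock units, so shifting the
 * number of seconds left by 32 is a close enough approximation here.) */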
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long psw_mask;
	unsigned long cr0, cr0_sync;
	u64 timeout;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = sclp_get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	/* Prevent bottom half from executing once we force interrupts open */
	local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
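	/* STOSM stores the current system mask and sets the PSW
	 * external-interrupt mask bit so that service-signal interrupts are
	 * delivered while we poll below. */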
	asm volatile ("STOSM 0(%1),0x01"
		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    sclp_get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		barrier();
		cpu_relax();
	}
	/* Restore interrupt settings */
	asm volatile ("SSM 0(%0)"
		      : : "a" (&psw_mask) : "memory");
	__ctl_load(cr0, 0, 0);
	__local_bh_enable();
}

EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static inline void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

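	/* The list is rescanned from the start after every callback, because
	 * sclp_lock is dropped while the callback runs and the registration
	 * list may change underneath us. */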
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
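
/*
 * Example (illustrative sketch, not part of the original source): a
 * hypothetical consumer of the event interface registers a callback for the
 * event types it wants to receive and marks consumed buffers as processed.
 * Names prefixed "example_" are made up; the mask constant is assumed to be
 * one of the EvTyp_*_Mask defines from sclp.h.
 *
 *	static void example_receiver_fn(struct evbuf_header *evbuf)
 *	{
 *		// ... consume the event data ...
 *		evbuf->flags |= 0x80;	// mark event buffer as processed
 *	}
 *
 *	static struct sclp_register example_event = {
 *		.receive_mask = EvTyp_Msg_Mask,	// assumed mask constant
 *		.receiver_fn  = example_receiver_fn
 *	};
 *
 *	// during driver initialization:
 *	rc = sclp_register(&example_event);
 *	if (rc)
 *		// handle mask collision (-EBUSY) or init failure
 */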

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
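		/* Flag bit 0x80 marks an event buffer as processed; remove it
		 * by copying the remaining buffers over it. */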
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

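/* Layout of the SCCB used for the write-event-mask (init mask) request:
 * receive_mask/send_mask hold the masks requested by the driver,
 * sclp_receive_mask/sclp_send_mask are filled in by the SCLP on completion. */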
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(struct pt_regs *regs, __u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Register temporary handler for service-signal external interrupts
	 * (external interruption code 0x2401). */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}