/*
 * drivers/s390/cio/device_status.c
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"

/*
 * Check for any kind of channel or interface control check but don't
 * issue the message for the console device.
 */
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
	if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
				 SCHN_STAT_CHN_CTRL_CHK |
				 SCHN_STAT_INTF_CTRL_CHK)))
		return;
	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
		      "received"
		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
		      ": %02X sch_stat : %02X\n",
		      cdev->private->dev_id.devno, cdev->private->schid.ssid,
		      cdev->private->schid.sch_no,
		      irb->scsw.dstat, irb->scsw.cstat);

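	/*
	 * Unless the deferred condition code says the device was not
	 * operational (cc 3), dump the complete irb to the debug feature.
	 */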
	if (irb->scsw.cc != 3) {
		char dbf_text[15];

		sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
		CIO_TRACE_EVENT(0, dbf_text);
		CIO_HEX_EVENT(0, irb, sizeof(struct irb));
	}
}

/*
 * Some paths have become not operational (the pno bit in the scsw is set).
 */
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
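	/*
	 * Re-read the schib so that pmcw.pnom reflects the paths that
	 * just became not operational.
	 */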
	stsch(sch->schid, &sch->schib);

	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
		      "not operational\n", __FUNCTION__,
		      sch->schid.ssid, sch->schid.sch_no,
		      sch->schib.pmcw.pnom);

	sch->lpm &= ~sch->schib.pmcw.pnom;
	cdev->private->flags.doverify = 1;
}

/*
 * Copy valid bits from the extended control word to the device irb.
 */
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Copy the extended control bit if it is valid... yes, there
	 * are conditions that have to be met for the extended control
	 * bit to have meaning. Sick.
	 */
	cdev->private->irb.scsw.ectl = 0;
	if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) &&
	    !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS))
		cdev->private->irb.scsw.ectl = irb->scsw.ectl;
	/* Check if the extended control word is valid. */
	if (!cdev->private->irb.scsw.ectl)
		return;
	/* Copy concurrent sense / model dependent information. */
	memcpy(&cdev->private->irb.ecw, irb->ecw, sizeof(irb->ecw));
}

/*
 * Check if the extended status word is valid.
 */
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
	if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
		return 0;
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_INTER_STATUS | SCSW_STCTL_STATUS_PEND) &&
	    !(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
		return 0;
	return 1;
}

/*
 * Copy valid bits from the extended status word to the device irb.
 */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;
	struct sublog *cdev_sublog, *sublog;

	if (!ccw_device_accumulate_esw_valid(irb))
		return;

	cdev_irb = &cdev->private->irb;

	/* Copy last path used mask. */
	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

	/* Copy subchannel logout information if the esw is of format 0. */
	if (irb->scsw.eswf) {
		cdev_sublog = &cdev_irb->esw.esw0.sublog;
		sublog = &irb->esw.esw0.sublog;
		/* Copy extended status flags. */
		cdev_sublog->esf = sublog->esf;
		/*
		 * Copy fields that have a meaning for channel data check,
		 * channel control check and interface control check.
		 */
		if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
				       SCHN_STAT_CHN_CTRL_CHK |
				       SCHN_STAT_INTF_CTRL_CHK)) {
			/* Copy ancillary report bit. */
			cdev_sublog->arep = sublog->arep;
			/* Copy field-validity flags. */
			cdev_sublog->fvf = sublog->fvf;
			/* Copy storage access code. */
			cdev_sublog->sacc = sublog->sacc;
			/* Copy termination code. */
			cdev_sublog->termc = sublog->termc;
			/* Copy sequence code. */
			cdev_sublog->seqc = sublog->seqc;
		}
		/* Copy device status check. */
		cdev_sublog->devsc = sublog->devsc;
		/* Copy secondary error. */
		cdev_sublog->serr = sublog->serr;
		/* Copy I/O-error alert. */
		cdev_sublog->ioerr = sublog->ioerr;
		/* Copy channel path timeout bit. */
		if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK)
			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
		/* Copy failing storage address validity flag. */
		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
		if (cdev_irb->esw.esw0.erw.fsavf) {
			/* ... and copy the failing storage address. */
			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
			       sizeof(irb->esw.esw0.faddr));
			/* ... and copy the failing storage address format. */
			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
		}
		/* Copy secondary ccw address validity bit. */
		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
		if (irb->esw.esw0.erw.scavf)
			/* ... and copy the secondary ccw address. */
			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;

	}
	/* FIXME: DCTI for format 2? */

	/* Copy authorization bit. */
	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
	/* Copy path verification required flag. */
	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
	if (irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
	/* Copy concurrent sense bit. */
	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
	if (irb->esw.esw0.erw.cons)
		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}

/*
 * Accumulate status from the irb to the devstat.
 */
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;

	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (irb->scsw.pno && irb->scsw.fctl != 0 &&
	    (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
	     (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
		ccw_device_path_notoper(cdev);

	/*
	 * Don't accumulate unsolicited interrupts.
	 */
	if ((irb->scsw.stctl ==
	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
	    (!irb->scsw.cc))
		return;

	cdev_irb = &cdev->private->irb;

	/*
	 * If the clear function had been performed, all formerly pending
	 * status at the subchannel has been cleared and we must not pass
	 * intermediate accumulated status to the device driver.
	 */
	if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC)
		memset(&cdev->private->irb, 0, sizeof(struct irb));

	/* Copy bits which are valid only for the start function. */
	if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) {
		/* Copy key. */
		cdev_irb->scsw.key = irb->scsw.key;
		/* Copy suspend control bit. */
		cdev_irb->scsw.sctl = irb->scsw.sctl;
		/* Accumulate deferred condition code. */
		cdev_irb->scsw.cc |= irb->scsw.cc;
		/* Copy ccw format bit. */
		cdev_irb->scsw.fmt = irb->scsw.fmt;
		/* Copy prefetch bit. */
		cdev_irb->scsw.pfch = irb->scsw.pfch;
		/* Copy initial-status-interruption-control. */
		cdev_irb->scsw.isic = irb->scsw.isic;
		/* Copy address limit checking control. */
		cdev_irb->scsw.alcc = irb->scsw.alcc;
		/* Copy suppress suspend bit. */
		cdev_irb->scsw.ssi = irb->scsw.ssi;
	}

	/* Take care of the extended control bit and extended control word. */
	ccw_device_accumulate_ecw(cdev, irb);

	/* Accumulate function control. */
	cdev_irb->scsw.fctl |= irb->scsw.fctl;
	/* Copy activity control. */
	cdev_irb->scsw.actl = irb->scsw.actl;
	/* Accumulate status control. */
	cdev_irb->scsw.stctl |= irb->scsw.stctl;
	/*
	 * Copy the ccw address if it is valid. This is a bit simplified
	 * but should be close enough for all practical purposes.
	 */
	if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) ||
	    ((irb->scsw.stctl ==
	      (SCSW_STCTL_INTER_STATUS | SCSW_STCTL_STATUS_PEND)) &&
	     (irb->scsw.actl & SCSW_ACTL_DEVACT) &&
	     (irb->scsw.actl & SCSW_ACTL_SCHACT)) ||
	    (irb->scsw.actl & SCSW_ACTL_SUSPENDED))
		cdev_irb->scsw.cpa = irb->scsw.cpa;
	/* Accumulate device status, but not the device busy flag. */
	cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY;
	/* dstat is not always valid. */
	if (irb->scsw.stctl &
	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
		cdev_irb->scsw.dstat |= irb->scsw.dstat;
	/* Accumulate subchannel status. */
	cdev_irb->scsw.cstat |= irb->scsw.cstat;
	/* Copy the residual count if it is valid. */
	if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	    (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0)
		cdev_irb->scsw.count = irb->scsw.count;

	/* Take care of bits in the extended status word. */
	ccw_device_accumulate_esw(cdev, irb);

	/*
	 * Check whether we have to issue a SENSE CCW ourselves, in case no
	 * concurrent sense facility is installed for the subchannel.
	 * No sense is required if no delayed sense is pending
	 * and we did not get a unit check without sense information.
	 *
	 * Note: We should check for ioinfo[irq]->flags.consns but VM
	 *	 violates the ESA/390 architecture and doesn't present an
	 *	 operand exception for virtual devices without concurrent
	 *	 sense facility available/supported when enabling the
	 *	 concurrent sense facility.
	 */
	if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
	    !(cdev_irb->esw.esw0.erw.cons))
		cdev->private->flags.dosense = 1;
}

/*
 * Do a basic sense.
 */
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* A sense is required, can we do it now? */
	if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
		/*
		 * We received a unit check but have no final status yet,
		 * so we must delay the SENSE processing. We must not
		 * report this intermediate status to the device
		 * interrupt handler.
		 */
		return -EBUSY;

	/*
	 * We have ending status but no sense information. Do a basic sense.
	 */
	sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
	sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
	sch->sense_ccw.count = SENSE_MAX_COUNT;
	sch->sense_ccw.flags = CCW_FLAG_SLI;

	/* Reset the internal retry indication. */
	cdev->private->flags.intretry = 0;

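	/*
	 * Start the single-ccw sense channel program; an lpm of 0xff
	 * allows any channel path to be used.
	 */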
	return cio_start(sch, &sch->sense_ccw, 0xff);
}

/*
 * Add information from basic sense to devstat.
 */
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (irb->scsw.pno && irb->scsw.fctl != 0 &&
	    (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
	     (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
		ccw_device_path_notoper(cdev);

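	/*
	 * If the basic sense completed (channel end) without a unit check,
	 * the sense data is now in the ecw; report it via the concurrent
	 * sense bit and stop requesting sense.
	 */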
	if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
	    (irb->scsw.dstat & DEV_STAT_CHN_END)) {
		cdev->private->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
	}
	/* Check if path verification is required. */
	if (ccw_device_accumulate_esw_valid(irb) &&
	    irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
}

/*
 * This function accumulates the status into the private devstat and
 * starts a basic sense if one is needed.
 */
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
	ccw_device_accumulate_irb(cdev, irb);
	if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
		return -EBUSY;
	/* Check for basic sense. */
	if (cdev->private->flags.dosense &&
	    !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
		cdev->private->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
		return 0;
	}
	if (cdev->private->flags.dosense) {
		ccw_device_do_sense(cdev, irb);
		return -EBUSY;
	}
	return 0;
}