/*
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"

/**
 * ccw_device_set_options_mask() - set some options and unset the rest
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, all flags not specified in @flags
 * are cleared.
 * Returns:
 *   %0 on success, -%EINVAL on an invalid flag combination.
 */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_clear_options() - clear some options
 * @cdev: device for which the options are to be cleared
 * @flags: options to be cleared
 *
 * All flags specified in @flags are cleared, the remainder is left untouched.
 */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}

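/*
 * Usage sketch (illustrative, not part of the original source): a CCW device
 * driver typically selects its options once before the device is set online,
 * e.g. from its probe or set_online callback. The callback name below is a
 * hypothetical example; the flag combination is one plausible choice.
 *
 *	static int example_set_online(struct ccw_device *cdev)
 *	{
 *		int rc;
 *
 *		rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *						  CCWDEV_DO_MULTIPATH);
 *		if (rc)
 *			return rc;
 *		return 0;
 *	}
 */
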
/**
 * ccw_device_is_pathgroup - determine if paths to this device are grouped
 * @cdev: ccw device
 *
 * Return non-zero if there is a path group, zero otherwise.
 */
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
	return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);

/**
 * ccw_device_is_multipath - determine if device is operating in multipath mode
 * @cdev: ccw device
 *
 * Return non-zero if device is operating in multipath mode, zero otherwise.
 */
int ccw_device_is_multipath(struct ccw_device *cdev)
{
	return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);

/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

/**
 * ccw_device_start_key() - start a s390 channel program with key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, __u8 key,
			 unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		break;
	case -EACCES:
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}

/**
 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

/**
 * ccw_device_start() - start a s390 channel program
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

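/*
 * Usage sketch (illustrative, not part of the original source): starting a
 * minimal command-mode channel program. As documented above, the caller must
 * hold the ccw device lock with interrupts disabled; completion is reported
 * through the driver's interrupt handler. "my_request" is a hypothetical tag
 * passed back via @intparm.
 *
 *	struct ccw1 cp;
 *	unsigned long flags;
 *	int rc;
 *
 *	cp.cmd_code = CCW_CMD_NOOP;
 *	cp.flags = CCW_FLAG_SLI;
 *	cp.count = 0;
 *	cp.cda = 0;
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_start(cdev, &cp, (unsigned long)my_request, 0, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 */
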
/**
 * ccw_device_start_timeout() - start a s390 channel program with timeout
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start an S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm,
			     unsigned long flags, int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}


/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

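/*
 * Usage sketch (illustrative, not part of the original source): terminating a
 * running request, e.g. from a driver's timeout handling. Escalating from
 * hsch to csch when the halt cannot be issued is one plausible pattern;
 * "my_request" is again a hypothetical tag.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_halt(cdev, (unsigned long)my_request);
 *	if (rc == -EBUSY)
 *		rc = ccw_device_clear(cdev, (unsigned long)my_request);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 */
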
/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * We call the device driver's interrupt handler if:
	 *  - we received ending status
	 *  - the handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - the interrupt is unsolicited
	 */
	stctl = scsw_stctl(&cdev->private->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}

/**
 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
 * @cdev: ccw device to inspect
 * @ct: command type to look for
 *
 * During SenseID, command information words (CIWs) describing special
 * commands available to the device may have been stored in the extended
 * sense data. This function searches for CIWs of a specified command
 * type in the extended sense data.
 * Returns:
 *  %NULL if no extended sense data has been stored or if no CIW of the
 *  specified command type could be found,
 *  else a pointer to the CIW of the specified command type.
 */
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}

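/*
 * Usage sketch (illustrative, not part of the original source): check whether
 * the device advertised a read-configuration-data command during SenseID and
 * build a CCW from the returned command information word. The surrounding
 * buffer handling is assumed.
 *
 *	struct ciw *ciw;
 *
 *	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *	if (ciw) {
 *		cp.cmd_code = ciw->cmd;
 *		cp.count = ciw->count;
 *	}
 */
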
/**
 * ccw_device_get_path_mask() - get currently available paths
 * @cdev: ccw device to be queried
 * Returns:
 *  %0 if no subchannel for the device is available,
 *  else the mask of currently available paths for the ccw device's subchannel.
 */
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev->dev.parent)
		return 0;

	sch = to_subchannel(cdev->dev.parent);
	return sch->lpm;
}

struct stlck_data {
	struct completion done;
	int rc;
};

void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}

/**
 * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
 * @cdev: device to obtain the descriptor for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel path. Return %NULL on error.
 */
struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
						  int chp_idx)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	return chp_get_chp_desc(chpid);
}

/**
 * ccw_device_get_id - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);

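/*
 * Usage sketch (illustrative, not part of the original source): retrieve the
 * subchannel set id and device number, e.g. for logging or for matching the
 * device against configuration data.
 *
 *	struct ccw_dev_id dev_id;
 *
 *	ccw_device_get_id(cdev, &dev_id);
 *	dev_info(&cdev->dev, "device 0.%x.%04x\n", dev_id.ssid, dev_id.devno);
 */
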
/**
 * ccw_device_tm_start_key - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
			    unsigned long intparm, u8 lpm, u8 key)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0)
		cdev->private->intparm = intparm;
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_key);

/**
 * ccw_device_tm_start_timeout_key - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	int ret;

	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

/**
 * ccw_device_tm_start - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
			unsigned long intparm, u8 lpm)
{
	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
				       PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);

/**
 * ccw_device_tm_start_timeout - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
			       unsigned long intparm, u8 lpm, int expires)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
					       PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);

/**
 * ccw_device_get_mdc - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-byte blocks that all paths support at least
 * for a transport command. Return values <= 0 indicate failures.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	int mdc = 0, i;

	/* Adjust requested path mask to exclude varied-off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;

		mutex_lock(&chp->lock);
		if (!chp->desc_fmt1.f) {
			mutex_unlock(&chp->lock);
			return 0;
		}
		if (!chp->desc_fmt1.r)
			mdc = 1;
		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
			    chp->desc_fmt1.mdc;
		mutex_unlock(&chp->lock);
	}

	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);

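/*
 * Usage sketch (illustrative, not part of the original source): derive an
 * upper bound for the data size of a single transport-mode request from the
 * accumulated max data count (mdc counts 64K-byte blocks). Variable names
 * are assumed.
 *
 *	int mdc;
 *	size_t max_bytes;
 *
 *	mdc = ccw_device_get_mdc(cdev, 0);
 *	if (mdc > 0)
 *		max_bytes = mdc * 0x10000UL;
 */
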
/**
 * ccw_device_tm_intrg - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);

/**
 * ccw_device_get_schid - obtain a subchannel id
 * @cdev: device to obtain the id for
 * @schid: where to fill in the values
 */
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);