1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
6  * Bugreports.to..: <Linux390@de.ibm.com>
7  * Copyright IBM Corp. 1999, 2009
8  */
9 
10 #ifndef DASD_INT_H
11 #define DASD_INT_H
12 
/* we keep the old device allocation scheme; minors are still in 0..255 */
14 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
15 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
16 
17 /*
18  * States a dasd device can have:
19  *   new: the dasd_device structure is allocated.
20  *   known: the discipline for the device is identified.
21  *   basic: the device can do basic i/o.
22  *   unfmt: the device could not be analyzed (format is unknown).
 *   ready: partition detection is done and the device can do block io.
24  *   online: the device accepts requests from the block device queue.
25  *
26  * Things to do for startup state transitions:
27  *   new -> known: find discipline for the device and create devfs entries.
28  *   known -> basic: request irq line for the device.
29  *   basic -> ready: do the initial analysis, e.g. format detection,
30  *                   do block device setup and detect partitions.
31  *   ready -> online: schedule the device tasklet.
32  * Things to do for shutdown state transitions:
33  *   online -> ready: just set the new device state.
34  *   ready -> basic: flush requests from the block device layer, clear
35  *                   partition information and reset format information.
36  *   basic -> known: terminate all requests and free irq.
37  *   known -> new: remove devfs entries and forget discipline.
38  */
39 
40 #define DASD_STATE_NEW	  0
41 #define DASD_STATE_KNOWN  1
42 #define DASD_STATE_BASIC  2
43 #define DASD_STATE_UNFMT  3
44 #define DASD_STATE_READY  4
45 #define DASD_STATE_ONLINE 5
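
/*
 * Illustrative helper only (not part of the driver): map the state values
 * above to the names used in the comment, e.g. for tracing.
 */
static inline const char *example_dasd_state_name(int state)
{
	switch (state) {
	case DASD_STATE_NEW:	return "new";
	case DASD_STATE_KNOWN:	return "known";
	case DASD_STATE_BASIC:	return "basic";
	case DASD_STATE_UNFMT:	return "unfmt";
	case DASD_STATE_READY:	return "ready";
	case DASD_STATE_ONLINE:	return "online";
	default:		return "unknown";
	}
}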
46 
47 #include <linux/module.h>
48 #include <linux/wait.h>
49 #include <linux/blkdev.h>
50 #include <linux/hdreg.h>
51 #include <linux/interrupt.h>
52 #include <linux/log2.h>
53 #include <asm/ccwdev.h>
54 #include <linux/workqueue.h>
55 #include <asm/debug.h>
56 #include <asm/dasd.h>
57 #include <asm/idals.h>
58 #include <linux/bitops.h>
59 #include <linux/blk-mq.h>
60 
61 /* DASD discipline magic */
62 #define DASD_ECKD_MAGIC 0xC5C3D2C4
63 #define DASD_DIAG_MAGIC 0xC4C9C1C7
64 #define DASD_FBA_MAGIC 0xC6C2C140
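
/*
 * The magic numbers are the EBCDIC encodings of "ECKD", "DIAG" and "FBA ";
 * they tag requests that belong to the respective discipline (see the magic
 * field of struct dasd_ccw_req below). Minimal sketch, hypothetical helper
 * that is not part of the driver:
 */
static inline const char *example_dasd_magic_name(unsigned int magic)
{
	switch (magic) {
	case DASD_ECKD_MAGIC:	return "ECKD";
	case DASD_DIAG_MAGIC:	return "DIAG";
	case DASD_FBA_MAGIC:	return "FBA";
	default:		return "unknown";
	}
}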
65 
66 /*
67  * SECTION: Type definitions
68  */
69 struct dasd_device;
70 struct dasd_block;
71 
72 /* BIT DEFINITIONS FOR SENSE DATA */
73 #define DASD_SENSE_BIT_0 0x80
74 #define DASD_SENSE_BIT_1 0x40
75 #define DASD_SENSE_BIT_2 0x20
76 #define DASD_SENSE_BIT_3 0x10
77 
78 /* BIT DEFINITIONS FOR SIM SENSE */
79 #define DASD_SIM_SENSE 0x0F
80 #define DASD_SIM_MSG_TO_OP 0x03
81 #define DASD_SIM_LOG 0x0C
82 
83 /* lock class for nested cdev lock */
84 #define CDEV_NESTED_FIRST 1
85 #define CDEV_NESTED_SECOND 2
86 
87 /*
88  * SECTION: MACROs for klogd and s390 debug feature (dbf)
89  */
90 #define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
91 do { \
92 	debug_sprintf_event(d_device->debug_area, \
93 			    d_level, \
94 			    d_str "\n", \
95 			    d_data); \
96 } while(0)
97 
98 #define DBF_EVENT(d_level, d_str, d_data...)\
99 do { \
100 	debug_sprintf_event(dasd_debug_area, \
101 			    d_level,\
102 			    d_str "\n", \
103 			    d_data); \
104 } while(0)
105 
106 #define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...)	\
107 do { \
108 	struct ccw_dev_id __dev_id;			\
109 	ccw_device_get_id(d_cdev, &__dev_id);		\
110 	debug_sprintf_event(dasd_debug_area,		\
111 			    d_level,					\
112 			    "0.%x.%04x " d_str "\n",			\
113 			    __dev_id.ssid, __dev_id.devno, d_data);	\
114 } while (0)
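
/*
 * Usage sketch for the dbf macros above, as it would be written in one of the
 * driver's .c files after including this header (the macros expect a
 * printf-style format string plus at least one argument). Hypothetical call
 * site, not part of the driver.
 */
static inline void example_dbf_usage(struct dasd_device *device, int rc)
{
	DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO returned %d", rc);
	DBF_EVENT(DBF_INFO, "global event, rc %d", rc);
}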
115 
116 /* limit size for an errorstring */
117 #define ERRORLENGTH 30
118 
119 /* definition of dbf debug levels */
120 #define	DBF_EMERG	0	/* system is unusable			*/
121 #define	DBF_ALERT	1	/* action must be taken immediately	*/
122 #define	DBF_CRIT	2	/* critical conditions			*/
123 #define	DBF_ERR		3	/* error conditions			*/
124 #define	DBF_WARNING	4	/* warning conditions			*/
125 #define	DBF_NOTICE	5	/* normal but significant condition	*/
126 #define	DBF_INFO	6	/* informational			*/
127 #define	DBF_DEBUG	6	/* debug-level messages			*/
128 
129 /* messages to be written via klogd and dbf */
130 #define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
131 do { \
132 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
133 	       dev_name(&d_device->cdev->dev), d_args); \
134 	DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
135 } while(0)
136 
137 #define MESSAGE(d_loglevel,d_string,d_args...)\
138 do { \
139 	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
140 	DBF_EVENT(DBF_ALERT, d_string, d_args); \
141 } while(0)
142 
143 /* messages to be written via klogd only */
144 #define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
145 do { \
146 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
147 	       dev_name(&d_device->cdev->dev), d_args); \
148 } while(0)
149 
150 #define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
151 do { \
152 	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
153 } while(0)
154 
155 /* Macro to calculate number of blocks per page */
156 #define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
157 
158 struct dasd_ccw_req {
159 	unsigned int magic;		/* Eye catcher */
160 	int intrc;			/* internal error, e.g. from start_IO */
161 	struct list_head devlist;	/* for dasd_device request queue */
162 	struct list_head blocklist;	/* for dasd_block request queue */
163 	struct dasd_block *block;	/* the originating block device */
164 	struct dasd_device *memdev;	/* the device used to allocate this */
165 	struct dasd_device *startdev;	/* device the request is started on */
166 	struct dasd_device *basedev;	/* base device if no block->base */
167 	void *cpaddr;			/* address of ccw or tcw */
168 	short retries;			/* A retry counter */
169 	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
170 	char status;			/* status of this request */
171 	char lpm;			/* logical path mask */
172 	unsigned long flags;        	/* flags of this request */
173 	struct dasd_queue *dq;
174 	unsigned long starttime;	/* jiffies time of request start */
175 	unsigned long expires;		/* expiration period in jiffies */
176 	void *data;			/* pointer to data area */
177 	struct irb irb;			/* device status in case of an error */
178 	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
179 	void *function; 		/* originating ERP action */
180 	void *mem_chunk;
181 
182 	unsigned long buildclk;		/* TOD-clock of request generation */
183 	unsigned long startclk;		/* TOD-clock of request start */
184 	unsigned long stopclk;		/* TOD-clock of request interrupt */
185 	unsigned long endclk;		/* TOD-clock of request termination */
186 
187 	void (*callback)(struct dasd_ccw_req *, void *data);
188 	void *callback_data;
189 	unsigned int proc_bytes;	/* bytes for partial completion */
190 	unsigned int trkcount;		/* count formatted tracks */
191 };
192 
193 /*
194  * dasd_ccw_req -> status can be:
195  */
196 #define DASD_CQR_FILLED 	0x00	/* request is ready to be processed */
197 #define DASD_CQR_DONE		0x01	/* request is completed successfully */
198 #define DASD_CQR_NEED_ERP	0x02	/* request needs recovery action */
199 #define DASD_CQR_IN_ERP 	0x03	/* request is in recovery */
200 #define DASD_CQR_FAILED 	0x04	/* request is finally failed */
201 #define DASD_CQR_TERMINATED	0x05	/* request was stopped by driver */
202 
203 #define DASD_CQR_QUEUED 	0x80	/* request is queued to be processed */
204 #define DASD_CQR_IN_IO		0x81	/* request is currently in IO */
205 #define DASD_CQR_ERROR		0x82	/* request is completed with error */
206 #define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
207 #define DASD_CQR_CLEARED	0x84	/* request was cleared */
208 #define DASD_CQR_SUCCESS	0x85	/* request was successful */
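
/*
 * Sketch of how the status values are typically evaluated once a request has
 * left the queues (illustrative helper, not part of the driver; assumes the
 * usual errno definitions are available via the includes above).
 */
static inline int example_cqr_final_rc(const struct dasd_ccw_req *cqr)
{
	if (cqr->status == DASD_CQR_DONE)
		return 0;
	if (cqr->status == DASD_CQR_FAILED)
		return cqr->intrc ? cqr->intrc : -EIO;
	return -EINPROGRESS;	/* still queued, in I/O or in ERP */
}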
209 
/* default expiration time */
211 #define DASD_EXPIRES	  300
212 #define DASD_EXPIRES_MAX  40000000
213 #define DASD_RETRIES	  256
214 #define DASD_RETRIES_MAX  32768
215 
216 /* per dasd_ccw_req flags */
217 #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
218 #define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
219 #define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
220 #define DASD_CQR_ALLOW_SLOCK	 3	/* Try this request even when lock was
221 					 * stolen. Should not be combined with
222 					 * DASD_CQR_FLAGS_USE_ERP
223 					 */
224 /*
225  * The following flags are used to suppress output of certain errors.
226  */
227 #define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
228 #define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
229 #define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
230 #define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */
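
/*
 * The flag numbers above index bits in dasd_ccw_req->flags and are used with
 * the regular bitops helpers. Minimal sketch (hypothetical helper, not part
 * of the driver); per the comment above, DASD_CQR_ALLOW_SLOCK should not be
 * combined with DASD_CQR_FLAGS_USE_ERP.
 */
static inline void example_mark_failfast(struct dasd_ccw_req *cqr)
{
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		clear_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
}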
231 
232 #define DASD_REQ_PER_DEV 4
233 
234 /* Signature for error recovery functions. */
235 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
236 
237 /*
 * A single CQR can contain a maximum of 255 CCWs. It is limited by the
 * count field of the Locate Record and Locate Record Extended commands,
 * which is only one byte wide.
241  */
242 #define DASD_CQR_MAX_CCW 255
243 
244 /*
245  * Unique identifier for dasd device.
246  */
247 #define UA_NOT_CONFIGURED  0x00
248 #define UA_BASE_DEVICE	   0x01
249 #define UA_BASE_PAV_ALIAS  0x02
250 #define UA_HYPER_PAV_ALIAS 0x03
251 
252 struct dasd_uid {
253 	__u8 type;
254 	char vendor[4];
255 	char serial[15];
256 	__u16 ssid;
257 	__u8 real_unit_addr;
258 	__u8 base_unit_addr;
259 	char vduit[33];
260 };
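
/*
 * Illustrative use of the uid type field (hypothetical helper, not part of
 * the driver): distinguish a base device from a PAV alias.
 */
static inline int example_uid_is_alias(const struct dasd_uid *uid)
{
	return uid->type == UA_BASE_PAV_ALIAS ||
	       uid->type == UA_HYPER_PAV_ALIAS;
}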
261 
262 /*
263  * the struct dasd_discipline is
 * something like a table of virtual functions, if you think of dasd_eckd
 * inheriting from dasd...
266  * no, currently we are not planning to reimplement the driver in C++
267  */
268 struct dasd_discipline {
269 	struct module *owner;
270 	char ebcname[8];	/* a name used for tagging and printks */
271 	char name[8];		/* a name used for tagging and printks */
272 
273 	struct list_head list;	/* used for list of disciplines */
274 
275 	/*
276 	 * Device recognition functions. check_device is used to verify
277 	 * the sense data and the information returned by read device
278 	 * characteristics. It returns 0 if the discipline can be used
279 	 * for the device in question. uncheck_device is called during
280 	 * device shutdown to deregister a device from its discipline.
281 	 */
282 	int (*check_device) (struct dasd_device *);
283 	void (*uncheck_device) (struct dasd_device *);
284 
285 	/*
286 	 * do_analysis is used in the step from device state "basic" to
	 * state "ready". It returns 0 if the device can be made ready,
288 	 * it returns -EMEDIUMTYPE if the device can't be made ready or
289 	 * -EAGAIN if do_analysis started a ccw that needs to complete
290 	 * before the analysis may be repeated.
291 	 */
292 	int (*do_analysis) (struct dasd_block *);
293 
294 	/*
	 * This function is called when new paths become available.
	 * Disciplines may use this callback to do necessary setup work,
	 * e.g. verify that a new path is compatible with the current
298 	 * configuration.
299 	 */
300 	int (*pe_handler)(struct dasd_device *, __u8, __u8);
301 
302 	/*
303 	 * Last things to do when a device is set online, and first things
304 	 * when it is set offline.
305 	 */
306 	int (*basic_to_ready) (struct dasd_device *);
307 	int (*online_to_ready) (struct dasd_device *);
308 	int (*basic_to_known)(struct dasd_device *);
309 
310 	/*
311 	 * Initialize block layer request queue.
312 	 */
313 	void (*setup_blk_queue)(struct dasd_block *);
	/*
315 	 * Device operation functions. build_cp creates a ccw chain for
316 	 * a block device request, start_io starts the request and
317 	 * term_IO cancels it (e.g. in case of a timeout). format_device
318 	 * formats the device and check_device_format compares the format of
319 	 * a device with the expected format_data.
	 * handle_terminated_request allows the driver to examine a cqr and prepare
321 	 * it for retry.
322 	 */
323 	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
324 					  struct dasd_block *,
325 					  struct request *);
326 	int (*start_IO) (struct dasd_ccw_req *);
327 	int (*term_IO) (struct dasd_ccw_req *);
328 	void (*handle_terminated_request) (struct dasd_ccw_req *);
329 	int (*format_device) (struct dasd_device *,
330 			      struct format_data_t *, int);
331 	int (*check_device_format)(struct dasd_device *,
332 				   struct format_check_t *, int);
333 	int (*free_cp) (struct dasd_ccw_req *, struct request *);
334 
335 	/*
	 * Error recovery functions. erp_action() is called for a request
	 * that needs recovery; it returns the recovery function that builds
	 * the special error recovery ccw chain for the failed request.
	 * erp_postaction() is called after
340 	 * an error recovery ccw has finished its execution. dump_sense
341 	 * is called for every error condition to print the sense data
342 	 * to the console.
343 	 */
344 	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
345 	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
346 	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
347 			    struct irb *);
348 	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
349 	void (*check_for_device_change) (struct dasd_device *,
350 					 struct dasd_ccw_req *,
351 					 struct irb *);
352 
353         /* i/o control functions. */
354 	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
355 	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
356 	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
357 
358 	/* reload device after state change */
359 	int (*reload) (struct dasd_device *);
360 
361 	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
362 	void (*kick_validate) (struct dasd_device *);
363 	int (*check_attention)(struct dasd_device *, __u8);
364 	int (*host_access_count)(struct dasd_device *);
365 	int (*hosts_print)(struct dasd_device *, struct seq_file *);
366 	void (*handle_hpf_error)(struct dasd_device *, struct irb *);
367 	void (*disable_hpf)(struct dasd_device *);
368 	int (*hpf_enabled)(struct dasd_device *);
369 	void (*reset_path)(struct dasd_device *, __u8);
370 
371 	/*
372 	 * Extent Space Efficient (ESE) relevant functions
373 	 */
374 	int (*is_ese)(struct dasd_device *);
375 	/* Capacity */
376 	int (*space_allocated)(struct dasd_device *);
377 	int (*space_configured)(struct dasd_device *);
378 	int (*logical_capacity)(struct dasd_device *);
379 	int (*release_space)(struct dasd_device *, struct format_data_t *);
380 	/* Extent Pool */
381 	int (*ext_pool_id)(struct dasd_device *);
382 	int (*ext_size)(struct dasd_device *);
383 	int (*ext_pool_cap_at_warnlevel)(struct dasd_device *);
384 	int (*ext_pool_warn_thrshld)(struct dasd_device *);
385 	int (*ext_pool_oos)(struct dasd_device *);
386 	int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
387 	struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
388 					   struct dasd_ccw_req *, struct irb *);
389 	int (*ese_read)(struct dasd_ccw_req *, struct irb *);
390 };
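
/*
 * Minimal sketch of how a discipline fills this table of callbacks; the names
 * and the callback body are hypothetical, and real disciplines (dasd_eckd,
 * dasd_fba, dasd_diag) implement many more of the functions above.
 */
static int example_check_device(struct dasd_device *device)
{
	return 0;	/* 0 means: this discipline handles the device */
}

static struct dasd_discipline example_discipline = {
	.owner		= THIS_MODULE,
	.name		= "XMPL",
	.ebcname	= "XMPL",
	.check_device	= example_check_device,
};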
391 
392 extern struct dasd_discipline *dasd_diag_discipline_pointer;
393 
394 /*
395  * Notification numbers for extended error reporting notifications:
 * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
397  * eer pointer) is freed. The error reporting module needs to do all necessary
398  * cleanup steps.
399  * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
400  */
401 #define DASD_EER_DISABLE 0
402 #define DASD_EER_TRIGGER 1
403 
404 /* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
405 #define DASD_EER_FATALERROR  1
406 #define DASD_EER_NOPATH      2
407 #define DASD_EER_STATECHANGE 3
408 #define DASD_EER_PPRCSUSPEND 4
409 #define DASD_EER_NOSPC	     5
410 
411 /* DASD path handling */
412 
413 #define DASD_PATH_OPERATIONAL  1
414 #define DASD_PATH_TBV	       2
415 #define DASD_PATH_PP	       3
416 #define DASD_PATH_NPP	       4
417 #define DASD_PATH_MISCABLED    5
418 #define DASD_PATH_NOHPF        6
419 #define DASD_PATH_CUIR	       7
420 #define DASD_PATH_IFCC	       8
421 #define DASD_PATH_FCSEC	       9
422 
423 #define DASD_THRHLD_MAX		4294967295U
424 #define DASD_INTERVAL_MAX	4294967295U
425 
426 /* FC Endpoint Security Capabilities */
427 #define DASD_FC_SECURITY_UNSUP		0
428 #define DASD_FC_SECURITY_AUTH		1
429 #define DASD_FC_SECURITY_ENC_FCSP2	2
430 #define DASD_FC_SECURITY_ENC_ERAS	3
431 
432 #define DASD_FC_SECURITY_ENC_STR	"Encryption"
433 static const struct {
434 	u8 value;
435 	char *name;
436 } dasd_path_fcs_mnemonics[] = {
437 	{ DASD_FC_SECURITY_UNSUP,	"Unsupported" },
438 	{ DASD_FC_SECURITY_AUTH,	"Authentication" },
439 	{ DASD_FC_SECURITY_ENC_FCSP2,	DASD_FC_SECURITY_ENC_STR },
440 	{ DASD_FC_SECURITY_ENC_ERAS,	DASD_FC_SECURITY_ENC_STR },
441 };
442 
443 static inline char *dasd_path_get_fcs_str(int val)
444 {
445 	int i;
446 
447 	for (i = 0; i < ARRAY_SIZE(dasd_path_fcs_mnemonics); i++) {
448 		if (dasd_path_fcs_mnemonics[i].value == val)
449 			return dasd_path_fcs_mnemonics[i].name;
450 	}
451 
452 	return dasd_path_fcs_mnemonics[0].name;
453 }
454 
455 struct dasd_path {
456 	unsigned long flags;
457 	u8 cssid;
458 	u8 ssid;
459 	u8 chpid;
460 	struct dasd_conf_data *conf_data;
461 	atomic_t error_count;
462 	unsigned long errorclk;
463 	u8 fc_security;
464 	struct kobject kobj;
465 	bool in_sysfs;
466 };
467 
468 #define to_dasd_path(path) container_of(path, struct dasd_path, kobj)
469 
470 static inline void dasd_path_release(struct kobject *kobj)
471 {
472 /* Memory for the dasd_path kobject is freed when dasd_free_device() is called */
473 }
474 
475 
476 struct dasd_profile_info {
477 	/* legacy part of profile data, as in dasd_profile_info_t */
478 	unsigned int dasd_io_reqs;	 /* number of requests processed */
479 	unsigned int dasd_io_sects;	 /* number of sectors processed */
	unsigned int dasd_io_secs[32];	 /* histogram of request sizes */
	unsigned int dasd_io_times[32];	 /* histogram of request times */
	unsigned int dasd_io_timps[32];	 /* hist. of request times per sector */
483 	unsigned int dasd_io_time1[32];	 /* hist. of time from build to start */
484 	unsigned int dasd_io_time2[32];	 /* hist. of time from start to irq */
	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq, per sector */
486 	unsigned int dasd_io_time3[32];	 /* hist. of time from irq to end */
487 	unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
488 
489 	/* new data */
490 	struct timespec64 starttod;	   /* time of start or last reset */
491 	unsigned int dasd_io_alias;	   /* requests using an alias */
492 	unsigned int dasd_io_tpm;	   /* requests using transport mode */
493 	unsigned int dasd_read_reqs;	   /* total number of read  requests */
	unsigned int dasd_read_sects;	   /* total number of read sectors */
	unsigned int dasd_read_alias;	   /* read requests using an alias */
	unsigned int dasd_read_tpm;	   /* read requests in transport mode */
	unsigned int dasd_read_secs[32];   /* histogram of request sizes */
	unsigned int dasd_read_times[32];  /* histogram of request times */
499 	unsigned int dasd_read_time1[32];  /* hist. time from build to start */
500 	unsigned int dasd_read_time2[32];  /* hist. of time from start to irq */
501 	unsigned int dasd_read_time3[32];  /* hist. of time from irq to end */
502 	unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
503 	unsigned long dasd_sum_times;	   /* sum of request times */
504 	unsigned long dasd_sum_time_str;   /* sum of time from build to start */
505 	unsigned long dasd_sum_time_irq;   /* sum of time from start to irq */
506 	unsigned long dasd_sum_time_end;   /* sum of time from irq to end */
507 };
508 
509 struct dasd_profile {
510 	struct dentry *dentry;
511 	struct dasd_profile_info *data;
512 	spinlock_t lock;
513 };
514 
515 struct dasd_format_entry {
516 	struct list_head list;
517 	sector_t track;
518 };
519 
520 struct dasd_device {
521 	/* Block device stuff. */
522 	struct dasd_block *block;
523 
524         unsigned int devindex;
525 	unsigned long flags;	   /* per device flags */
526 	unsigned short features;   /* copy of devmap-features (read-only!) */
527 
528 	/* extended error reporting stuff (eer) */
529 	struct dasd_ccw_req *eer_cqr;
530 
531 	/* Device discipline stuff. */
532 	struct dasd_discipline *discipline;
533 	struct dasd_discipline *base_discipline;
534 	void *private;
535 	struct dasd_path path[8];
536 	__u8 opm;
537 
538 	/* Device state and target state. */
539 	int state, target;
540 	struct mutex state_mutex;
541 	int stopped;		/* device (ccw_device_start) was stopped */
542 
543 	/* reference count. */
544         atomic_t ref_count;
545 
546 	/* ccw queue and memory for static ccw/erp buffers. */
547 	struct list_head ccw_queue;
548 	spinlock_t mem_lock;
549 	void *ccw_mem;
550 	void *erp_mem;
551 	void *ese_mem;
552 	struct list_head ccw_chunks;
553 	struct list_head erp_chunks;
554 	struct list_head ese_chunks;
555 
556 	atomic_t tasklet_scheduled;
557         struct tasklet_struct tasklet;
558 	struct work_struct kick_work;
559 	struct work_struct reload_device;
560 	struct work_struct kick_validate;
561 	struct work_struct suc_work;
562 	struct work_struct requeue_requests;
563 	struct timer_list timer;
564 
565 	debug_info_t *debug_area;
566 
567 	struct ccw_device *cdev;
568 
569 	/* hook for alias management */
570 	struct list_head alias_list;
571 
572 	/* default expiration time in s */
573 	unsigned long default_expires;
574 	unsigned long default_retries;
575 
576 	unsigned long blk_timeout;
577 
578 	unsigned long path_thrhld;
579 	unsigned long path_interval;
580 
581 	struct dentry *debugfs_dentry;
582 	struct dentry *hosts_dentry;
583 	struct dasd_profile profile;
584 	struct dasd_format_entry format_entry;
585 	struct kset *paths_info;
586 };
587 
588 struct dasd_block {
589 	/* Block device stuff. */
590 	struct gendisk *gdp;
591 	struct request_queue *request_queue;
592 	spinlock_t request_queue_lock;
593 	struct blk_mq_tag_set tag_set;
594 	struct block_device *bdev;
595 	atomic_t open_count;
596 
597 	unsigned long blocks;	   /* size of volume in blocks */
598 	unsigned int bp_block;	   /* bytes per block */
599 	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
600 
601 	struct dasd_device *base;
602 	struct list_head ccw_queue;
603 	spinlock_t queue_lock;
604 
605 	atomic_t tasklet_scheduled;
606 	struct tasklet_struct tasklet;
607 	struct timer_list timer;
608 
609 	struct dentry *debugfs_dentry;
610 	struct dasd_profile profile;
611 
612 	struct list_head format_list;
613 	spinlock_t format_lock;
614 	atomic_t trkcount;
615 };
616 
617 struct dasd_attention_data {
618 	struct dasd_device *device;
619 	__u8 lpum;
620 };
621 
622 struct dasd_queue {
623 	spinlock_t lock;
624 };
625 
626 /* reasons why device (ccw_device_start) was stopped */
627 #define DASD_STOPPED_NOT_ACC 1         /* not accessible */
628 #define DASD_STOPPED_QUIESCE 2         /* Quiesced */
629 #define DASD_STOPPED_PENDING 4         /* long busy */
630 #define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
631 #define DASD_STOPPED_SU      16        /* summary unit check handling */
632 #define DASD_STOPPED_NOSPC   128       /* no space left */
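
/*
 * The values above are or-ed into dasd_device->stopped. Sketch of a typical
 * test (hypothetical helper, not part of the driver):
 */
static inline int example_device_is_quiesced(struct dasd_device *device)
{
	return device->stopped & DASD_STOPPED_QUIESCE;
}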
633 
634 /* per device flags */
635 #define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
636 #define DASD_FLAG_EER_SNSS	4	/* A SNSS is required */
637 #define DASD_FLAG_EER_IN_USE	5	/* A SNSS request is running */
638 #define DASD_FLAG_DEVICE_RO	6	/* The device itself is read-only. Don't
639 					 * confuse this with the user specified
640 					 * read-only feature.
641 					 */
642 #define DASD_FLAG_IS_RESERVED	7	/* The device is reserved */
643 #define DASD_FLAG_LOCK_STOLEN	8	/* The device lock was stolen */
644 #define DASD_FLAG_SUSPENDED	9	/* The device was suspended */
645 #define DASD_FLAG_SAFE_OFFLINE	10	/* safe offline processing requested*/
646 #define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
647 #define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
648 #define DASD_FLAG_PATH_VERIFY	13	/* Path verification worker running */
649 #define DASD_FLAG_SUC		14	/* unhandled summary unit check */
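
/*
 * The per device flags are bit numbers in dasd_device->flags and are used
 * with the bitops helpers. Sketch (hypothetical helper, not part of the
 * driver):
 */
static inline int example_device_safe_offline_requested(struct dasd_device *device)
{
	return test_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
}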
650 
651 #define DASD_SLEEPON_START_TAG	((void *) 1)
652 #define DASD_SLEEPON_END_TAG	((void *) 2)
653 
654 void dasd_put_device_wake(struct dasd_device *);
655 
656 /*
657  * Reference count inliners
658  */
659 static inline void
660 dasd_get_device(struct dasd_device *device)
661 {
662 	atomic_inc(&device->ref_count);
663 }
664 
665 static inline void
666 dasd_put_device(struct dasd_device *device)
667 {
668 	if (atomic_dec_return(&device->ref_count) == 0)
669 		dasd_put_device_wake(device);
670 }
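
/*
 * Sketch of the usual get/put bracket around work that dereferences the
 * device outside the caller's context (hypothetical, not part of the driver).
 */
static inline void example_with_device_reference(struct dasd_device *device)
{
	dasd_get_device(device);
	/* ... hand the device to a tasklet, worker or timer ... */
	dasd_put_device(device);
}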
671 
672 /*
673  * The static memory in ccw_mem and erp_mem is managed by a sorted
674  * list of free memory chunks.
675  */
676 struct dasd_mchunk
677 {
678 	struct list_head list;
679 	unsigned long size;
680 } __attribute__ ((aligned(8)));
681 
682 static inline void
683 dasd_init_chunklist(struct list_head *chunk_list, void *mem,
684 		    unsigned long size)
685 {
686 	struct dasd_mchunk *chunk;
687 
688 	INIT_LIST_HEAD(chunk_list);
689 	chunk = (struct dasd_mchunk *) mem;
690 	chunk->size = size - sizeof(struct dasd_mchunk);
691 	list_add(&chunk->list, chunk_list);
692 }
693 
694 static inline void *
695 dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
696 {
697 	struct dasd_mchunk *chunk, *tmp;
698 
699 	size = (size + 7L) & -8L;
700 	list_for_each_entry(chunk, chunk_list, list) {
701 		if (chunk->size < size)
702 			continue;
703 		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
704 			char *endaddr = (char *) (chunk + 1) + chunk->size;
705 			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
706 			tmp->size = size;
707 			chunk->size -= size + sizeof(struct dasd_mchunk);
708 			chunk = tmp;
709 		} else
710 			list_del(&chunk->list);
711 		return (void *) (chunk + 1);
712 	}
713 	return NULL;
714 }
715 
716 static inline void
717 dasd_free_chunk(struct list_head *chunk_list, void *mem)
718 {
719 	struct dasd_mchunk *chunk, *tmp;
720 	struct list_head *p, *left;
721 
722 	chunk = (struct dasd_mchunk *)
723 		((char *) mem - sizeof(struct dasd_mchunk));
724 	/* Find out the left neighbour in chunk_list. */
725 	left = chunk_list;
726 	list_for_each(p, chunk_list) {
727 		if (list_entry(p, struct dasd_mchunk, list) > chunk)
728 			break;
729 		left = p;
730 	}
731 	/* Try to merge with right neighbour = next element from left. */
732 	if (left->next != chunk_list) {
733 		tmp = list_entry(left->next, struct dasd_mchunk, list);
734 		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
735 			list_del(&tmp->list);
736 			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
737 		}
738 	}
739 	/* Try to merge with left neighbour. */
740 	if (left != chunk_list) {
741 		tmp = list_entry(left, struct dasd_mchunk, list);
742 		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
743 			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
744 			return;
745 		}
746 	}
747 	__list_add(&chunk->list, left, left->next);
748 }
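
/*
 * Usage sketch for the chunk allocator above (hypothetical helper, not part
 * of the driver): carve a caller-provided buffer into a chunk list and take
 * one small allocation from it; the allocation size is an arbitrary
 * assumption.
 */
static inline void *example_chunk_usage(struct list_head *chunks, void *mem,
					unsigned long mem_size)
{
	dasd_init_chunklist(chunks, mem, mem_size);
	return dasd_alloc_chunk(chunks, 64);	/* NULL if it does not fit */
}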
749 
750 /*
751  * Check if bsize is in { 512, 1024, 2048, 4096 }
752  */
753 static inline int
754 dasd_check_blocksize(int bsize)
755 {
756 	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
757 		return -EMEDIUMTYPE;
758 	return 0;
759 }
760 
761 /*
762  * return the callback data of the original request in case there are
 * ERP requests built on top of it
764  */
765 static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
766 {
767 	while (cqr->refers)
768 		cqr = cqr->refers;
769 
770 	return cqr->callback_data;
771 }
772 
773 /* externals in dasd.c */
774 #define DASD_PROFILE_OFF	 0
775 #define DASD_PROFILE_ON 	 1
776 #define DASD_PROFILE_GLOBAL_ONLY 2
777 
778 extern debug_info_t *dasd_debug_area;
779 extern struct dasd_profile dasd_global_profile;
780 extern unsigned int dasd_global_profile_level;
781 extern const struct block_device_operations dasd_device_operations;
782 
783 extern struct kmem_cache *dasd_page_cache;
784 
785 struct dasd_ccw_req *
786 dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
787 struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *);
788 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
789 void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *);
790 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
791 
792 struct dasd_device *dasd_alloc_device(void);
793 void dasd_free_device(struct dasd_device *);
794 
795 struct dasd_block *dasd_alloc_block(void);
796 void dasd_free_block(struct dasd_block *);
797 
798 enum blk_eh_timer_return dasd_times_out(struct request *req);
799 
800 void dasd_enable_device(struct dasd_device *);
801 void dasd_set_target_state(struct dasd_device *, int);
802 void dasd_kick_device(struct dasd_device *);
803 void dasd_reload_device(struct dasd_device *);
804 void dasd_schedule_requeue(struct dasd_device *);
805 
806 void dasd_add_request_head(struct dasd_ccw_req *);
807 void dasd_add_request_tail(struct dasd_ccw_req *);
808 int  dasd_start_IO(struct dasd_ccw_req *);
809 int  dasd_term_IO(struct dasd_ccw_req *);
810 void dasd_schedule_device_bh(struct dasd_device *);
811 void dasd_schedule_block_bh(struct dasd_block *);
812 int  dasd_sleep_on(struct dasd_ccw_req *);
813 int  dasd_sleep_on_queue(struct list_head *);
814 int  dasd_sleep_on_immediatly(struct dasd_ccw_req *);
815 int  dasd_sleep_on_queue_interruptible(struct list_head *);
816 int  dasd_sleep_on_interruptible(struct dasd_ccw_req *);
817 void dasd_device_set_timer(struct dasd_device *, int);
818 void dasd_device_clear_timer(struct dasd_device *);
819 void dasd_block_set_timer(struct dasd_block *, int);
820 void dasd_block_clear_timer(struct dasd_block *);
821 int  dasd_cancel_req(struct dasd_ccw_req *);
822 int dasd_flush_device_queue(struct dasd_device *);
823 int dasd_generic_probe(struct ccw_device *);
824 void dasd_generic_free_discipline(struct dasd_device *);
825 void dasd_generic_remove (struct ccw_device *cdev);
826 int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
827 int dasd_generic_set_offline (struct ccw_device *cdev);
828 int dasd_generic_notify(struct ccw_device *, int);
829 int dasd_generic_last_path_gone(struct dasd_device *);
830 int dasd_generic_path_operational(struct dasd_device *);
831 void dasd_generic_shutdown(struct ccw_device *);
832 
833 void dasd_generic_handle_state_change(struct dasd_device *);
834 enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
835 void dasd_generic_path_event(struct ccw_device *, int *);
836 int dasd_generic_verify_path(struct dasd_device *, __u8);
837 void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
838 void dasd_generic_space_avail(struct dasd_device *);
839 
840 int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
841 char *dasd_get_sense(struct irb *);
842 
843 void dasd_device_set_stop_bits(struct dasd_device *, int);
844 void dasd_device_remove_stop_bits(struct dasd_device *, int);
845 
846 int dasd_device_is_ro(struct dasd_device *);
847 
848 void dasd_profile_reset(struct dasd_profile *);
849 int dasd_profile_on(struct dasd_profile *);
850 void dasd_profile_off(struct dasd_profile *);
851 char *dasd_get_user_string(const char __user *, size_t);
852 
853 /* externals in dasd_devmap.c */
854 extern int dasd_max_devindex;
855 extern int dasd_probeonly;
856 extern int dasd_autodetect;
857 extern int dasd_nopav;
858 extern int dasd_nofcx;
859 
860 int dasd_devmap_init(void);
861 void dasd_devmap_exit(void);
862 
863 struct dasd_device *dasd_create_device(struct ccw_device *);
864 void dasd_delete_device(struct dasd_device *);
865 
866 int dasd_get_feature(struct ccw_device *, int);
867 int dasd_set_feature(struct ccw_device *, int, int);
868 
869 extern const struct attribute_group *dasd_dev_groups[];
870 void dasd_path_create_kobj(struct dasd_device *, int);
871 void dasd_path_create_kobjects(struct dasd_device *);
872 void dasd_path_remove_kobjects(struct dasd_device *);
873 
874 struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
875 struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
876 struct dasd_device *dasd_device_from_devindex(int);
877 
878 void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
879 struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
880 
881 int dasd_parse(void) __init;
882 int dasd_busid_known(const char *);
883 
884 /* externals in dasd_gendisk.c */
885 int  dasd_gendisk_init(void);
886 void dasd_gendisk_exit(void);
887 int dasd_gendisk_alloc(struct dasd_block *);
888 void dasd_gendisk_free(struct dasd_block *);
889 int dasd_scan_partitions(struct dasd_block *);
890 void dasd_destroy_partitions(struct dasd_block *);
891 
892 /* externals in dasd_ioctl.c */
893 int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
894 int dasd_set_read_only(struct block_device *bdev, bool ro);
895 
896 /* externals in dasd_proc.c */
897 int dasd_proc_init(void);
898 void dasd_proc_exit(void);
899 
900 /* externals in dasd_erp.c */
901 struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
902 struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
903 struct dasd_ccw_req *dasd_alloc_erp_request(unsigned int, int, int,
904 					    struct dasd_device *);
905 void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
906 void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
907 void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
908 
909 /* externals in dasd_3990_erp.c */
910 struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
911 void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
912 
913 /* externals in dasd_eer.c */
914 #ifdef CONFIG_DASD_EER
915 int dasd_eer_init(void);
916 void dasd_eer_exit(void);
917 int dasd_eer_enable(struct dasd_device *);
918 void dasd_eer_disable(struct dasd_device *);
919 void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
920 		    unsigned int id);
921 void dasd_eer_snss(struct dasd_device *);
922 
923 static inline int dasd_eer_enabled(struct dasd_device *device)
924 {
925 	return device->eer_cqr != NULL;
926 }
927 #else
928 #define dasd_eer_init()		(0)
929 #define dasd_eer_exit()		do { } while (0)
930 #define dasd_eer_enable(d)	(0)
931 #define dasd_eer_disable(d)	do { } while (0)
932 #define dasd_eer_write(d,c,i)	do { } while (0)
933 #define dasd_eer_snss(d)	do { } while (0)
934 #define dasd_eer_enabled(d)	(0)
#endif	/* CONFIG_DASD_EER */
936 
937 
938 /* DASD path handling functions */
939 
940 /*
 * helper functions to test and modify the path flags of a given channel path
942  */
943 static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
944 {
945 	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
946 }
947 
948 static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
949 {
950 	return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
951 }
952 
953 static inline void dasd_path_verify(struct dasd_device *device, int chp)
954 {
955 	__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
956 }
957 
958 static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
959 {
960 	__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
961 }
962 
963 static inline void dasd_path_clear_all_verify(struct dasd_device *device)
964 {
965 	int chp;
966 
967 	for (chp = 0; chp < 8; chp++)
968 		dasd_path_clear_verify(device, chp);
969 }
970 
971 static inline void dasd_path_fcsec(struct dasd_device *device, int chp)
972 {
973 	__set_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
974 }
975 
976 static inline void dasd_path_clear_fcsec(struct dasd_device *device, int chp)
977 {
978 	__clear_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
979 }
980 
981 static inline int dasd_path_need_fcsec(struct dasd_device *device, int chp)
982 {
983 	return test_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
984 }
985 
986 static inline void dasd_path_clear_all_fcsec(struct dasd_device *device)
987 {
988 	int chp;
989 
990 	for (chp = 0; chp < 8; chp++)
991 		dasd_path_clear_fcsec(device, chp);
992 }
993 
994 static inline void dasd_path_operational(struct dasd_device *device, int chp)
995 {
996 	__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
997 	device->opm |= (0x80 >> chp);
998 }
999 
1000 static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
1001 {
1002 	__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
1003 }
1004 
1005 static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
1006 {
1007 	return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
1008 }
1009 
1010 static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
1011 						int chp)
1012 {
1013 	__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
1014 }
1015 
1016 static inline void dasd_path_preferred(struct dasd_device *device, int chp)
1017 {
1018 	__set_bit(DASD_PATH_PP, &device->path[chp].flags);
1019 }
1020 
1021 static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
1022 {
1023 	return test_bit(DASD_PATH_PP, &device->path[chp].flags);
1024 }
1025 
1026 static inline void dasd_path_clear_preferred(struct dasd_device *device,
1027 					     int chp)
1028 {
1029 	__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
1030 }
1031 
1032 static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
1033 {
1034 	__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
1035 	device->opm &= ~(0x80 >> chp);
1036 }
1037 
1038 static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
1039 {
1040 	__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
1041 }
1042 
1043 static inline void dasd_path_cuir(struct dasd_device *device, int chp)
1044 {
1045 	__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
1046 }
1047 
1048 static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
1049 {
1050 	return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
1051 }
1052 
1053 static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
1054 {
1055 	__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
1056 }
1057 
1058 static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
1059 {
1060 	set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1061 }
1062 
1063 static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
1064 {
1065 	return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1066 }
1067 
1068 static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
1069 {
1070 	clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1071 }
1072 
1073 static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
1074 {
1075 	__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1076 }
1077 
1078 static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
1079 {
1080 	__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
1081 }
1082 
1083 static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
1084 {
1085 	return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
1086 }
1087 
1088 static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
1089 {
1090 	__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1091 }
1092 
1093 static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
1094 {
1095 	return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1096 }
1097 
1098 /*
1099  * get functions for path masks
 * will return a path mask for the given device
1101  */
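
/*
 * Path masks use one bit per channel path, with the most significant bit
 * standing for path 0 (see the "0x80 >> chp" pattern used throughout this
 * file). Illustrative helper, not part of the driver:
 */
static inline int example_count_paths(__u8 pm)
{
	int chp, count = 0;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			count++;
	return count;
}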
1102 
1103 static inline __u8 dasd_path_get_opm(struct dasd_device *device)
1104 {
1105 	return device->opm;
1106 }
1107 
1108 static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
1109 {
1110 	int chp;
1111 	__u8 tbvpm = 0x00;
1112 
1113 	for (chp = 0; chp < 8; chp++)
1114 		if (dasd_path_need_verify(device, chp))
1115 			tbvpm |= 0x80 >> chp;
1116 	return tbvpm;
1117 }
1118 
1119 static inline int dasd_path_get_fcsecpm(struct dasd_device *device)
1120 {
1121 	int chp;
1122 
1123 	for (chp = 0; chp < 8; chp++)
1124 		if (dasd_path_need_fcsec(device, chp))
1125 			return 1;
1126 
1127 	return 0;
1128 }
1129 
1130 static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
1131 {
1132 	int chp;
1133 	__u8 npm = 0x00;
1134 
1135 	for (chp = 0; chp < 8; chp++) {
1136 		if (dasd_path_is_nonpreferred(device, chp))
1137 			npm |= 0x80 >> chp;
1138 	}
1139 	return npm;
1140 }
1141 
1142 static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
1143 {
1144 	int chp;
1145 	__u8 ppm = 0x00;
1146 
1147 	for (chp = 0; chp < 8; chp++)
1148 		if (dasd_path_is_preferred(device, chp))
1149 			ppm |= 0x80 >> chp;
1150 	return ppm;
1151 }
1152 
1153 static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
1154 {
1155 	int chp;
1156 	__u8 cablepm = 0x00;
1157 
1158 	for (chp = 0; chp < 8; chp++)
1159 		if (dasd_path_is_miscabled(device, chp))
1160 			cablepm |= 0x80 >> chp;
1161 	return cablepm;
1162 }
1163 
1164 static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
1165 {
1166 	int chp;
1167 	__u8 cuirpm = 0x00;
1168 
1169 	for (chp = 0; chp < 8; chp++)
1170 		if (dasd_path_is_cuir(device, chp))
1171 			cuirpm |= 0x80 >> chp;
1172 	return cuirpm;
1173 }
1174 
1175 static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
1176 {
1177 	int chp;
1178 	__u8 ifccpm = 0x00;
1179 
1180 	for (chp = 0; chp < 8; chp++)
1181 		if (dasd_path_is_ifcc(device, chp))
1182 			ifccpm |= 0x80 >> chp;
1183 	return ifccpm;
1184 }
1185 
1186 static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
1187 {
1188 	int chp;
1189 	__u8 hpfpm = 0x00;
1190 
1191 	for (chp = 0; chp < 8; chp++)
1192 		if (dasd_path_is_nohpf(device, chp))
1193 			hpfpm |= 0x80 >> chp;
1194 	return hpfpm;
1195 }
1196 
1197 static inline u8 dasd_path_get_fcs_path(struct dasd_device *device, int chp)
1198 {
1199 	return device->path[chp].fc_security;
1200 }
1201 
1202 static inline int dasd_path_get_fcs_device(struct dasd_device *device)
1203 {
1204 	u8 fc_sec = 0;
1205 	int chp;
1206 
1207 	for (chp = 0; chp < 8; chp++) {
1208 		if (device->opm & (0x80 >> chp)) {
1209 			fc_sec = device->path[chp].fc_security;
1210 			break;
1211 		}
1212 	}
1213 	for (; chp < 8; chp++) {
1214 		if (device->opm & (0x80 >> chp))
1215 			if (device->path[chp].fc_security != fc_sec)
1216 				return -EINVAL;
1217 	}
1218 
1219 	return fc_sec;
1220 }
1221 
1222 /*
1223  * add functions for path masks
1224  * the existing path mask will be extended by the given path mask
1225  */
1226 static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
1227 {
1228 	int chp;
1229 
1230 	for (chp = 0; chp < 8; chp++)
1231 		if (pm & (0x80 >> chp))
1232 			dasd_path_verify(device, chp);
1233 }
1234 
1235 static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
1236 {
1237 	int chp;
1238 	__u8 nopm = 0x00;
1239 
1240 	for (chp = 0; chp < 8; chp++)
1241 		if (dasd_path_is_nohpf(device, chp) ||
1242 		    dasd_path_is_ifcc(device, chp) ||
1243 		    dasd_path_is_cuir(device, chp) ||
1244 		    dasd_path_is_miscabled(device, chp))
1245 			nopm |= 0x80 >> chp;
1246 	return nopm;
1247 }
1248 
1249 static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
1250 {
1251 	int chp;
1252 
1253 	for (chp = 0; chp < 8; chp++)
1254 		if (pm & (0x80 >> chp)) {
1255 			dasd_path_operational(device, chp);
1256 			/*
1257 			 * if the path is used
1258 			 * it should not be in one of the negative lists
1259 			 */
1260 			dasd_path_clear_nohpf(device, chp);
1261 			dasd_path_clear_cuir(device, chp);
1262 			dasd_path_clear_cable(device, chp);
1263 			dasd_path_clear_ifcc(device, chp);
1264 		}
1265 }
1266 
1267 static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
1268 {
1269 	int chp;
1270 
1271 	for (chp = 0; chp < 8; chp++)
1272 		if (pm & (0x80 >> chp))
1273 			dasd_path_miscabled(device, chp);
1274 }
1275 
1276 static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
1277 {
1278 	int chp;
1279 
1280 	for (chp = 0; chp < 8; chp++)
1281 		if (pm & (0x80 >> chp))
1282 			dasd_path_cuir(device, chp);
1283 }
1284 
1285 static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
1286 {
1287 	int chp;
1288 
1289 	for (chp = 0; chp < 8; chp++)
1290 		if (pm & (0x80 >> chp))
1291 			dasd_path_ifcc(device, chp);
1292 }
1293 
1294 static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
1295 {
1296 	int chp;
1297 
1298 	for (chp = 0; chp < 8; chp++)
1299 		if (pm & (0x80 >> chp))
1300 			dasd_path_nonpreferred(device, chp);
1301 }
1302 
1303 static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
1304 {
1305 	int chp;
1306 
1307 	for (chp = 0; chp < 8; chp++)
1308 		if (pm & (0x80 >> chp))
1309 			dasd_path_nohpf(device, chp);
1310 }
1311 
1312 static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
1313 {
1314 	int chp;
1315 
1316 	for (chp = 0; chp < 8; chp++)
1317 		if (pm & (0x80 >> chp))
1318 			dasd_path_preferred(device, chp);
1319 }
1320 
1321 static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
1322 {
1323 	int chp;
1324 
1325 	for (chp = 0; chp < 8; chp++)
1326 		if (pm & (0x80 >> chp))
1327 			dasd_path_fcsec(device, chp);
1328 }
1329 
1330 /*
1331  * set functions for path masks
1332  * the existing path mask will be replaced by the given path mask
1333  */
1334 static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
1335 {
1336 	int chp;
1337 
1338 	for (chp = 0; chp < 8; chp++)
1339 		if (pm & (0x80 >> chp))
1340 			dasd_path_verify(device, chp);
1341 		else
1342 			dasd_path_clear_verify(device, chp);
1343 }
1344 
1345 static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
1346 {
1347 	int chp;
1348 
1349 	for (chp = 0; chp < 8; chp++) {
1350 		dasd_path_clear_oper(device, chp);
1351 		if (pm & (0x80 >> chp)) {
1352 			dasd_path_operational(device, chp);
1353 			/*
1354 			 * if the path is used
1355 			 * it should not be in one of the negative lists
1356 			 */
1357 			dasd_path_clear_nohpf(device, chp);
1358 			dasd_path_clear_cuir(device, chp);
1359 			dasd_path_clear_cable(device, chp);
1360 			dasd_path_clear_ifcc(device, chp);
1361 		}
1362 	}
1363 }
1364 
1365 /*
1366  * remove functions for path masks
1367  * the existing path mask will be cleared with the given path mask
1368  */
1369 static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
1370 {
1371 	int chp;
1372 
1373 	for (chp = 0; chp < 8; chp++) {
1374 		if (pm & (0x80 >> chp))
1375 			dasd_path_clear_oper(device, chp);
1376 	}
1377 }
1378 
1379 /*
 * add a newly available path to the "to be verified" path mask and remove
 * it from normal operation until it has been verified
1382  */
1383 static inline void dasd_path_available(struct dasd_device *device, int chp)
1384 {
1385 	dasd_path_clear_oper(device, chp);
1386 	dasd_path_verify(device, chp);
1387 }
1388 
1389 static inline void dasd_path_notoper(struct dasd_device *device, int chp)
1390 {
1391 	dasd_path_clear_oper(device, chp);
1392 	dasd_path_clear_preferred(device, chp);
1393 	dasd_path_clear_nonpreferred(device, chp);
1394 }
1395 
1396 static inline void dasd_path_fcsec_update(struct dasd_device *device, int chp)
1397 {
1398 	dasd_path_fcsec(device, chp);
1399 }
1400 
1401 /*
1402  * remove all paths from normal operation
1403  */
1404 static inline void dasd_path_no_path(struct dasd_device *device)
1405 {
1406 	int chp;
1407 
1408 	for (chp = 0; chp < 8; chp++)
1409 		dasd_path_notoper(device, chp);
1410 
1411 	dasd_path_clear_all_verify(device);
1412 }
1413 
1414 /* end - path handling */
1415 
#endif				/* DASD_INT_H */
1417