xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision a2cce7a9)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27 
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
30 
31 #include <linux/module.h>
32 
33 #include <linux/kernel.h>
34 #include <linux/errno.h>
35 #include <linux/timer.h>
36 #include <linux/slab.h>
37 #include <linux/types.h>
38 #include <linux/string.h>
39 #include <linux/genhd.h>
40 #include <linux/fs.h>
41 #include <linux/init.h>
42 #include <linux/proc_fs.h>
43 #include <linux/vmalloc.h>
44 #include <linux/moduleparam.h>
45 #include <linux/scatterlist.h>
46 #include <linux/blkdev.h>
47 #include <linux/crc-t10dif.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/atomic.h>
51 #include <linux/hrtimer.h>
52 
53 #include <net/checksum.h>
54 
55 #include <asm/unaligned.h>
56 
57 #include <scsi/scsi.h>
58 #include <scsi/scsi_cmnd.h>
59 #include <scsi/scsi_device.h>
60 #include <scsi/scsi_host.h>
61 #include <scsi/scsicam.h>
62 #include <scsi/scsi_eh.h>
63 #include <scsi/scsi_tcq.h>
64 #include <scsi/scsi_dbg.h>
65 
66 #include "sd.h"
67 #include "scsi_logging.h"
68 
69 #define SCSI_DEBUG_VERSION "1.85"
70 static const char *scsi_debug_version_date = "20141022";
71 
72 #define MY_NAME "scsi_debug"
73 
74 /* Additional Sense Code (ASC) */
75 #define NO_ADDITIONAL_SENSE 0x0
76 #define LOGICAL_UNIT_NOT_READY 0x4
77 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
78 #define UNRECOVERED_READ_ERR 0x11
79 #define PARAMETER_LIST_LENGTH_ERR 0x1a
80 #define INVALID_OPCODE 0x20
81 #define LBA_OUT_OF_RANGE 0x21
82 #define INVALID_FIELD_IN_CDB 0x24
83 #define INVALID_FIELD_IN_PARAM_LIST 0x26
84 #define UA_RESET_ASC 0x29
85 #define UA_CHANGED_ASC 0x2a
86 #define TARGET_CHANGED_ASC 0x3f
87 #define LUNS_CHANGED_ASCQ 0x0e
88 #define INSUFF_RES_ASC 0x55
89 #define INSUFF_RES_ASCQ 0x3
90 #define POWER_ON_RESET_ASCQ 0x0
91 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
92 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
93 #define CAPACITY_CHANGED_ASCQ 0x9
94 #define SAVING_PARAMS_UNSUP 0x39
95 #define TRANSPORT_PROBLEM 0x4b
96 #define THRESHOLD_EXCEEDED 0x5d
97 #define LOW_POWER_COND_ON 0x5e
98 #define MISCOMPARE_VERIFY_ASC 0x1d
99 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
100 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
101 
102 /* Additional Sense Code Qualifier (ASCQ) */
103 #define ACK_NAK_TO 0x3
104 
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_DELAY   1		/* if > 0 unit is a jiffy */
115 #define DEF_DEV_SIZE_MB   8
116 #define DEF_DIF 0
117 #define DEF_DIX 0
118 #define DEF_D_SENSE   0
119 #define DEF_EVERY_NTH   0
120 #define DEF_FAKE_RW	0
121 #define DEF_GUARD 0
122 #define DEF_HOST_LOCK 0
123 #define DEF_LBPU 0
124 #define DEF_LBPWS 0
125 #define DEF_LBPWS10 0
126 #define DEF_LBPRZ 1
127 #define DEF_LOWEST_ALIGNED 0
128 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
129 #define DEF_NO_LUN_0   0
130 #define DEF_NUM_PARTS   0
131 #define DEF_OPTS   0
132 #define DEF_OPT_BLKS 64
133 #define DEF_PHYSBLK_EXP 0
134 #define DEF_PTYPE   0
135 #define DEF_REMOVABLE false
136 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
137 #define DEF_SECTOR_SIZE 512
138 #define DEF_UNMAP_ALIGNMENT 0
139 #define DEF_UNMAP_GRANULARITY 1
140 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
141 #define DEF_UNMAP_MAX_DESC 256
142 #define DEF_VIRTUAL_GB   0
143 #define DEF_VPD_USE_HOSTNO 1
144 #define DEF_WRITESAME_LENGTH 0xFFFF
145 #define DEF_STRICT 0
146 #define DELAY_OVERRIDDEN -9999
147 
148 /* bit mask values for scsi_debug_opts */
149 #define SCSI_DEBUG_OPT_NOISE   1
150 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
151 #define SCSI_DEBUG_OPT_TIMEOUT   4
152 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
153 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
154 #define SCSI_DEBUG_OPT_DIF_ERR   32
155 #define SCSI_DEBUG_OPT_DIX_ERR   64
156 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
157 #define SCSI_DEBUG_OPT_SHORT_TRANSFER	0x100
158 #define SCSI_DEBUG_OPT_Q_NOISE	0x200
159 #define SCSI_DEBUG_OPT_ALL_TSF	0x400
160 #define SCSI_DEBUG_OPT_RARE_TSF	0x800
161 #define SCSI_DEBUG_OPT_N_WCE	0x1000
162 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
163 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
164 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
165 /* When "every_nth" > 0 then modulo "every_nth" commands:
166  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167  *   - a RECOVERED_ERROR is simulated on successful read and write
168  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169  *   - a TRANSPORT_ERROR is simulated on successful read and write
170  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171  *
172  * When "every_nth" < 0 then after "- every_nth" commands:
173  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
174  *   - a RECOVERED_ERROR is simulated on successful read and write
175  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
176  *   - a TRANSPORT_ERROR is simulated on successful read and write
177  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
178  * This will continue until some other action occurs (e.g. the user
179  * writing a new value (other than -1 or 1) to every_nth via sysfs).
180  */
181 
182 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
183  * priority order. In the subset implemented here lower numbers have higher
184  * priority. The UA numbers should be a sequence starting from 0 with
185  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
186 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
187 #define SDEBUG_UA_BUS_RESET 1
188 #define SDEBUG_UA_MODE_CHANGED 2
189 #define SDEBUG_UA_CAPACITY_CHANGED 3
190 #define SDEBUG_UA_LUNS_CHANGED 4
191 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
192 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
193 #define SDEBUG_NUM_UAS 7
194 
195 /* for check_readiness() */
196 #define UAS_ONLY 1	/* check for UAs only */
197 #define UAS_TUR 0	/* if no UAs then check if media access possible */
198 
199 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200  * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
203 
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205  * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
207 
208 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
209  * (for response) at one time. Can be reduced by max_queue option. Command
210  * responses are not queued when delay=0 and ndelay=0. The per-device
211  * DEF_CMD_PER_LUN can be changed via sysfs:
212  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
213  * SCSI_DEBUG_CANQUEUE. */
214 #define SCSI_DEBUG_CANQUEUE_WORDS  9	/* a WORD is bits in a long */
215 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
216 #define DEF_CMD_PER_LUN  255
217 
218 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
219 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
220 #endif
221 
222 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
223 enum sdeb_opcode_index {
224 	SDEB_I_INVALID_OPCODE =	0,
225 	SDEB_I_INQUIRY = 1,
226 	SDEB_I_REPORT_LUNS = 2,
227 	SDEB_I_REQUEST_SENSE = 3,
228 	SDEB_I_TEST_UNIT_READY = 4,
229 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
230 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
231 	SDEB_I_LOG_SENSE = 7,
232 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
233 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
234 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
235 	SDEB_I_START_STOP = 11,
236 	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
237 	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
238 	SDEB_I_MAINT_IN = 14,
239 	SDEB_I_MAINT_OUT = 15,
240 	SDEB_I_VERIFY = 16,		/* 10 only */
241 	SDEB_I_VARIABLE_LEN = 17,
242 	SDEB_I_RESERVE = 18,		/* 6, 10 */
243 	SDEB_I_RELEASE = 19,		/* 6, 10 */
244 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
245 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
246 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
247 	SDEB_I_SEND_DIAG = 23,
248 	SDEB_I_UNMAP = 24,
249 	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
250 	SDEB_I_WRITE_BUFFER = 26,
251 	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
252 	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
253 	SDEB_I_COMP_WRITE = 29,
254 	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
255 };
256 
257 static const unsigned char opcode_ind_arr[256] = {
258 /* 0x0; 0x0->0x1f: 6 byte cdbs */
259 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
260 	    0, 0, 0, 0,
261 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
262 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
263 	    SDEB_I_RELEASE,
264 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
265 	    SDEB_I_ALLOW_REMOVAL, 0,
266 /* 0x20; 0x20->0x3f: 10 byte cdbs */
267 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
268 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
269 	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
270 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
271 /* 0x40; 0x40->0x5f: 10 byte cdbs */
272 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
273 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
274 	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
275 	    SDEB_I_RELEASE,
276 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
277 /* 0x60; 0x60->0x7d are reserved */
278 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
280 	0, SDEB_I_VARIABLE_LEN,
281 /* 0x80; 0x80->0x9f: 16 byte cdbs */
282 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
283 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
284 	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
285 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
286 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
287 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
288 	     SDEB_I_MAINT_OUT, 0, 0, 0,
289 	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
290 	     0, 0, 0, 0,
291 	0, 0, 0, 0, 0, 0, 0, 0,
292 	0, 0, 0, 0, 0, 0, 0, 0,
293 /* 0xc0; 0xc0->0xff: vendor specific */
294 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
297 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
298 };
299 
300 #define F_D_IN			1
301 #define F_D_OUT			2
302 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
303 #define F_D_UNKN		8
304 #define F_RL_WLUN_OK		0x10
305 #define F_SKIP_UA		0x20
306 #define F_DELAY_OVERR		0x40
307 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
308 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
309 #define F_INV_OP		0x200
310 #define F_FAKE_RW		0x400
311 #define F_M_ACCESS		0x800	/* media access */
312 
313 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
314 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
315 #define FF_SA (F_SA_HIGH | F_SA_LOW)
316 
317 struct sdebug_dev_info;
318 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
337 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
338 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
339 
340 struct opcode_info_t {
341 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff
342 				 * for terminating element */
343 	u8 opcode;		/* if num_attached > 0, preferred */
344 	u16 sa;			/* service action */
345 	u32 flags;		/* OR-ed set of SDEB_F_* */
346 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
347 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
348 	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
349 				/* ignore cdb bytes after position 15 */
350 };
351 
352 static const struct opcode_info_t msense_iarr[1] = {
353 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
354 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
355 };
356 
357 static const struct opcode_info_t mselect_iarr[1] = {
358 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
359 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
360 };
361 
362 static const struct opcode_info_t read_iarr[3] = {
363 	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
364 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
365 	     0, 0, 0, 0} },
366 	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
367 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
368 	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
369 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
370 	     0xc7, 0, 0, 0, 0} },
371 };
372 
373 static const struct opcode_info_t write_iarr[3] = {
374 	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
375 	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
376 	     0, 0, 0, 0} },
377 	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
378 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
379 	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
380 	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
381 	     0xc7, 0, 0, 0, 0} },
382 };
383 
384 static const struct opcode_info_t sa_in_iarr[1] = {
385 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
386 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
387 	     0xff, 0xff, 0xff, 0, 0xc7} },
388 };
389 
390 static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
391 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
392 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
393 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
394 };
395 
396 static const struct opcode_info_t maint_in_iarr[2] = {
397 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
398 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
399 	     0xc7, 0, 0, 0, 0} },
400 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
401 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
402 	     0, 0} },
403 };
404 
405 static const struct opcode_info_t write_same_iarr[1] = {
406 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
407 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
408 	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
409 };
410 
411 static const struct opcode_info_t reserve_iarr[1] = {
412 	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
413 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
414 };
415 
416 static const struct opcode_info_t release_iarr[1] = {
417 	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
418 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
419 };
420 
421 
422 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
423  * plus the terminating elements for logic that scans this table such as
424  * REPORT SUPPORTED OPERATION CODES. */
425 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
426 /* 0 */
427 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
428 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
430 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
431 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
432 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
433 	     0, 0} },
434 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
435 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
436 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
437 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
438 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
439 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
440 	     0} },
441 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
442 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
443 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
444 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
445 	     0, 0, 0} },
446 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
447 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
448 	     0, 0} },
449 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
450 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
451 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
452 /* 10 */
453 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
454 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
455 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
456 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
457 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
459 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
460 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
461 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
462 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
463 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
464 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
465 	     0} },
466 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
467 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
469 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
470 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
473 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
474 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
475 	     0} },
476 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
477 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
478 	     0} },
479 /* 20 */
480 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
481 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
485 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
487 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
488 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
489 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
490 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
491 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
492 		   0, 0, 0, 0, 0, 0} },
493 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
494 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
495 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
496 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
497 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
498 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
499 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
500 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
501 	     0, 0, 0, 0} },
502 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
503 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
504 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
505 
506 /* 30 */
507 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
508 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510 
/* Per-command error-injection flags; names mirror the SCSI_DEBUG_OPT_*
 * bits (RECOVERED_ERR, TRANSPORT_ERR, DIF_ERR, DIX_ERR, SHORT_TRANSFER)
 * that request each injection. */
struct sdebug_scmd_extra_t {
	bool inj_recovered;	/* inject a recovered error */
	bool inj_transport;	/* inject a transport error */
	bool inj_dif;		/* inject a DIF (protection) error */
	bool inj_dix;		/* inject a DIX (protection) error */
	bool inj_short;		/* inject a short (truncated) transfer */
};
518 
519 static int scsi_debug_add_host = DEF_NUM_HOST;
520 static int scsi_debug_ato = DEF_ATO;
521 static int scsi_debug_delay = DEF_DELAY;
522 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
523 static int scsi_debug_dif = DEF_DIF;
524 static int scsi_debug_dix = DEF_DIX;
525 static int scsi_debug_dsense = DEF_D_SENSE;
526 static int scsi_debug_every_nth = DEF_EVERY_NTH;
527 static int scsi_debug_fake_rw = DEF_FAKE_RW;
528 static unsigned int scsi_debug_guard = DEF_GUARD;
529 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
530 static int scsi_debug_max_luns = DEF_MAX_LUNS;
531 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
532 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
533 static int scsi_debug_ndelay = DEF_NDELAY;
534 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
535 static int scsi_debug_no_uld = 0;
536 static int scsi_debug_num_parts = DEF_NUM_PARTS;
537 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
538 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
539 static int scsi_debug_opts = DEF_OPTS;
540 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
541 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
542 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
543 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
544 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
545 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
546 static unsigned int scsi_debug_lbpu = DEF_LBPU;
547 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
548 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
549 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
550 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
551 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
552 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
553 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
554 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
555 static bool scsi_debug_removable = DEF_REMOVABLE;
556 static bool scsi_debug_clustering;
557 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
558 static bool scsi_debug_strict = DEF_STRICT;
559 static bool sdebug_any_injecting_opt;
560 
561 static atomic_t sdebug_cmnd_count;
562 static atomic_t sdebug_completions;
563 static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
564 
565 #define DEV_READONLY(TGT)      (0)
566 
567 static unsigned int sdebug_store_sectors;
568 static sector_t sdebug_capacity;	/* in sectors */
569 
570 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
571    may still need them */
572 static int sdebug_heads;		/* heads per disk */
573 static int sdebug_cylinders_per;	/* cylinders per surface */
574 static int sdebug_sectors_per;		/* sectors per cylinder */
575 
576 #define SDEBUG_MAX_PARTS 4
577 
578 #define SCSI_DEBUG_MAX_CMD_LEN 32
579 
580 static unsigned int scsi_debug_lbp(void)
581 {
582 	return ((0 == scsi_debug_fake_rw) &&
583 		(scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
584 }
585 
/* State for one simulated logical unit. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* on owning host's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	struct sdebug_host_info *sdbg_host;	/* back pointer to owner */
	unsigned long uas_bm[1];	/* pending unit attentions, indexed
					 * by the SDEBUG_UA_* values */
	atomic_t num_in_q;	/* commands currently queued on this LU */
	char stopped;		/* TODO: should be atomic */
	bool used;
};
597 
598 struct sdebug_host_info {
599 	struct list_head host_list;
600 	struct Scsi_Host *shost;
601 	struct device dev;
602 	struct list_head dev_info_list;
603 };
604 
605 #define to_sdebug_host(d)	\
606 	container_of(d, struct sdebug_host_info, dev)
607 
608 static LIST_HEAD(sdebug_host_list);
609 static DEFINE_SPINLOCK(sdebug_host_list_lock);
610 
611 
612 struct sdebug_hrtimer {		/* ... is derived from hrtimer */
613 	struct hrtimer hrt;	/* must be first element */
614 	int qa_indx;
615 };
616 
/* One slot in queued_arr[]; holds a command awaiting its (possibly
 * delayed) response. Which of the three response mechanisms is non-NULL
 * presumably depends on the delay/ndelay settings — confirm in the
 * queueing code (outside this view). */
struct sdebug_queued_cmd {
	/* in_use flagged by a bit in queued_in_use_bm[] */
	struct timer_list *cmnd_timerp;	/* jiffies-resolution timer */
	struct tasklet_struct *tletp;	/* for untimed completion */
	struct sdebug_hrtimer *sd_hrtp;	/* high-resolution (ns) timer */
	struct scsi_cmnd * a_cmnd;	/* the command being serviced */
};
624 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
625 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
626 
627 
628 static unsigned char * fake_storep;	/* ramdisk storage */
629 static struct sd_dif_tuple *dif_storep;	/* protection info */
630 static void *map_storep;		/* provisioning map */
631 
632 static unsigned long map_size;
633 static int num_aborts;
634 static int num_dev_resets;
635 static int num_target_resets;
636 static int num_bus_resets;
637 static int num_host_resets;
638 static int dix_writes;
639 static int dix_reads;
640 static int dif_errors;
641 
642 static DEFINE_SPINLOCK(queued_arr_lock);
643 static DEFINE_RWLOCK(atomic_rw);
644 
645 static char sdebug_proc_name[] = MY_NAME;
646 static const char *my_name = MY_NAME;
647 
648 static struct bus_type pseudo_lld_bus;
649 
650 static struct device_driver sdebug_driverfs_driver = {
651 	.name 		= sdebug_proc_name,
652 	.bus		= &pseudo_lld_bus,
653 };
654 
655 static const int check_condition_result =
656 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
657 
658 static const int illegal_condition_result =
659 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
660 
661 static const int device_qfull_result =
662 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
663 
664 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
665 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
666 				     0, 0, 0, 0};
667 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
668 				    0, 0, 0x2, 0x4b};
669 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
670 			           0, 0, 0x0, 0x0};
671 
/* Map an LBA to its sector within the shared ramdisk image. The image
 * holds sdebug_store_sectors sectors, so LBAs wrap modulo that value
 * (do_div() divides in place and returns the remainder). */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * scsi_debug_sector_size;
}
678 
/* Return the protection-information tuple for the given sector; like
 * fake_store(), the sector number wraps modulo sdebug_store_sectors. */
static struct sd_dif_tuple *dif_store(sector_t sector)
{
	sector = do_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
685 
686 static int sdebug_add_adapter(void);
687 static void sdebug_remove_adapter(void);
688 
689 static void sdebug_max_tgts_luns(void)
690 {
691 	struct sdebug_host_info *sdbg_host;
692 	struct Scsi_Host *hpnt;
693 
694 	spin_lock(&sdebug_host_list_lock);
695 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
696 		hpnt = sdbg_host->shost;
697 		if ((hpnt->this_id >= 0) &&
698 		    (scsi_debug_num_tgts > hpnt->this_id))
699 			hpnt->max_id = scsi_debug_num_tgts + 1;
700 		else
701 			hpnt->max_id = scsi_debug_num_tgts;
702 		/* scsi_debug_max_luns; */
703 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
704 	}
705 	spin_unlock(&sdebug_host_list_lock);
706 }
707 
708 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
709 
/* Build an ILLEGAL REQUEST sense buffer on scp reporting an invalid field.
 * c_d selects the ASC: SDEB_IN_CDB -> INVALID FIELD IN CDB, SDEB_IN_DATA ->
 * INVALID FIELD IN PARAMETER LIST. in_byte/in_bit locate the offending
 * field for the sense-key-specific (SKS) bytes.
 * Set in_bit to -1 to indicate no bit position of invalid field. */
static void
mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
		     int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];		/* SKS bytes; only the first 3 are used */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
				asc, 0);
	/* assemble SKS: SKSV | C/D | BPV + bit pointer, then a big-endian
	 * 16-bit field (byte) pointer */
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (scsi_debug_dsense) {
		/* descriptor format: append an SKS descriptor (type 0x2,
		 * additional length 0x6) and grow the additional sense
		 * length held in byte 7 */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS occupies bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
751 
752 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
753 {
754 	unsigned char *sbuff;
755 
756 	sbuff = scp->sense_buffer;
757 	if (!sbuff) {
758 		sdev_printk(KERN_ERR, scp->device,
759 			    "%s: sense_buffer is NULL\n", __func__);
760 		return;
761 	}
762 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
763 
764 	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
765 
766 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
767 		sdev_printk(KERN_INFO, scp->device,
768 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
769 			    my_name, key, asc, asq);
770 }
771 
/* Report ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE on scp. */
static void
mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
777 
778 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
779 {
780 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
781 		if (0x1261 == cmd)
782 			sdev_printk(KERN_INFO, dev,
783 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
784 		else if (0x5331 == cmd)
785 			sdev_printk(KERN_INFO, dev,
786 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
787 				    __func__);
788 		else
789 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
790 				    __func__, cmd);
791 	}
792 	return -EINVAL;
793 	/* return -ENOTTY; // correct return but upsets fdisk */
794 }
795 
796 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
797 {
798 	struct sdebug_host_info *sdhp;
799 	struct sdebug_dev_info *dp;
800 
801 	spin_lock(&sdebug_host_list_lock);
802 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
803 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
804 			if ((devip->sdbg_host == dp->sdbg_host) &&
805 			    (devip->target == dp->target))
806 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
807 		}
808 	}
809 	spin_unlock(&sdebug_host_list_lock);
810 }
811 
/* Check for a pending unit attention (UA) on @devip and, when @uas_only
 * is UAS_TUR (TEST UNIT READY), the stopped state.  The lowest numbered
 * pending UA is converted to CHECK CONDITION sense data on @SCpnt and
 * cleared (each UA is reported exactly once).  Returns 0 when ready,
 * otherwise check_condition_result. */
static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
			   struct sdebug_dev_info * devip)
{
	int k;
	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);

	/* lowest numbered pending UA bit has highest priority */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description for debug logging */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, POWER_ON_RESET_ASCQ);
			if (debug)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, BUS_RESET_ASCQ);
			if (debug)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
			if (debug)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
			if (debug)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
				 TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
			if (debug)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (debug)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  scsi_debug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (scsi_debug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (debug)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("%s: unexpected unit attention code=%d\n",
				__func__, k);
			if (debug)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);	/* each UA reported once */
		if (debug)
			sdev_printk(KERN_INFO, SCpnt->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	/* only TEST UNIT READY reports NOT READY while stopped */
	if ((UAS_TUR == uas_only) && devip->stopped) {
		mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		if (debug)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s reports: Not ready: %s\n", my_name,
				    "initializing command required");
		return check_condition_result;
	}
	return 0;
}
902 
903 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
904 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
905 				int arr_len)
906 {
907 	int act_len;
908 	struct scsi_data_buffer *sdb = scsi_in(scp);
909 
910 	if (!sdb->length)
911 		return 0;
912 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
913 		return (DID_ERROR << 16);
914 
915 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
916 				      arr, arr_len);
917 	sdb->resid = scsi_bufflen(scp) - act_len;
918 
919 	return 0;
920 }
921 
922 /* Returns number of bytes fetched into 'arr' or -1 if error. */
923 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
924 			       int arr_len)
925 {
926 	if (!scsi_bufflen(scp))
927 		return 0;
928 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
929 		return -1;
930 
931 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
932 }
933 
934 
/* Standard INQUIRY identification strings; per SPC the vendor field is
 * 8 characters, product 16 and revision 4, all space padded. */
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0184";	/* version less '.' */
938 
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits T10 vendor, NAA-5 logical unit (skipped when dev_id_num < 0,
 * i.e. for a well known LUN), relative/target port, port group, target
 * device and SCSI name string designators.  All company ids are fake. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
			   int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
		arr[num++] = 0x33;
		arr[num++] = 0x33;
		arr[num++] = 0x30;
		arr[num++] = (dev_id_num >> 24);
		arr[num++] = (dev_id_num >> 16) & 0xff;
		arr[num++] = (dev_id_num >> 8) & 0xff;
		arr[num++] = dev_id_num & 0xff;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	arr[num++] = (port_group_id >> 8) & 0xff;
	arr[num++] = port_group_id & 0xff;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (target_dev_id >> 24);
	arr[num++] = (target_dev_id >> 16) & 0xff;
	arr[num++] = (target_dev_id >> 8) & 0xff;
	arr[num++] = target_dev_id & 0xff;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* 12 + 8 hex digits + 4 byte pad */
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1032 
1033 
/* Canned Software interface identification descriptors (from the
 * page's 4th byte): three 6 byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page: copy the canned table
 * into arr and return its length in bytes. */
static int inquiry_evpd_84(unsigned char * arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1046 
/* Management network addresses VPD page.  Emits two network service
 * descriptors (a storage-configuration URL and a logging URL), each
 * null terminated and padded to a multiple of 4 bytes.  Returns the
 * number of bytes placed in arr. */
static int inquiry_evpd_85(unsigned char * arr)
{
	static const char * const urls[2] = {
		"https://www.kernel.org/config",	/* lu, storage config */
		"http://www.kernel.org/log",		/* lu, logging */
	};
	static const unsigned char assoc_svc[2] = {0x1, 0x4};
	int k, olen, plen, num = 0;

	for (k = 0; k < 2; ++k) {
		arr[num++] = assoc_svc[k];	/* association + service type */
		arr[num++] = 0x0;		/* reserved */
		arr[num++] = 0x0;
		olen = strlen(urls[k]);
		plen = olen + 1;		/* room for null terminator */
		if (plen % 4)
			plen = ((plen / 4) + 1) * 4;	/* pad to 4n */
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, urls[k], olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1081 
/* SCSI ports VPD page.  Reports two relative ports (primary and
 * secondary), each with a fake NAA-5 target port identifier derived
 * from target_dev_id.  Returns the number of bytes placed in arr. */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int k, num = 0;
	int port_id[2];

	port_id[0] = target_dev_id + 1;	/* port A (primary) */
	port_id[1] = target_dev_id + 2;	/* port B (secondary) */
	for (k = 0; k < 2; ++k) {
		int pid = port_id[k];

		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = k + 1;	/* relative port number */
		memset(arr + num, 0, 6);
		num += 6;
		arr[num++] = 0x0;
		arr[num++] = 12;	/* length tp descriptor */
		/* naa-5 target port identifier */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x93;	/* PIV=1, target port, NAA */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x8;	/* length */
		arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
		arr[num++] = 0x22;
		arr[num++] = 0x22;
		arr[num++] = 0x20;
		arr[num++] = (pid >> 24);
		arr[num++] = (pid >> 16) & 0xff;
		arr[num++] = (pid >> 8) & 0xff;
		arr[num++] = pid & 0xff;
	}

	return num;
}
1136 
1137 
/* Canned ATA IDENTIFY DEVICE style data used by the ATA Information
 * VPD page; contents start at the page's 4th byte. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_evpd_89(unsigned char * arr)
{
	/* fixed-size canned response */
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1188 
1189 
/* Template for the Block limits VPD page (from the page's 4th byte);
 * most fields are overwritten by inquiry_evpd_b0() before use. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1196 
1197 /* Block limits VPD page (SBC-3) */
1198 static int inquiry_evpd_b0(unsigned char * arr)
1199 {
1200 	unsigned int gran;
1201 
1202 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1203 
1204 	/* Optimal transfer length granularity */
1205 	gran = 1 << scsi_debug_physblk_exp;
1206 	arr[2] = (gran >> 8) & 0xff;
1207 	arr[3] = gran & 0xff;
1208 
1209 	/* Maximum Transfer Length */
1210 	if (sdebug_store_sectors > 0x400) {
1211 		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1212 		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1213 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1214 		arr[7] = sdebug_store_sectors & 0xff;
1215 	}
1216 
1217 	/* Optimal Transfer Length */
1218 	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1219 
1220 	if (scsi_debug_lbpu) {
1221 		/* Maximum Unmap LBA Count */
1222 		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1223 
1224 		/* Maximum Unmap Block Descriptor Count */
1225 		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1226 	}
1227 
1228 	/* Unmap Granularity Alignment */
1229 	if (scsi_debug_unmap_alignment) {
1230 		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1231 		arr[28] |= 0x80; /* UGAVALID */
1232 	}
1233 
1234 	/* Optimal Unmap Granularity */
1235 	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1236 
1237 	/* Maximum WRITE SAME Length */
1238 	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1239 
1240 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1241 
1242 	return sizeof(vpdb0_data);
1243 }
1244 
/* Block device characteristics VPD page (SBC-3): claims a non-rotating
 * (solid state) medium with a form factor of less than 1.8 inches.
 * Returns the fixed 0x3c page length. */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1256 
1257 /* Logical block provisioning VPD page (SBC-3) */
1258 static int inquiry_evpd_b2(unsigned char *arr)
1259 {
1260 	memset(arr, 0, 0x4);
1261 	arr[0] = 0;			/* threshold exponent */
1262 
1263 	if (scsi_debug_lbpu)
1264 		arr[1] = 1 << 7;
1265 
1266 	if (scsi_debug_lbpws)
1267 		arr[1] |= 1 << 6;
1268 
1269 	if (scsi_debug_lbpws10)
1270 		arr[1] |= 1 << 5;
1271 
1272 	if (scsi_debug_lbprz)
1273 		arr[1] |= 1 << 2;
1274 
1275 	return 0x4;
1276 }
1277 
1278 #define SDEBUG_LONG_INQ_SZ 96
1279 #define SDEBUG_MAX_INQ_ARR_SZ 584
1280 
1281 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1282 {
1283 	unsigned char pq_pdt;
1284 	unsigned char * arr;
1285 	unsigned char *cmd = scp->cmnd;
1286 	int alloc_len, n, ret;
1287 	bool have_wlun;
1288 
1289 	alloc_len = (cmd[3] << 8) + cmd[4];
1290 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1291 	if (! arr)
1292 		return DID_REQUEUE << 16;
1293 	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
1294 	if (have_wlun)
1295 		pq_pdt = 0x1e;	/* present, wlun */
1296 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1297 		pq_pdt = 0x7f;	/* not present, no device type */
1298 	else
1299 		pq_pdt = (scsi_debug_ptype & 0x1f);
1300 	arr[0] = pq_pdt;
1301 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1302 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1303 		kfree(arr);
1304 		return check_condition_result;
1305 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1306 		int lu_id_num, port_group_id, target_dev_id, len;
1307 		char lu_id_str[6];
1308 		int host_no = devip->sdbg_host->shost->host_no;
1309 
1310 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1311 		    (devip->channel & 0x7f);
1312 		if (0 == scsi_debug_vpd_use_hostno)
1313 			host_no = 0;
1314 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1315 			    (devip->target * 1000) + devip->lun);
1316 		target_dev_id = ((host_no + 1) * 2000) +
1317 				 (devip->target * 1000) - 3;
1318 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1319 		if (0 == cmd[2]) { /* supported vital product data pages */
1320 			arr[1] = cmd[2];	/*sanity */
1321 			n = 4;
1322 			arr[n++] = 0x0;   /* this page */
1323 			arr[n++] = 0x80;  /* unit serial number */
1324 			arr[n++] = 0x83;  /* device identification */
1325 			arr[n++] = 0x84;  /* software interface ident. */
1326 			arr[n++] = 0x85;  /* management network addresses */
1327 			arr[n++] = 0x86;  /* extended inquiry */
1328 			arr[n++] = 0x87;  /* mode page policy */
1329 			arr[n++] = 0x88;  /* SCSI ports */
1330 			arr[n++] = 0x89;  /* ATA information */
1331 			arr[n++] = 0xb0;  /* Block limits (SBC) */
1332 			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1333 			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1334 				arr[n++] = 0xb2;
1335 			arr[3] = n - 4;	  /* number of supported VPD pages */
1336 		} else if (0x80 == cmd[2]) { /* unit serial number */
1337 			arr[1] = cmd[2];	/*sanity */
1338 			arr[3] = len;
1339 			memcpy(&arr[4], lu_id_str, len);
1340 		} else if (0x83 == cmd[2]) { /* device identification */
1341 			arr[1] = cmd[2];	/*sanity */
1342 			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1343 						 target_dev_id, lu_id_num,
1344 						 lu_id_str, len);
1345 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1346 			arr[1] = cmd[2];	/*sanity */
1347 			arr[3] = inquiry_evpd_84(&arr[4]);
1348 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1349 			arr[1] = cmd[2];	/*sanity */
1350 			arr[3] = inquiry_evpd_85(&arr[4]);
1351 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1352 			arr[1] = cmd[2];	/*sanity */
1353 			arr[3] = 0x3c;	/* number of following entries */
1354 			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1355 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1356 			else if (scsi_debug_dif)
1357 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1358 			else
1359 				arr[4] = 0x0;   /* no protection stuff */
1360 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1361 		} else if (0x87 == cmd[2]) { /* mode page policy */
1362 			arr[1] = cmd[2];	/*sanity */
1363 			arr[3] = 0x8;	/* number of following entries */
1364 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1365 			arr[6] = 0x80;	/* mlus, shared */
1366 			arr[8] = 0x18;	 /* protocol specific lu */
1367 			arr[10] = 0x82;	 /* mlus, per initiator port */
1368 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1369 			arr[1] = cmd[2];	/*sanity */
1370 			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1371 		} else if (0x89 == cmd[2]) { /* ATA information */
1372 			arr[1] = cmd[2];        /*sanity */
1373 			n = inquiry_evpd_89(&arr[4]);
1374 			arr[2] = (n >> 8);
1375 			arr[3] = (n & 0xff);
1376 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1377 			arr[1] = cmd[2];        /*sanity */
1378 			arr[3] = inquiry_evpd_b0(&arr[4]);
1379 		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1380 			arr[1] = cmd[2];        /*sanity */
1381 			arr[3] = inquiry_evpd_b1(&arr[4]);
1382 		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1383 			arr[1] = cmd[2];        /*sanity */
1384 			arr[3] = inquiry_evpd_b2(&arr[4]);
1385 		} else {
1386 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1387 			kfree(arr);
1388 			return check_condition_result;
1389 		}
1390 		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1391 		ret = fill_from_dev_buffer(scp, arr,
1392 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1393 		kfree(arr);
1394 		return ret;
1395 	}
1396 	/* drops through here for a standard inquiry */
1397 	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
1398 	arr[2] = scsi_debug_scsi_level;
1399 	arr[3] = 2;    /* response_data_format==2 */
1400 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1401 	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1402 	if (0 == scsi_debug_vpd_use_hostno)
1403 		arr[5] = 0x10; /* claim: implicit TGPS */
1404 	arr[6] = 0x10; /* claim: MultiP */
1405 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1406 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1407 	memcpy(&arr[8], inq_vendor_id, 8);
1408 	memcpy(&arr[16], inq_product_id, 16);
1409 	memcpy(&arr[32], inq_product_rev, 4);
1410 	/* version descriptors (2 bytes each) follow */
1411 	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1412 	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1413 	n = 62;
1414 	if (scsi_debug_ptype == 0) {
1415 		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1416 	} else if (scsi_debug_ptype == 1) {
1417 		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1418 	}
1419 	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1420 	ret = fill_from_dev_buffer(scp, arr,
1421 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1422 	kfree(arr);
1423 	return ret;
1424 }
1425 
1426 static int resp_requests(struct scsi_cmnd * scp,
1427 			 struct sdebug_dev_info * devip)
1428 {
1429 	unsigned char * sbuff;
1430 	unsigned char *cmd = scp->cmnd;
1431 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1432 	bool dsense;
1433 	int len = 18;
1434 
1435 	memset(arr, 0, sizeof(arr));
1436 	dsense = !!(cmd[1] & 1);
1437 	sbuff = scp->sense_buffer;
1438 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1439 		if (dsense) {
1440 			arr[0] = 0x72;
1441 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1442 			arr[2] = THRESHOLD_EXCEEDED;
1443 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1444 			len = 8;
1445 		} else {
1446 			arr[0] = 0x70;
1447 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1448 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1449 			arr[12] = THRESHOLD_EXCEEDED;
1450 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1451 		}
1452 	} else {
1453 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1454 		if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1455 			;	/* have sense and formats match */
1456 		else if (arr[0] <= 0x70) {
1457 			if (dsense) {
1458 				memset(arr, 0, 8);
1459 				arr[0] = 0x72;
1460 				len = 8;
1461 			} else {
1462 				memset(arr, 0, 18);
1463 				arr[0] = 0x70;
1464 				arr[7] = 0xa;
1465 			}
1466 		} else if (dsense) {
1467 			memset(arr, 0, 8);
1468 			arr[0] = 0x72;
1469 			arr[1] = sbuff[2];     /* sense key */
1470 			arr[2] = sbuff[12];    /* asc */
1471 			arr[3] = sbuff[13];    /* ascq */
1472 			len = 8;
1473 		} else {
1474 			memset(arr, 0, 18);
1475 			arr[0] = 0x70;
1476 			arr[2] = sbuff[1];
1477 			arr[7] = 0xa;
1478 			arr[12] = sbuff[1];
1479 			arr[13] = sbuff[3];
1480 		}
1481 
1482 	}
1483 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1484 	return fill_from_dev_buffer(scp, arr, len);
1485 }
1486 
1487 static int resp_start_stop(struct scsi_cmnd * scp,
1488 			   struct sdebug_dev_info * devip)
1489 {
1490 	unsigned char *cmd = scp->cmnd;
1491 	int power_cond, start;
1492 
1493 	power_cond = (cmd[4] & 0xf0) >> 4;
1494 	if (power_cond) {
1495 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1496 		return check_condition_result;
1497 	}
1498 	start = cmd[4] & 1;
1499 	if (start == devip->stopped)
1500 		devip->stopped = !start;
1501 	return 0;
1502 }
1503 
1504 static sector_t get_sdebug_capacity(void)
1505 {
1506 	if (scsi_debug_virtual_gb > 0)
1507 		return (sector_t)scsi_debug_virtual_gb *
1508 			(1073741824 / scsi_debug_sector_size);
1509 	else
1510 		return sdebug_store_sectors;
1511 }
1512 
1513 #define SDEBUG_READCAP_ARR_SZ 8
1514 static int resp_readcap(struct scsi_cmnd * scp,
1515 			struct sdebug_dev_info * devip)
1516 {
1517 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1518 	unsigned int capac;
1519 
1520 	/* following just in case virtual_gb changed */
1521 	sdebug_capacity = get_sdebug_capacity();
1522 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1523 	if (sdebug_capacity < 0xffffffff) {
1524 		capac = (unsigned int)sdebug_capacity - 1;
1525 		arr[0] = (capac >> 24);
1526 		arr[1] = (capac >> 16) & 0xff;
1527 		arr[2] = (capac >> 8) & 0xff;
1528 		arr[3] = capac & 0xff;
1529 	} else {
1530 		arr[0] = 0xff;
1531 		arr[1] = 0xff;
1532 		arr[2] = 0xff;
1533 		arr[3] = 0xff;
1534 	}
1535 	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1536 	arr[7] = scsi_debug_sector_size & 0xff;
1537 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1538 }
1539 
1540 #define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY(16): 32 byte response with last LBA, block size,
 * physical block exponent, lowest aligned LBA and the protection and
 * provisioning flags, truncated to the CDB's allocation length. */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	unsigned long long capac;
	int k, alloc_len;

	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
		     + cmd[13]);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	capac = sdebug_capacity - 1;	/* LBA of last logical block */
	for (k = 0; k < 8; ++k, capac >>= 8)
		arr[7 - k] = capac & 0xff;	/* big-endian 64 bit */
	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
	arr[11] = scsi_debug_sector_size & 0xff;
	arr[13] = scsi_debug_physblk_exp & 0xf;
	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		if (scsi_debug_lbprz)
			arr[14] |= 0x40; /* LBPRZ */
	}

	arr[15] = scsi_debug_lowest_aligned & 0xff;

	if (scsi_debug_dif) {
		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1580 
1581 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1582 
/* REPORT TARGET PORT GROUPS: two groups of one port each, the second
 * one unavailable (it is the fake port B from VPD page 0x88). */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	/* 32 bit allocation length from the CDB */
	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
		+ cmd[9]);

	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
	    (devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (0 == scsi_debug_vpd_use_hostno) {
	    arr[n++] = host_no % 3; /* Asymm access state */
	    arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
	    arr[n++] = 0x0; /* Active/Optimized path */
	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
	}
	arr[n++] = (port_group_a >> 8) & 0xff;
	arr[n++] = port_group_a & 0xff;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = (port_a >> 8) & 0xff;
	arr[n++] = port_a & 0xff;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	arr[n++] = (port_group_b >> 8) & 0xff;
	arr[n++] = port_group_b & 0xff;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = (port_b >> 8) & 0xff;
	arr[n++] = port_b & 0xff;

	rlen = n - 4;	/* returned data length (excludes the header) */
	arr[0] = (rlen >> 24) & 0xff;
	arr[1] = (rlen >> 16) & 0xff;
	arr[2] = (rlen >> 8) & 0xff;
	arr[3] = rlen & 0xff;

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1663 
/*
 * Respond to the REPORT SUPPORTED OPERATION CODES command (MAINTENANCE IN,
 * service action 0xc).  Handles reporting_options 0 (list all commands)
 * and 1/2/3 (report on the single command selected by opcode and/or
 * service action).  When the RCTD bit is set, a command timeouts
 * descriptor is appended to each command descriptor.
 * Returns 0 or check_condition_result.
 */
static int
resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	/* SPC-4 requires at least 4 bytes; cap at 64 KiB */
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* over-allocate so per-descriptor writes below cannot overrun */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* walk this opcode's attached (service action) list */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer loop cursor */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				/* opcode-only query is invalid for commands
				 * that require a service action */
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported, standard format */
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out the usage (cdb length) mask */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp the returned length to the buffer and the allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1814 
1815 static int
1816 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1817 {
1818 	bool repd;
1819 	u32 alloc_len, len;
1820 	u8 arr[16];
1821 	u8 *cmd = scp->cmnd;
1822 
1823 	memset(arr, 0, sizeof(arr));
1824 	repd = !!(cmd[2] & 0x80);
1825 	alloc_len = get_unaligned_be32(cmd + 6);
1826 	if (alloc_len < 4) {
1827 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1828 		return check_condition_result;
1829 	}
1830 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1831 	arr[1] = 0x1;		/* ITNRS */
1832 	if (repd) {
1833 		arr[3] = 0xc;
1834 		len = 16;
1835 	} else
1836 		len = 4;
1837 
1838 	len = (len < alloc_len) ? len : alloc_len;
1839 	return fill_from_dev_buffer(scp, arr, len);
1840 }
1841 
1842 /* <<Following mode page info copied from ST318451LW>> */
1843 
/* Build the Read-Write Error Recovery mode page (page code 0x1) into p.
 * pcontrol==1 requests changeable values (all zeros after the header).
 * Returns the page length in bytes. */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1854 
/* Build the Disconnect-Reconnect mode page (page code 0x2) into p.
 * pcontrol==1 requests changeable values (all zeros after the header).
 * Returns the page length in bytes. */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1865 
1866 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1867 {       /* Format device page for mode_sense */
1868 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1869 				     0, 0, 0, 0, 0, 0, 0, 0,
1870 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1871 
1872 	memcpy(p, format_pg, sizeof(format_pg));
1873 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1874 	p[11] = sdebug_sectors_per & 0xff;
1875 	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1876 	p[13] = scsi_debug_sector_size & 0xff;
1877 	if (scsi_debug_removable)
1878 		p[20] |= 0x20; /* should agree with INQUIRY */
1879 	if (1 == pcontrol)
1880 		memset(p + 2, 0, sizeof(format_pg) - 2);
1881 	return sizeof(format_pg);
1882 }
1883 
1884 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1885 { 	/* Caching page for mode_sense */
1886 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1887 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1888 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1889 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1890 
1891 	if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1892 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1893 	memcpy(p, caching_pg, sizeof(caching_pg));
1894 	if (1 == pcontrol)
1895 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1896 	else if (2 == pcontrol)
1897 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1898 	return sizeof(caching_pg);
1899 }
1900 
1901 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1902 { 	/* Control mode page for mode_sense */
1903 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1904 				        0, 0, 0, 0};
1905 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1906 				     0, 0, 0x2, 0x4b};
1907 
1908 	if (scsi_debug_dsense)
1909 		ctrl_m_pg[2] |= 0x4;
1910 	else
1911 		ctrl_m_pg[2] &= ~0x4;
1912 
1913 	if (scsi_debug_ato)
1914 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1915 
1916 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1917 	if (1 == pcontrol)
1918 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1919 	else if (2 == pcontrol)
1920 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1921 	return sizeof(ctrl_m_pg);
1922 }
1923 
1924 
1925 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1926 {	/* Informational Exceptions control mode page for mode_sense */
1927 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1928 				       0, 0, 0x0, 0x0};
1929 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1930 				      0, 0, 0x0, 0x0};
1931 
1932 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1933 	if (1 == pcontrol)
1934 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1935 	else if (2 == pcontrol)
1936 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1937 	return sizeof(iec_m_pg);
1938 }
1939 
/* Build the SAS SSP mode page, short format (page code 0x19), into p.
 * pcontrol==1 requests changeable values (zeros after the header).
 * Returns the page length in bytes. */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
1950 
1951 
/* Build the SAS Phy Control And Discover mode subpage (page 0x19,
 * subpage 0x1) into p, patching in per-target big-endian "SAS addresses"
 * derived from target_dev_id for the two simulated phys.
 * pcontrol==1 requests changeable values.  Returns the subpage length. */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{
	static const unsigned char sas_pcd_m_pg[] = {
		0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		0x2, 0, 0, 0, 0, 0, 0, 0,
		0x88, 0x99, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		0x3, 0, 0, 0, 0, 0, 0, 0,
		0x88, 0x99, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	int port_a = target_dev_id + 1;
	int port_b = port_a + 1;

	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* low dword of each phy's address, big-endian */
	p[20] = (port_a >> 24) & 0xff;
	p[21] = (port_a >> 16) & 0xff;
	p[22] = (port_a >> 8) & 0xff;
	p[23] = port_a & 0xff;
	p[48 + 20] = (port_b >> 24) & 0xff;
	p[48 + 21] = (port_b >> 16) & 0xff;
	p[48 + 22] = (port_b >> 8) & 0xff;
	p[48 + 23] = port_b & 0xff;
	if (pcontrol == 1)	/* changeable: zero the descriptors */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
1986 
/* Build the SAS SSP shared protocol-specific port mode subpage (page
 * 0x19, subpage 0x2) into p.  pcontrol==1 requests changeable values.
 * Returns the subpage length in bytes. */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
1998 
1999 #define SDEBUG_MAX_MSENSE_SZ 256
2000 
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, an optional block descriptor (short or LONGLBA long form), then
 * the requested mode page(s) via the resp_*_pg() helpers.  pcontrol
 * selects current (0), changeable (1) or default (2) values; saved (3)
 * values are not supported.  Returns 0 or check_condition_result.
 */
static int
resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char dbd, llbaa;
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int k, alloc_len, msense_6, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
	/* only direct-access (disk) type gets a block descriptor */
	if ((0 == scsi_debug_ptype) && (0 == dbd))
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* set DPOFUA bit for disks */
	if (0 == scsi_debug_ptype)
		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
	else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes (6-byte cdb) or 8 bytes (10-byte) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32-bit count, 24-bit block length */
		if (sdebug_capacity > 0xfffffffe) {
			ap[0] = 0xff;
			ap[1] = 0xff;
			ap[2] = 0xff;
			ap[3] = 0xff;
		} else {
			ap[0] = (sdebug_capacity >> 24) & 0xff;
			ap[1] = (sdebug_capacity >> 16) & 0xff;
			ap[2] = (sdebug_capacity >> 8) & 0xff;
			ap[3] = sdebug_capacity & 0xff;
		}
		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
		ap[7] = scsi_debug_sector_size & 0xff;
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* LONGLBA block descriptor: 64-bit count, 32-bit length */
		unsigned long long capac = sdebug_capacity;

        	for (k = 0; k < 8; ++k, capac >>= 8)
                	ap[7 - k] = capac & 0xff;
		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
		ap[15] = scsi_debug_sector_size & 0xff;
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
        case 0x3:       /* Format device page, direct access */
                len = resp_format_pg(ap, pcontrol, target);
                offset += len;
                break;
	case 0x8:	/* Caching page, direct access */
		len = resp_caching_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
	        }
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
                }
		offset += len;
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in the mode data length field of the header */
	if (msense_6)
		arr[0] = offset - 1;
	else {
		arr[0] = ((offset - 2) >> 8) & 0xff;
		arr[1] = (offset - 2) & 0xff;
	}
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2157 
2158 #define SDEBUG_MAX_MSELECT_SZ 512
2159 
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list from the initiator, validates the header and the single mode page
 * it contains, and applies changes to the mutable caching, control or
 * informational-exceptions pages.  On success a MODE PARAMETERS CHANGED
 * unit attention is queued for other initiators.
 * Returns 0, check_condition_result or a DID_* host byte code.
 */
static int
resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
                return (DID_ERROR << 16);
        else if ((res < param_len) &&
                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
	/* mode data length is reserved in MODE SELECT; must be zero */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip header and any block descriptors to reach the mode page */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* only apply the change if the page length matches our page */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* keep module dsense setting in step with D_SENSE */
			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2238 
/* Build the Temperature log page body (two parameters: current and
 * reference temperature, 38 and 65 degrees C).  Returns its length. */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2248 
2249 static int resp_ie_l_pg(unsigned char * arr)
2250 {
2251 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2252 		};
2253 
2254         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2255 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2256 		arr[4] = THRESHOLD_EXCEEDED;
2257 		arr[5] = 0xff;
2258 	}
2259         return sizeof(ie_l_pg);
2260 }
2261 
2262 #define SDEBUG_MAX_LSENSE_SZ 512
2263 
2264 static int resp_log_sense(struct scsi_cmnd * scp,
2265                           struct sdebug_dev_info * devip)
2266 {
2267 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2268 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2269 	unsigned char *cmd = scp->cmnd;
2270 
2271 	memset(arr, 0, sizeof(arr));
2272 	ppc = cmd[1] & 0x2;
2273 	sp = cmd[1] & 0x1;
2274 	if (ppc || sp) {
2275 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2276 		return check_condition_result;
2277 	}
2278 	pcontrol = (cmd[2] & 0xc0) >> 6;
2279 	pcode = cmd[2] & 0x3f;
2280 	subpcode = cmd[3] & 0xff;
2281 	alloc_len = (cmd[7] << 8) + cmd[8];
2282 	arr[0] = pcode;
2283 	if (0 == subpcode) {
2284 		switch (pcode) {
2285 		case 0x0:	/* Supported log pages log page */
2286 			n = 4;
2287 			arr[n++] = 0x0;		/* this page */
2288 			arr[n++] = 0xd;		/* Temperature */
2289 			arr[n++] = 0x2f;	/* Informational exceptions */
2290 			arr[3] = n - 4;
2291 			break;
2292 		case 0xd:	/* Temperature log page */
2293 			arr[3] = resp_temp_l_pg(arr + 4);
2294 			break;
2295 		case 0x2f:	/* Informational exceptions log page */
2296 			arr[3] = resp_ie_l_pg(arr + 4);
2297 			break;
2298 		default:
2299 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2300 			return check_condition_result;
2301 		}
2302 	} else if (0xff == subpcode) {
2303 		arr[0] |= 0x40;
2304 		arr[1] = subpcode;
2305 		switch (pcode) {
2306 		case 0x0:	/* Supported log pages and subpages log page */
2307 			n = 4;
2308 			arr[n++] = 0x0;
2309 			arr[n++] = 0x0;		/* 0,0 page */
2310 			arr[n++] = 0x0;
2311 			arr[n++] = 0xff;	/* this page */
2312 			arr[n++] = 0xd;
2313 			arr[n++] = 0x0;		/* Temperature */
2314 			arr[n++] = 0x2f;
2315 			arr[n++] = 0x0;	/* Informational exceptions */
2316 			arr[3] = n - 4;
2317 			break;
2318 		case 0xd:	/* Temperature subpages */
2319 			n = 4;
2320 			arr[n++] = 0xd;
2321 			arr[n++] = 0x0;		/* Temperature */
2322 			arr[3] = n - 4;
2323 			break;
2324 		case 0x2f:	/* Informational exceptions subpages */
2325 			n = 4;
2326 			arr[n++] = 0x2f;
2327 			arr[n++] = 0x0;		/* Informational exceptions */
2328 			arr[3] = n - 4;
2329 			break;
2330 		default:
2331 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2332 			return check_condition_result;
2333 		}
2334 	} else {
2335 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2336 		return check_condition_result;
2337 	}
2338 	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2339 	return fill_from_dev_buffer(scp, arr,
2340 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2341 }
2342 
2343 static int check_device_access_params(struct scsi_cmnd *scp,
2344 				      unsigned long long lba, unsigned int num)
2345 {
2346 	if (lba + num > sdebug_capacity) {
2347 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2348 		return check_condition_result;
2349 	}
2350 	/* transfer length excessive (tie in to block limits VPD page) */
2351 	if (num > sdebug_store_sectors) {
2352 		/* needs work to find which cdb byte 'num' comes from */
2353 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2354 		return check_condition_result;
2355 	}
2356 	return 0;
2357 }
2358 
2359 /* Returns number of bytes copied or -1 if error. */
2360 static int
2361 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2362 {
2363 	int ret;
2364 	u64 block, rest = 0;
2365 	struct scsi_data_buffer *sdb;
2366 	enum dma_data_direction dir;
2367 
2368 	if (do_write) {
2369 		sdb = scsi_out(scmd);
2370 		dir = DMA_TO_DEVICE;
2371 	} else {
2372 		sdb = scsi_in(scmd);
2373 		dir = DMA_FROM_DEVICE;
2374 	}
2375 
2376 	if (!sdb->length)
2377 		return 0;
2378 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2379 		return -1;
2380 
2381 	block = do_div(lba, sdebug_store_sectors);
2382 	if (block + num > sdebug_store_sectors)
2383 		rest = block + num - sdebug_store_sectors;
2384 
2385 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2386 		   fake_storep + (block * scsi_debug_sector_size),
2387 		   (num - rest) * scsi_debug_sector_size, 0, do_write);
2388 	if (ret != (num - rest) * scsi_debug_sector_size)
2389 		return ret;
2390 
2391 	if (rest) {
2392 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2393 			    fake_storep, rest * scsi_debug_sector_size,
2394 			    (num - rest) * scsi_debug_sector_size, do_write);
2395 	}
2396 
2397 	return ret;
2398 }
2399 
2400 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2401  * arr into fake_store(lba,num) and return true. If comparison fails then
2402  * return false. */
2403 static bool
2404 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2405 {
2406 	bool res;
2407 	u64 block, rest = 0;
2408 	u32 store_blks = sdebug_store_sectors;
2409 	u32 lb_size = scsi_debug_sector_size;
2410 
2411 	block = do_div(lba, store_blks);
2412 	if (block + num > store_blks)
2413 		rest = block + num - store_blks;
2414 
2415 	res = !memcmp(fake_storep + (block * lb_size), arr,
2416 		      (num - rest) * lb_size);
2417 	if (!res)
2418 		return res;
2419 	if (rest)
2420 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2421 			     rest * lb_size);
2422 	if (!res)
2423 		return res;
2424 	arr += num * lb_size;
2425 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2426 	if (rest)
2427 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2428 		       rest * lb_size);
2429 	return res;
2430 }
2431 
2432 static __be16 dif_compute_csum(const void *buf, int len)
2433 {
2434 	__be16 csum;
2435 
2436 	if (scsi_debug_guard)
2437 		csum = (__force __be16)ip_compute_csum(buf, len);
2438 	else
2439 		csum = cpu_to_be16(crc_t10dif(buf, len));
2440 
2441 	return csum;
2442 }
2443 
/* Verify one sector's protection information tuple against its data.
 * Checks the guard tag (checksum) and, for type 1/2 protection, the
 * reference tag against the sector number or expected initial lba.
 * Returns 0 on success, or the ascq for the failed check (0x01 guard,
 * 0x03 reference). */
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: ref tag must equal the low 32 bits of the sector */
	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: ref tag must equal the expected initial lba from the cdb */
	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2470 
/* Copy protection information for 'sectors' sectors starting at 'sector'
 * between the command's protection scatterlist and the circular dif
 * store (dif_storep).  'read' selects the direction: store -> sgl for
 * reads, sgl -> store for writes.  Handles wrap-around at the end of the
 * store within each scatterlist segment. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the part of this segment that wraps past the
		 * end of the dif store back to its start */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2513 
/* Verify the stored protection information for each sector of a read and,
 * if all sectors pass, copy that protection data to the command's
 * protection scatterlist.  Returns 0 on success or the ascq of the first
 * failed check (see dif_verify()). */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct sd_dif_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		/* app tag 0xffff means "do not check this sector" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2542 
/*
 * Respond to the READ family of commands (READ 6/10/12/16/32 and
 * XDWRITEREAD 10).  Decodes lba/num/expected-initial-lba from the cdb,
 * validates protection-information usage and the access range, optionally
 * injects errors per the module options, then copies data out of the ram
 * store under the atomic_rw read lock.
 * Returns 0, check_condition_result, illegal_condition_result or a
 * DID_* host byte code.
 */
static int
resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode lba and transfer length from the cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (check_prot) {
		/* type 2 forbids RDPROTECT in these cdbs */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		/* inject a short read: claim half the requested blocks */
		if (ep->inj_short)
			num /= 2;
	}

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional simulated medium error over a fixed lba window */
	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (ret == -1)
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* optional injected completion errors */
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2682 
/* Emit a hex/ASCII dump of one sector to the kernel log, 16 bytes per
 * line.  Printable bytes are shown as " c ", everything else as a two
 * digit hex value.  Note: always reads 16 bytes per row, so @len is
 * expected to be a multiple of 16 (sector sizes are).
 */
static void dump_sector(unsigned char *buf, int len)
{
	int off, k, pos;

	pr_err(">>> Sector Dump <<<\n");
	for (off = 0; off < len; off += 16) {
		char line[128];

		pos = 0;
		for (k = 0; k < 16; k++) {
			unsigned char c = buf[off + k];

			if (c >= 0x20 && c < 0x7e)
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 " %c ", c);
			else
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 "%02x ", c);
		}
		pr_err("%04d: %s\n", off, line);
	}
}
2704 
/* Verify the host-supplied (DIX) protection information accompanying a
 * WRITE before the data is committed to the fake store.  Walks the data
 * and protection scatter-gather lists in lock step, checking one
 * sd_dif_tuple per logical block of scsi_debug_sector_size bytes via
 * dif_verify().
 *
 * Returns 0 on success.  On a verification failure the non-zero value
 * from dif_verify() is returned (the caller feeds it to mk_sense_buffer()
 * as the ascq); 0x01 is returned if the data sg list is exhausted before
 * the protection list (should not happen, hence the WARN_ON).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct sd_dif_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	/* Both iterations are atomic (kmap_atomic under the covers), so
	 * no sleeping is allowed between sg_miter_next/sg_miter_stop. */
	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct sd_dif_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			/* one protection tuple + one data block */
			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, scsi_debug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += scsi_debug_sector_size;
		}
		/* report how much of the data page was consumed before
		 * stopping the data iterator for this protection page */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all blocks verified: mirror the PI into dif_storep */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2776 
2777 static unsigned long lba_to_map_index(sector_t lba)
2778 {
2779 	if (scsi_debug_unmap_alignment) {
2780 		lba += scsi_debug_unmap_granularity -
2781 			scsi_debug_unmap_alignment;
2782 	}
2783 	do_div(lba, scsi_debug_unmap_granularity);
2784 
2785 	return lba;
2786 }
2787 
2788 static sector_t map_index_to_lba(unsigned long index)
2789 {
2790 	sector_t lba = index * scsi_debug_unmap_granularity;
2791 
2792 	if (scsi_debug_unmap_alignment) {
2793 		lba -= scsi_debug_unmap_granularity -
2794 			scsi_debug_unmap_alignment;
2795 	}
2796 
2797 	return lba;
2798 }
2799 
2800 static unsigned int map_state(sector_t lba, unsigned int *num)
2801 {
2802 	sector_t end;
2803 	unsigned int mapped;
2804 	unsigned long index;
2805 	unsigned long next;
2806 
2807 	index = lba_to_map_index(lba);
2808 	mapped = test_bit(index, map_storep);
2809 
2810 	if (mapped)
2811 		next = find_next_zero_bit(map_storep, map_size, index);
2812 	else
2813 		next = find_next_bit(map_storep, map_size, index);
2814 
2815 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2816 	*num = end - lba;
2817 
2818 	return mapped;
2819 }
2820 
2821 static void map_region(sector_t lba, unsigned int len)
2822 {
2823 	sector_t end = lba + len;
2824 
2825 	while (lba < end) {
2826 		unsigned long index = lba_to_map_index(lba);
2827 
2828 		if (index < map_size)
2829 			set_bit(index, map_storep);
2830 
2831 		lba = map_index_to_lba(index + 1);
2832 	}
2833 }
2834 
2835 static void unmap_region(sector_t lba, unsigned int len)
2836 {
2837 	sector_t end = lba + len;
2838 
2839 	while (lba < end) {
2840 		unsigned long index = lba_to_map_index(lba);
2841 
2842 		if (lba == map_index_to_lba(index) &&
2843 		    lba + scsi_debug_unmap_granularity <= end &&
2844 		    index < map_size) {
2845 			clear_bit(index, map_storep);
2846 			if (scsi_debug_lbprz) {
2847 				memset(fake_storep +
2848 				       lba * scsi_debug_sector_size, 0,
2849 				       scsi_debug_sector_size *
2850 				       scsi_debug_unmap_granularity);
2851 			}
2852 			if (dif_storep) {
2853 				memset(dif_storep + lba, 0xff,
2854 				       sizeof(*dif_storep) *
2855 				       scsi_debug_unmap_granularity);
2856 			}
2857 		}
2858 		lba = map_index_to_lba(index + 1);
2859 	}
2860 }
2861 
/* Respond to the WRITE command family: WRITE(6/10/12/16/32) and
 * XDWRITEREAD(10).  Decodes lba/num from the cdb, validates the range
 * against the simulated capacity, optionally verifies DIX/DIF protection
 * data, then copies the data-out buffer into the fake store while holding
 * atomic_rw for writing.  Returns 0, check_condition_result,
 * illegal_condition_result, or a DID_ERROR host byte.
 */
static int
resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and block count per cdb variant */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* 21 bit LBA split across bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks for WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (check_prot) {
		/* WRPROTECT must be 0 for a type 2 protected device */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		/* unprotected write to a type 1/3 device: log but allow */
		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (scsi_debug_lbp())
		map_region(lba, num);	/* mark blocks as provisioned */
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (-1 == ret)
		return (DID_ERROR << 16);
	else if ((ret < (num * scsi_debug_sector_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * scsi_debug_sector_size, ret);

	/* optional error injection, enabled via scsi_debug_opts */
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2980 
/* Common worker for WRITE SAME(10/16).  With the UNMAP bit (and LBP
 * enabled) the range is simply deallocated.  Otherwise one logical block
 * is obtained - zeroed when @ndob is set, else fetched from the data-out
 * buffer - and replicated across the whole range.  Runs under atomic_rw
 * held for writing.
 */
static int
resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
		bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + (lba * scsi_debug_sector_size), 0,
		       scsi_debug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep +
					       (lba * scsi_debug_sector_size),
					  scsi_debug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return (DID_ERROR << 16);
	} else if ((ret < (num * scsi_debug_sector_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		/* NOTE(review): only one block is fetched above, so this
		 * message fires for any num > 1 when NOISE is on - looks
		 * informational only; confirm intent before changing. */
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * scsi_debug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
		       fake_storep + (lba * scsi_debug_sector_size),
		       scsi_debug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3033 
3034 static int
3035 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3036 {
3037 	u8 *cmd = scp->cmnd;
3038 	u32 lba;
3039 	u16 num;
3040 	u32 ei_lba = 0;
3041 	bool unmap = false;
3042 
3043 	if (cmd[1] & 0x8) {
3044 		if (scsi_debug_lbpws10 == 0) {
3045 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3046 			return check_condition_result;
3047 		} else
3048 			unmap = true;
3049 	}
3050 	lba = get_unaligned_be32(cmd + 2);
3051 	num = get_unaligned_be16(cmd + 7);
3052 	if (num > scsi_debug_write_same_length) {
3053 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3054 		return check_condition_result;
3055 	}
3056 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3057 }
3058 
3059 static int
3060 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3061 {
3062 	u8 *cmd = scp->cmnd;
3063 	u64 lba;
3064 	u32 num;
3065 	u32 ei_lba = 0;
3066 	bool unmap = false;
3067 	bool ndob = false;
3068 
3069 	if (cmd[1] & 0x8) {	/* UNMAP */
3070 		if (scsi_debug_lbpws == 0) {
3071 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3072 			return check_condition_result;
3073 		} else
3074 			unmap = true;
3075 	}
3076 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3077 		ndob = true;
3078 	lba = get_unaligned_be64(cmd + 2);
3079 	num = get_unaligned_be32(cmd + 10);
3080 	if (num > scsi_debug_write_same_length) {
3081 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3082 		return check_condition_result;
3083 	}
3084 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3085 }
3086 
3087 /* Note the mode field is in the same position as the (lower) service action
3088  * field. For the Report supported operation codes command, SPC-4 suggests
3089  * each mode of this command should be reported separately; for future. */
3090 static int
3091 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3092 {
3093 	u8 *cmd = scp->cmnd;
3094 	struct scsi_device *sdp = scp->device;
3095 	struct sdebug_dev_info *dp;
3096 	u8 mode;
3097 
3098 	mode = cmd[1] & 0x1f;
3099 	switch (mode) {
3100 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3101 		/* set UAs on this device only */
3102 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3103 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3104 		break;
3105 	case 0x5:	/* download MC, save and ACT */
3106 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3107 		break;
3108 	case 0x6:	/* download MC with offsets and ACT */
3109 		/* set UAs on most devices (LUs) in this target */
3110 		list_for_each_entry(dp,
3111 				    &devip->sdbg_host->dev_info_list,
3112 				    dev_list)
3113 			if (dp->target == sdp->id) {
3114 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3115 				if (devip != dp)
3116 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3117 						dp->uas_bm);
3118 			}
3119 		break;
3120 	case 0x7:	/* download MC with offsets, save, and ACT */
3121 		/* set UA on all devices (LUs) in this target */
3122 		list_for_each_entry(dp,
3123 				    &devip->sdbg_host->dev_info_list,
3124 				    dev_list)
3125 			if (dp->target == sdp->id)
3126 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3127 					dp->uas_bm);
3128 		break;
3129 	default:
3130 		/* do nothing for this command for other mode values */
3131 		break;
3132 	}
3133 	return 0;
3134 }
3135 
/* Respond to COMPARE AND WRITE.  The data-out buffer carries num blocks
 * of compare data followed by num blocks of write data.  Both halves are
 * staged into a temporary buffer; if the compare half matches the store,
 * the write half is committed, otherwise MISCOMPARE is reported.
 */
static int
resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;
	u32 lb_size = scsi_debug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT must be 0 on a type 2 protected device */
	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	/* unprotected write to a type 1/3 device: log but allow */
	if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
	     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	/* room for the compare blocks plus the write blocks */
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if ((ret < (dnum * lb_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half of arr against the store; commit second half
	 * on match */
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3212 
/* One 16 byte block descriptor from the UNMAP parameter list: starting
 * LBA and number of blocks, both big-endian (see resp_unmap()). */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3218 
3219 static int
3220 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3221 {
3222 	unsigned char *buf;
3223 	struct unmap_block_desc *desc;
3224 	unsigned int i, payload_len, descriptors;
3225 	int ret;
3226 	unsigned long iflags;
3227 
3228 
3229 	if (!scsi_debug_lbp())
3230 		return 0;	/* fib and say its done */
3231 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3232 	BUG_ON(scsi_bufflen(scp) != payload_len);
3233 
3234 	descriptors = (payload_len - 8) / 16;
3235 	if (descriptors > scsi_debug_unmap_max_desc) {
3236 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3237 		return check_condition_result;
3238 	}
3239 
3240 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3241 	if (!buf) {
3242 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3243 				INSUFF_RES_ASCQ);
3244 		return check_condition_result;
3245 	}
3246 
3247 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3248 
3249 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3250 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3251 
3252 	desc = (void *)&buf[8];
3253 
3254 	write_lock_irqsave(&atomic_rw, iflags);
3255 
3256 	for (i = 0 ; i < descriptors ; i++) {
3257 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3258 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3259 
3260 		ret = check_device_access_params(scp, lba, num);
3261 		if (ret)
3262 			goto out;
3263 
3264 		unmap_region(lba, num);
3265 	}
3266 
3267 	ret = 0;
3268 
3269 out:
3270 	write_unlock_irqrestore(&atomic_rw, iflags);
3271 	kfree(buf);
3272 
3273 	return ret;
3274 }
3275 
3276 #define SDEBUG_GET_LBA_STATUS_LEN 32
3277 
3278 static int
3279 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3280 {
3281 	u8 *cmd = scp->cmnd;
3282 	u64 lba;
3283 	u32 alloc_len, mapped, num;
3284 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3285 	int ret;
3286 
3287 	lba = get_unaligned_be64(cmd + 2);
3288 	alloc_len = get_unaligned_be32(cmd + 10);
3289 
3290 	if (alloc_len < 24)
3291 		return 0;
3292 
3293 	ret = check_device_access_params(scp, lba, 1);
3294 	if (ret)
3295 		return ret;
3296 
3297 	if (scsi_debug_lbp())
3298 		mapped = map_state(lba, &num);
3299 	else {
3300 		mapped = 1;
3301 		/* following just in case virtual_gb changed */
3302 		sdebug_capacity = get_sdebug_capacity();
3303 		if (sdebug_capacity - lba <= 0xffffffff)
3304 			num = sdebug_capacity - lba;
3305 		else
3306 			num = 0xffffffff;
3307 	}
3308 
3309 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3310 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3311 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3312 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3313 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3314 
3315 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3316 }
3317 
3318 #define SDEBUG_RLUN_ARR_SZ 256
3319 
/* Respond to REPORT LUNS.  Builds a response of up to SDEBUG_RLUN_ARR_SZ
 * bytes listing scsi_debug_max_luns LUs (optionally skipping LUN 0), and
 * appends the REPORT LUNS well known LUN when select_report > 0.
 * select_report values above 2 are rejected; 1 reports well known LUNs
 * only.  LUNs are encoded in SAM-2 flat address format (14 bit limit).
 */
static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, want_wlun, shortish;
	u64 lun;
	unsigned char *cmd = scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	clear_luns_changed_on_target(devip);
	/* allocation length: 32 bit big-endian field at cdb bytes 6..9 */
	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	shortish = (alloc_len < 4);
	if (shortish || (select_report > 2)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = scsi_debug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;	/* well known LUNs only */
	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;
	want_wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + want_wlun;
	/* LUN list length header reflects the full (unclamped) count */
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	/* clamp to what fits in arr; the wlun is dropped first */
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
			    sizeof(struct scsi_lun)), num);
	if (n < num) {
		want_wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (want_wlun) {
		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
3376 
3377 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3378 			    unsigned int num, struct sdebug_dev_info *devip)
3379 {
3380 	int j;
3381 	unsigned char *kaddr, *buf;
3382 	unsigned int offset;
3383 	struct scsi_data_buffer *sdb = scsi_in(scp);
3384 	struct sg_mapping_iter miter;
3385 
3386 	/* better not to use temporary buffer. */
3387 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3388 	if (!buf) {
3389 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3390 				INSUFF_RES_ASCQ);
3391 		return check_condition_result;
3392 	}
3393 
3394 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3395 
3396 	offset = 0;
3397 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3398 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3399 
3400 	while (sg_miter_next(&miter)) {
3401 		kaddr = miter.addr;
3402 		for (j = 0; j < miter.length; j++)
3403 			*(kaddr + j) ^= *(buf + offset + j);
3404 
3405 		offset += miter.length;
3406 	}
3407 	sg_miter_stop(&miter);
3408 	kfree(buf);
3409 
3410 	return 0;
3411 }
3412 
3413 static int
3414 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3415 {
3416 	u8 *cmd = scp->cmnd;
3417 	u64 lba;
3418 	u32 num;
3419 	int errsts;
3420 
3421 	if (!scsi_bidi_cmnd(scp)) {
3422 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3423 				INSUFF_RES_ASCQ);
3424 		return check_condition_result;
3425 	}
3426 	errsts = resp_read_dt0(scp, devip);
3427 	if (errsts)
3428 		return errsts;
3429 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3430 		errsts = resp_write_dt0(scp, devip);
3431 		if (errsts)
3432 			return errsts;
3433 	}
3434 	lba = get_unaligned_be32(cmd + 2);
3435 	num = get_unaligned_be16(cmd + 7);
3436 	return resp_xdwriteread(scp, lba, num, devip);
3437 }
3438 
/* When timer or tasklet goes off this function is called.  @indx selects
 * the slot in queued_arr[] holding the delayed command.  Under
 * queued_arr_lock the slot is validated, released, per-device queue depth
 * is decremented and (if max_queue was reduced by the user) the retiring
 * watermark is updated; the mid level is notified via scsi_done outside
 * the lock. */
static void sdebug_q_cmd_complete(unsigned long indx)
{
	int qa_indx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	atomic_inc(&sdebug_completions);
	qa_indx = indx;
	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
		pr_err("wild qa_indx=%d\n", qa_indx);
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[qa_indx];
	scp = sqcp->a_cmnd;
	if (NULL == scp) {
		/* command already aborted/completed */
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("scp is NULL\n");
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (devip)
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (atomic_read(&retired_max_queue) > 0)
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qa_indx >= retval) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once the highest in-use slot drops below the old limit,
		 * retiring is finished */
		k = find_last_bit(queued_in_use_bm, retval);
		if ((k < scsi_debug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3496 
/* When high resolution timer goes off this function is called.  Same
 * logic as sdebug_q_cmd_complete() but the slot index comes from the
 * containing sdebug_hrtimer, and HRTIMER_NORESTART is always returned. */
static enum hrtimer_restart
sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	int qa_indx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	atomic_inc(&sdebug_completions);
	qa_indx = sd_hrtp->qa_indx;
	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
		pr_err("wild qa_indx=%d\n", qa_indx);
		goto the_end;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[qa_indx];
	scp = sqcp->a_cmnd;
	if (NULL == scp) {
		/* command already aborted/completed */
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("scp is NULL\n");
		goto the_end;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (devip)
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (atomic_read(&retired_max_queue) > 0)
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("Unexpected completion\n");
		goto the_end;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qa_indx >= retval) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			pr_err("index %d too large\n", retval);
			goto the_end;
		}
		/* once the highest in-use slot drops below the old limit,
		 * retiring is finished */
		k = find_last_bit(queued_in_use_bm, retval);
		if ((k < scsi_debug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
the_end:
	return HRTIMER_NORESTART;
}
3558 
3559 static struct sdebug_dev_info *
3560 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3561 {
3562 	struct sdebug_dev_info *devip;
3563 
3564 	devip = kzalloc(sizeof(*devip), flags);
3565 	if (devip) {
3566 		devip->sdbg_host = sdbg_host;
3567 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3568 	}
3569 	return devip;
3570 }
3571 
3572 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3573 {
3574 	struct sdebug_host_info * sdbg_host;
3575 	struct sdebug_dev_info * open_devip = NULL;
3576 	struct sdebug_dev_info * devip =
3577 			(struct sdebug_dev_info *)sdev->hostdata;
3578 
3579 	if (devip)
3580 		return devip;
3581 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3582 	if (!sdbg_host) {
3583 		pr_err("Host info NULL\n");
3584 		return NULL;
3585         }
3586 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3587 		if ((devip->used) && (devip->channel == sdev->channel) &&
3588                     (devip->target == sdev->id) &&
3589                     (devip->lun == sdev->lun))
3590                         return devip;
3591 		else {
3592 			if ((!devip->used) && (!open_devip))
3593 				open_devip = devip;
3594 		}
3595 	}
3596 	if (!open_devip) { /* try and make a new one */
3597 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3598 		if (!open_devip) {
3599 			pr_err("out of memory at line %d\n", __LINE__);
3600 			return NULL;
3601 		}
3602 	}
3603 
3604 	open_devip->channel = sdev->channel;
3605 	open_devip->target = sdev->id;
3606 	open_devip->lun = sdev->lun;
3607 	open_devip->sdbg_host = sdbg_host;
3608 	atomic_set(&open_devip->num_in_q, 0);
3609 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3610 	open_devip->used = true;
3611 	return open_devip;
3612 }
3613 
3614 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3615 {
3616 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3617 		pr_info("slave_alloc <%u %u %u %llu>\n",
3618 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3619 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3620 	return 0;
3621 }
3622 
3623 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3624 {
3625 	struct sdebug_dev_info *devip;
3626 
3627 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3628 		pr_info("slave_configure <%u %u %u %llu>\n",
3629 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3630 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3631 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3632 	devip = devInfoReg(sdp);
3633 	if (NULL == devip)
3634 		return 1;	/* no resources, will be marked offline */
3635 	sdp->hostdata = devip;
3636 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3637 	if (scsi_debug_no_uld)
3638 		sdp->no_uld_attach = 1;
3639 	return 0;
3640 }
3641 
3642 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3643 {
3644 	struct sdebug_dev_info *devip =
3645 		(struct sdebug_dev_info *)sdp->hostdata;
3646 
3647 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3648 		pr_info("slave_destroy <%u %u %u %llu>\n",
3649 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3650 	if (devip) {
3651 		/* make this slot available for re-use */
3652 		devip->used = false;
3653 		sdp->hostdata = NULL;
3654 	}
3655 }
3656 
/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0.
 * Scans queued_arr[] under queued_arr_lock for the slot holding @cmnd,
 * detaches it, cancels whichever delay mechanism is active (hrtimer,
 * timer or tasklet, selected by ndelay/delay settings) and frees the
 * slot. */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k, qmax, r_qmax;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	qmax = scsi_debug_max_queue;
	r_qmax = atomic_read(&retired_max_queue);
	if (r_qmax > qmax)
		qmax = r_qmax;	/* also scan slots still retiring */
	for (k = 0; k < qmax; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (cmnd == sqcp->a_cmnd) {
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* dropped before the cancel calls because
				 * timer/tasklet handlers take the same
				 * lock.  NOTE(review): sqcp and the
				 * in_use bit are then touched without the
				 * lock - looks racy against a concurrent
				 * completion; confirm before relying on
				 * this path. */
				spin_unlock_irqrestore(&queued_arr_lock,
						       iflags);
				if (scsi_debug_ndelay > 0) {
					if (sqcp->sd_hrtp)
						hrtimer_cancel(
							&sqcp->sd_hrtp->hrt);
				} else if (scsi_debug_delay > 0) {
					if (sqcp->cmnd_timerp)
						del_timer_sync(
							sqcp->cmnd_timerp);
				} else if (scsi_debug_delay < 0) {
					if (sqcp->tletp)
						tasklet_kill(sqcp->tletp);
				}
				clear_bit(k, queued_in_use_bm);
				return 1;
			}
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return 0;
}
3701 
/* Deletes (stops) timers or tasklets of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	/* walk the whole slot array, not just scsi_debug_max_queue, so
	 * commands queued before max_queue was lowered are also stopped */
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (sqcp->a_cmnd) {
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* drop the lock while cancelling the timer/
				 * tasklet synchronously; presumably avoids a
				 * deadlock with the completion callback --
				 * verify against sdebug_q_cmd_complete() */
				spin_unlock_irqrestore(&queued_arr_lock,
						       iflags);
				if (scsi_debug_ndelay > 0) {
					if (sqcp->sd_hrtp)
						hrtimer_cancel(
							&sqcp->sd_hrtp->hrt);
				} else if (scsi_debug_delay > 0) {
					if (sqcp->cmnd_timerp)
						del_timer_sync(
							sqcp->cmnd_timerp);
				} else if (scsi_debug_delay < 0) {
					if (sqcp->tletp)
						tasklet_kill(sqcp->tletp);
				}
				clear_bit(k, queued_in_use_bm);
				/* re-take the lock for the next iteration */
				spin_lock_irqsave(&queued_arr_lock, iflags);
			}
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
3741 
3742 /* Free queued command memory on heap */
3743 static void free_all_queued(void)
3744 {
3745 	unsigned long iflags;
3746 	int k;
3747 	struct sdebug_queued_cmd *sqcp;
3748 
3749 	spin_lock_irqsave(&queued_arr_lock, iflags);
3750 	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3751 		sqcp = &queued_arr[k];
3752 		kfree(sqcp->cmnd_timerp);
3753 		sqcp->cmnd_timerp = NULL;
3754 		kfree(sqcp->tletp);
3755 		sqcp->tletp = NULL;
3756 		kfree(sqcp->sd_hrtp);
3757 		sqcp->sd_hrtp = NULL;
3758 	}
3759 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3760 }
3761 
3762 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3763 {
3764 	++num_aborts;
3765 	if (SCpnt) {
3766 		if (SCpnt->device &&
3767 		    (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3768 			sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3769 				    __func__);
3770 		stop_queued_cmnd(SCpnt);
3771 	}
3772 	return SUCCESS;
3773 }
3774 
3775 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3776 {
3777 	struct sdebug_dev_info * devip;
3778 
3779 	++num_dev_resets;
3780 	if (SCpnt && SCpnt->device) {
3781 		struct scsi_device *sdp = SCpnt->device;
3782 
3783 		if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3784 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3785 		devip = devInfoReg(sdp);
3786 		if (devip)
3787 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3788 	}
3789 	return SUCCESS;
3790 }
3791 
3792 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3793 {
3794 	struct sdebug_host_info *sdbg_host;
3795 	struct sdebug_dev_info *devip;
3796 	struct scsi_device *sdp;
3797 	struct Scsi_Host *hp;
3798 	int k = 0;
3799 
3800 	++num_target_resets;
3801 	if (!SCpnt)
3802 		goto lie;
3803 	sdp = SCpnt->device;
3804 	if (!sdp)
3805 		goto lie;
3806 	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3807 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3808 	hp = sdp->host;
3809 	if (!hp)
3810 		goto lie;
3811 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3812 	if (sdbg_host) {
3813 		list_for_each_entry(devip,
3814 				    &sdbg_host->dev_info_list,
3815 				    dev_list)
3816 			if (devip->target == sdp->id) {
3817 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3818 				++k;
3819 			}
3820 	}
3821 	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3822 		sdev_printk(KERN_INFO, sdp,
3823 			    "%s: %d device(s) found in target\n", __func__, k);
3824 lie:
3825 	return SUCCESS;
3826 }
3827 
3828 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3829 {
3830 	struct sdebug_host_info *sdbg_host;
3831 	struct sdebug_dev_info *devip;
3832         struct scsi_device * sdp;
3833         struct Scsi_Host * hp;
3834 	int k = 0;
3835 
3836 	++num_bus_resets;
3837 	if (!(SCpnt && SCpnt->device))
3838 		goto lie;
3839 	sdp = SCpnt->device;
3840 	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3841 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3842 	hp = sdp->host;
3843 	if (hp) {
3844 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3845 		if (sdbg_host) {
3846 			list_for_each_entry(devip,
3847                                             &sdbg_host->dev_info_list,
3848 					    dev_list) {
3849 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3850 				++k;
3851 			}
3852 		}
3853 	}
3854 	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3855 		sdev_printk(KERN_INFO, sdp,
3856 			    "%s: %d device(s) found in host\n", __func__, k);
3857 lie:
3858 	return SUCCESS;
3859 }
3860 
3861 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3862 {
3863 	struct sdebug_host_info * sdbg_host;
3864 	struct sdebug_dev_info *devip;
3865 	int k = 0;
3866 
3867 	++num_host_resets;
3868 	if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3869 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3870         spin_lock(&sdebug_host_list_lock);
3871         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3872 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3873 				    dev_list) {
3874 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3875 			++k;
3876 		}
3877         }
3878         spin_unlock(&sdebug_host_list_lock);
3879 	stop_all_queued();
3880 	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3881 		sdev_printk(KERN_INFO, SCpnt->device,
3882 			    "%s: %d device(s) found\n", __func__, k);
3883 	return SUCCESS;
3884 }
3885 
/*
 * Write a DOS/MBR partition table into the first sector of the ram store
 * so the simulated disk appears pre-partitioned. @ramp points at the start
 * of the store, @store_size is its size in bytes. Partition starts are
 * rounded to cylinder (heads * sectors-per-track) boundaries.
 */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
		return;
	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / scsi_debug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
        starts[0] = sdebug_sectors_per;
	/* round each partition start down to a cylinder boundary */
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS (cylinder/head/sector) form of the start address */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS form of the (inclusive) end address */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3935 
3936 static int
3937 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3938 	      int scsi_result, int delta_jiff)
3939 {
3940 	unsigned long iflags;
3941 	int k, num_in_q, qdepth, inject;
3942 	struct sdebug_queued_cmd *sqcp = NULL;
3943 	struct scsi_device *sdp;
3944 
3945 	/* this should never happen */
3946 	if (WARN_ON(!cmnd))
3947 		return SCSI_MLQUEUE_HOST_BUSY;
3948 
3949 	if (NULL == devip) {
3950 		pr_warn("called devip == NULL\n");
3951 		/* no particularly good error to report back */
3952 		return SCSI_MLQUEUE_HOST_BUSY;
3953 	}
3954 
3955 	sdp = cmnd->device;
3956 
3957 	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3958 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3959 			    __func__, scsi_result);
3960 	if (delta_jiff == 0)
3961 		goto respond_in_thread;
3962 
3963 	/* schedule the response at a later time if resources permit */
3964 	spin_lock_irqsave(&queued_arr_lock, iflags);
3965 	num_in_q = atomic_read(&devip->num_in_q);
3966 	qdepth = cmnd->device->queue_depth;
3967 	inject = 0;
3968 	if ((qdepth > 0) && (num_in_q >= qdepth)) {
3969 		if (scsi_result) {
3970 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3971 			goto respond_in_thread;
3972 		} else
3973 			scsi_result = device_qfull_result;
3974 	} else if ((scsi_debug_every_nth != 0) &&
3975 		   (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3976 		   (scsi_result == 0)) {
3977 		if ((num_in_q == (qdepth - 1)) &&
3978 		    (atomic_inc_return(&sdebug_a_tsf) >=
3979 		     abs(scsi_debug_every_nth))) {
3980 			atomic_set(&sdebug_a_tsf, 0);
3981 			inject = 1;
3982 			scsi_result = device_qfull_result;
3983 		}
3984 	}
3985 
3986 	k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3987 	if (k >= scsi_debug_max_queue) {
3988 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3989 		if (scsi_result)
3990 			goto respond_in_thread;
3991 		else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3992 			scsi_result = device_qfull_result;
3993 		if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3994 			sdev_printk(KERN_INFO, sdp,
3995 				    "%s: max_queue=%d exceeded, %s\n",
3996 				    __func__, scsi_debug_max_queue,
3997 				    (scsi_result ?  "status: TASK SET FULL" :
3998 						    "report: host busy"));
3999 		if (scsi_result)
4000 			goto respond_in_thread;
4001 		else
4002 			return SCSI_MLQUEUE_HOST_BUSY;
4003 	}
4004 	__set_bit(k, queued_in_use_bm);
4005 	atomic_inc(&devip->num_in_q);
4006 	sqcp = &queued_arr[k];
4007 	sqcp->a_cmnd = cmnd;
4008 	cmnd->result = scsi_result;
4009 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
4010 	if (delta_jiff > 0) {
4011 		if (NULL == sqcp->cmnd_timerp) {
4012 			sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
4013 						    GFP_ATOMIC);
4014 			if (NULL == sqcp->cmnd_timerp)
4015 				return SCSI_MLQUEUE_HOST_BUSY;
4016 			init_timer(sqcp->cmnd_timerp);
4017 		}
4018 		sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
4019 		sqcp->cmnd_timerp->data = k;
4020 		sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
4021 		add_timer(sqcp->cmnd_timerp);
4022 	} else if (scsi_debug_ndelay > 0) {
4023 		ktime_t kt = ktime_set(0, scsi_debug_ndelay);
4024 		struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
4025 
4026 		if (NULL == sd_hp) {
4027 			sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
4028 			if (NULL == sd_hp)
4029 				return SCSI_MLQUEUE_HOST_BUSY;
4030 			sqcp->sd_hrtp = sd_hp;
4031 			hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
4032 				     HRTIMER_MODE_REL);
4033 			sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
4034 			sd_hp->qa_indx = k;
4035 		}
4036 		hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
4037 	} else {	/* delay < 0 */
4038 		if (NULL == sqcp->tletp) {
4039 			sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
4040 					      GFP_ATOMIC);
4041 			if (NULL == sqcp->tletp)
4042 				return SCSI_MLQUEUE_HOST_BUSY;
4043 			tasklet_init(sqcp->tletp,
4044 				     sdebug_q_cmd_complete, k);
4045 		}
4046 		if (-1 == delta_jiff)
4047 			tasklet_hi_schedule(sqcp->tletp);
4048 		else
4049 			tasklet_schedule(sqcp->tletp);
4050 	}
4051 	if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
4052 	    (scsi_result == device_qfull_result))
4053 		sdev_printk(KERN_INFO, sdp,
4054 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4055 			    num_in_q, (inject ? "<inject> " : ""),
4056 			    "status: TASK SET FULL");
4057 	return 0;
4058 
4059 respond_in_thread:	/* call back to mid-layer using invocation thread */
4060 	cmnd->result = scsi_result;
4061 	cmnd->scsi_done(cmnd);
4062 	return 0;
4063 }
4064 
4065 /* Note: The following macros create attribute files in the
4066    /sys/module/scsi_debug/parameters directory. Unfortunately this
4067    driver is unaware of a change and cannot trigger auxiliary actions
4068    as it can when the corresponding attribute in the
4069    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4070  */
/* Parameter registrations, kept in alphabetical order. */
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, scsi_debug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

/* Module identification. */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

/* One description per parameter above; same alphabetical order. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4157 
4158 static char sdebug_info[256];
4159 
4160 static const char * scsi_debug_info(struct Scsi_Host * shp)
4161 {
4162 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4163 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4164 		scsi_debug_version_date, scsi_debug_dev_size_mb,
4165 		scsi_debug_opts);
4166 	return sdebug_info;
4167 }
4168 
4169 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4170 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4171 {
4172 	char arr[16];
4173 	int opts;
4174 	int minLen = length > 15 ? 15 : length;
4175 
4176 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4177 		return -EACCES;
4178 	memcpy(arr, buffer, minLen);
4179 	arr[minLen] = '\0';
4180 	if (1 != sscanf(arr, "%d", &opts))
4181 		return -EINVAL;
4182 	scsi_debug_opts = opts;
4183 	if (scsi_debug_every_nth != 0)
4184 		atomic_set(&sdebug_cmnd_count, 0);
4185 	return length;
4186 }
4187 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, l;
	char b[32];

	/* show which injection counter currently drives every_nth */
	if (scsi_debug_every_nth > 0)
		snprintf(b, sizeof(b), " (curr:%d)",
			 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
				atomic_read(&sdebug_a_tsf) :
				atomic_read(&sdebug_cmnd_count)));
	else
		b[0] = '\0';

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
		"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
		"every_nth=%d%s\n"
		"delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
		"sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
		"command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
		"host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
		"usec_in_jiffy=%lu\n",
		SCSI_DEBUG_VERSION, scsi_debug_version_date,
		scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
		scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
		scsi_debug_max_luns, atomic_read(&sdebug_completions),
		scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
		sdebug_sectors_per, num_aborts, num_dev_resets,
		num_target_resets, num_bus_resets, num_host_resets,
		dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);

	/* report the span of busy slots in the queued-command bitmap */
	f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
	if (f != scsi_debug_max_queue) {
		l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
		seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
			   "queued_in_use_bm", f, l);
	}
	return 0;
}
4229 
4230 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4231 {
4232         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4233 }
4234 /* Returns -EBUSY if delay is being changed and commands are queued */
4235 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4236 			   size_t count)
4237 {
4238 	int delay, res;
4239 
4240 	if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4241 		res = count;
4242 		if (scsi_debug_delay != delay) {
4243 			unsigned long iflags;
4244 			int k;
4245 
4246 			spin_lock_irqsave(&queued_arr_lock, iflags);
4247 			k = find_first_bit(queued_in_use_bm,
4248 					   scsi_debug_max_queue);
4249 			if (k != scsi_debug_max_queue)
4250 				res = -EBUSY;	/* have queued commands */
4251 			else {
4252 				scsi_debug_delay = delay;
4253 				scsi_debug_ndelay = 0;
4254 			}
4255 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4256 		}
4257 		return res;
4258 	}
4259 	return -EINVAL;
4260 }
4261 static DRIVER_ATTR_RW(delay);
4262 
4263 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4264 {
4265 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4266 }
4267 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4268 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4269 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4270 			   size_t count)
4271 {
4272 	unsigned long iflags;
4273 	int ndelay, res, k;
4274 
4275 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4276 	    (ndelay >= 0) && (ndelay < 1000000000)) {
4277 		res = count;
4278 		if (scsi_debug_ndelay != ndelay) {
4279 			spin_lock_irqsave(&queued_arr_lock, iflags);
4280 			k = find_first_bit(queued_in_use_bm,
4281 					   scsi_debug_max_queue);
4282 			if (k != scsi_debug_max_queue)
4283 				res = -EBUSY;	/* have queued commands */
4284 			else {
4285 				scsi_debug_ndelay = ndelay;
4286 				scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4287 							  : DEF_DELAY;
4288 			}
4289 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4290 		}
4291 		return res;
4292 	}
4293 	return -EINVAL;
4294 }
4295 static DRIVER_ATTR_RW(ndelay);
4296 
4297 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4298 {
4299         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4300 }
4301 
4302 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4303 			  size_t count)
4304 {
4305         int opts;
4306 	char work[20];
4307 
4308         if (1 == sscanf(buf, "%10s", work)) {
4309 		if (0 == strncasecmp(work,"0x", 2)) {
4310 			if (1 == sscanf(&work[2], "%x", &opts))
4311 				goto opts_done;
4312 		} else {
4313 			if (1 == sscanf(work, "%d", &opts))
4314 				goto opts_done;
4315 		}
4316 	}
4317 	return -EINVAL;
4318 opts_done:
4319 	scsi_debug_opts = opts;
4320 	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4321 		sdebug_any_injecting_opt = true;
4322 	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4323 		sdebug_any_injecting_opt = true;
4324 	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4325 		sdebug_any_injecting_opt = true;
4326 	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4327 		sdebug_any_injecting_opt = true;
4328 	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4329 		sdebug_any_injecting_opt = true;
4330 	atomic_set(&sdebug_cmnd_count, 0);
4331 	atomic_set(&sdebug_a_tsf, 0);
4332 	return count;
4333 }
4334 static DRIVER_ATTR_RW(opts);
4335 
4336 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4337 {
4338         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4339 }
4340 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4341 			   size_t count)
4342 {
4343         int n;
4344 
4345 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4346 		scsi_debug_ptype = n;
4347 		return count;
4348 	}
4349 	return -EINVAL;
4350 }
4351 static DRIVER_ATTR_RW(ptype);
4352 
4353 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4354 {
4355         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4356 }
4357 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4358 			    size_t count)
4359 {
4360         int n;
4361 
4362 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4363 		scsi_debug_dsense = n;
4364 		return count;
4365 	}
4366 	return -EINVAL;
4367 }
4368 static DRIVER_ATTR_RW(dsense);
4369 
4370 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4371 {
4372         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4373 }
4374 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4375 			     size_t count)
4376 {
4377         int n;
4378 
4379 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4380 		n = (n > 0);
4381 		scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4382 		if (scsi_debug_fake_rw != n) {
4383 			if ((0 == n) && (NULL == fake_storep)) {
4384 				unsigned long sz =
4385 					(unsigned long)scsi_debug_dev_size_mb *
4386 					1048576;
4387 
4388 				fake_storep = vmalloc(sz);
4389 				if (NULL == fake_storep) {
4390 					pr_err("out of memory, 9\n");
4391 					return -ENOMEM;
4392 				}
4393 				memset(fake_storep, 0, sz);
4394 			}
4395 			scsi_debug_fake_rw = n;
4396 		}
4397 		return count;
4398 	}
4399 	return -EINVAL;
4400 }
4401 static DRIVER_ATTR_RW(fake_rw);
4402 
4403 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4404 {
4405         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4406 }
4407 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4408 			      size_t count)
4409 {
4410         int n;
4411 
4412 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4413 		scsi_debug_no_lun_0 = n;
4414 		return count;
4415 	}
4416 	return -EINVAL;
4417 }
4418 static DRIVER_ATTR_RW(no_lun_0);
4419 
4420 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4421 {
4422         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4423 }
4424 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4425 			      size_t count)
4426 {
4427         int n;
4428 
4429 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4430 		scsi_debug_num_tgts = n;
4431 		sdebug_max_tgts_luns();
4432 		return count;
4433 	}
4434 	return -EINVAL;
4435 }
4436 static DRIVER_ATTR_RW(num_tgts);
4437 
4438 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4439 {
4440         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4441 }
4442 static DRIVER_ATTR_RO(dev_size_mb);
4443 
4444 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4445 {
4446         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4447 }
4448 static DRIVER_ATTR_RO(num_parts);
4449 
4450 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4451 {
4452         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4453 }
4454 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4455 			       size_t count)
4456 {
4457         int nth;
4458 
4459 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4460 		scsi_debug_every_nth = nth;
4461 		atomic_set(&sdebug_cmnd_count, 0);
4462 		return count;
4463 	}
4464 	return -EINVAL;
4465 }
4466 static DRIVER_ATTR_RW(every_nth);
4467 
4468 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4469 {
4470         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4471 }
4472 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4473 			      size_t count)
4474 {
4475         int n;
4476 	bool changed;
4477 
4478 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4479 		changed = (scsi_debug_max_luns != n);
4480 		scsi_debug_max_luns = n;
4481 		sdebug_max_tgts_luns();
4482 		if (changed && (scsi_debug_scsi_level >= 5)) {	/* >= SPC-3 */
4483 			struct sdebug_host_info *sdhp;
4484 			struct sdebug_dev_info *dp;
4485 
4486 			spin_lock(&sdebug_host_list_lock);
4487 			list_for_each_entry(sdhp, &sdebug_host_list,
4488 					    host_list) {
4489 				list_for_each_entry(dp, &sdhp->dev_info_list,
4490 						    dev_list) {
4491 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4492 						dp->uas_bm);
4493 				}
4494 			}
4495 			spin_unlock(&sdebug_host_list_lock);
4496 		}
4497 		return count;
4498 	}
4499 	return -EINVAL;
4500 }
4501 static DRIVER_ATTR_RW(max_luns);
4502 
4503 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4504 {
4505         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4506 }
4507 /* N.B. max_queue can be changed while there are queued commands. In flight
4508  * commands beyond the new max_queue will be completed. */
4509 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4510 			       size_t count)
4511 {
4512 	unsigned long iflags;
4513 	int n, k;
4514 
4515 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4516 	    (n <= SCSI_DEBUG_CANQUEUE)) {
4517 		spin_lock_irqsave(&queued_arr_lock, iflags);
4518 		k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4519 		scsi_debug_max_queue = n;
4520 		if (SCSI_DEBUG_CANQUEUE == k)
4521 			atomic_set(&retired_max_queue, 0);
4522 		else if (k >= n)
4523 			atomic_set(&retired_max_queue, k + 1);
4524 		else
4525 			atomic_set(&retired_max_queue, 0);
4526 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
4527 		return count;
4528 	}
4529 	return -EINVAL;
4530 }
4531 static DRIVER_ATTR_RW(max_queue);
4532 
4533 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4534 {
4535         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4536 }
4537 static DRIVER_ATTR_RO(no_uld);
4538 
4539 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4540 {
4541         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4542 }
4543 static DRIVER_ATTR_RO(scsi_level);
4544 
4545 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4546 {
4547         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4548 }
4549 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4550 				size_t count)
4551 {
4552         int n;
4553 	bool changed;
4554 
4555 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4556 		changed = (scsi_debug_virtual_gb != n);
4557 		scsi_debug_virtual_gb = n;
4558 		sdebug_capacity = get_sdebug_capacity();
4559 		if (changed) {
4560 			struct sdebug_host_info *sdhp;
4561 			struct sdebug_dev_info *dp;
4562 
4563 			spin_lock(&sdebug_host_list_lock);
4564 			list_for_each_entry(sdhp, &sdebug_host_list,
4565 					    host_list) {
4566 				list_for_each_entry(dp, &sdhp->dev_info_list,
4567 						    dev_list) {
4568 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4569 						dp->uas_bm);
4570 				}
4571 			}
4572 			spin_unlock(&sdebug_host_list_lock);
4573 		}
4574 		return count;
4575 	}
4576 	return -EINVAL;
4577 }
4578 static DRIVER_ATTR_RW(virtual_gb);
4579 
4580 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4581 {
4582         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4583 }
4584 
4585 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4586 			      size_t count)
4587 {
4588 	int delta_hosts;
4589 
4590 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4591 		return -EINVAL;
4592 	if (delta_hosts > 0) {
4593 		do {
4594 			sdebug_add_adapter();
4595 		} while (--delta_hosts);
4596 	} else if (delta_hosts < 0) {
4597 		do {
4598 			sdebug_remove_adapter();
4599 		} while (++delta_hosts);
4600 	}
4601 	return count;
4602 }
4603 static DRIVER_ATTR_RW(add_host);
4604 
/* Show the vpd_use_hostno module option value. */
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
}
4609 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4610 				    size_t count)
4611 {
4612 	int n;
4613 
4614 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4615 		scsi_debug_vpd_use_hostno = n;
4616 		return count;
4617 	}
4618 	return -EINVAL;
4619 }
4620 static DRIVER_ATTR_RW(vpd_use_hostno);
4621 
/* Show the simulated logical sector size in bytes (read-only). */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
4627 
/* Show the dix (data integrity extensions) module option (read-only). */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
}
static DRIVER_ATTR_RO(dix);
4633 
/* Show the dif (protection information type) module option (read-only). */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
}
static DRIVER_ATTR_RO(dif);
4639 
/* Show the guard module option (read-only; see scsi_debug_init checks). */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
}
static DRIVER_ATTR_RO(guard);
4645 
/* Show the ato (application tag ownership) module option (read-only). */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
}
static DRIVER_ATTR_RO(ato);
4651 
/* Show the provisioning map of mapped LBAs as a bitmap range list. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* without logical block provisioning, report the whole store mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* %*pbl renders the bitmap as a range list, e.g. "0-1,5,7-9" */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	/* scnprintf returned at most PAGE_SIZE - 2, so both stores fit */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4668 
/* Show the removable flag as 0 or 1. */
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
}
4673 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4674 			       size_t count)
4675 {
4676 	int n;
4677 
4678 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4679 		scsi_debug_removable = (n > 0);
4680 		return count;
4681 	}
4682 	return -EINVAL;
4683 }
4684 static DRIVER_ATTR_RW(removable);
4685 
/* Show whether queuecommand runs under the Scsi_Host lock (0 or 1). */
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
}
4690 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4691 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4692 			       size_t count)
4693 {
4694 	int n, res;
4695 
4696 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4697 		bool new_host_lock = (n > 0);
4698 
4699 		res = count;
4700 		if (new_host_lock != scsi_debug_host_lock) {
4701 			unsigned long iflags;
4702 			int k;
4703 
4704 			spin_lock_irqsave(&queued_arr_lock, iflags);
4705 			k = find_first_bit(queued_in_use_bm,
4706 					   scsi_debug_max_queue);
4707 			if (k != scsi_debug_max_queue)
4708 				res = -EBUSY;	/* have queued commands */
4709 			else
4710 				scsi_debug_host_lock = new_host_lock;
4711 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4712 		}
4713 		return res;
4714 	}
4715 	return -EINVAL;
4716 }
4717 static DRIVER_ATTR_RW(host_lock);
4718 
/* Show the strict flag (CDB mask checking) as 0 or 1. */
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
}
4723 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4724 			    size_t count)
4725 {
4726 	int n;
4727 
4728 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4729 		scsi_debug_strict = (n > 0);
4730 		return count;
4731 	}
4732 	return -EINVAL;
4733 }
4734 static DRIVER_ATTR_RW(strict);
4735 
4736 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
4743 
/* Attribute files exposed under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	NULL,	/* terminator required by the attribute group machinery */
};
ATTRIBUTE_GROUPS(sdebug_drv);
4775 
4776 static struct device *pseudo_primary;
4777 
4778 static int __init scsi_debug_init(void)
4779 {
4780 	unsigned long sz;
4781 	int host_to_add;
4782 	int k;
4783 	int ret;
4784 
4785 	atomic_set(&sdebug_cmnd_count, 0);
4786 	atomic_set(&sdebug_completions, 0);
4787 	atomic_set(&retired_max_queue, 0);
4788 
4789 	if (scsi_debug_ndelay >= 1000000000) {
4790 		pr_warn("ndelay must be less than 1 second, ignored\n");
4791 		scsi_debug_ndelay = 0;
4792 	} else if (scsi_debug_ndelay > 0)
4793 		scsi_debug_delay = DELAY_OVERRIDDEN;
4794 
4795 	switch (scsi_debug_sector_size) {
4796 	case  512:
4797 	case 1024:
4798 	case 2048:
4799 	case 4096:
4800 		break;
4801 	default:
4802 		pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
4803 		return -EINVAL;
4804 	}
4805 
4806 	switch (scsi_debug_dif) {
4807 
4808 	case SD_DIF_TYPE0_PROTECTION:
4809 	case SD_DIF_TYPE1_PROTECTION:
4810 	case SD_DIF_TYPE2_PROTECTION:
4811 	case SD_DIF_TYPE3_PROTECTION:
4812 		break;
4813 
4814 	default:
4815 		pr_err("dif must be 0, 1, 2 or 3\n");
4816 		return -EINVAL;
4817 	}
4818 
4819 	if (scsi_debug_guard > 1) {
4820 		pr_err("guard must be 0 or 1\n");
4821 		return -EINVAL;
4822 	}
4823 
4824 	if (scsi_debug_ato > 1) {
4825 		pr_err("ato must be 0 or 1\n");
4826 		return -EINVAL;
4827 	}
4828 
4829 	if (scsi_debug_physblk_exp > 15) {
4830 		pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
4831 		return -EINVAL;
4832 	}
4833 
4834 	if (scsi_debug_lowest_aligned > 0x3fff) {
4835 		pr_err("lowest_aligned too big: %u\n",
4836 			scsi_debug_lowest_aligned);
4837 		return -EINVAL;
4838 	}
4839 
4840 	if (scsi_debug_dev_size_mb < 1)
4841 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4842 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4843 	sdebug_store_sectors = sz / scsi_debug_sector_size;
4844 	sdebug_capacity = get_sdebug_capacity();
4845 
4846 	/* play around with geometry, don't waste too much on track 0 */
4847 	sdebug_heads = 8;
4848 	sdebug_sectors_per = 32;
4849 	if (scsi_debug_dev_size_mb >= 16)
4850 		sdebug_heads = 32;
4851 	else if (scsi_debug_dev_size_mb >= 256)
4852 		sdebug_heads = 64;
4853 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4854 			       (sdebug_sectors_per * sdebug_heads);
4855 	if (sdebug_cylinders_per >= 1024) {
4856 		/* other LLDs do this; implies >= 1GB ram disk ... */
4857 		sdebug_heads = 255;
4858 		sdebug_sectors_per = 63;
4859 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4860 			       (sdebug_sectors_per * sdebug_heads);
4861 	}
4862 
4863 	if (0 == scsi_debug_fake_rw) {
4864 		fake_storep = vmalloc(sz);
4865 		if (NULL == fake_storep) {
4866 			pr_err("out of memory, 1\n");
4867 			return -ENOMEM;
4868 		}
4869 		memset(fake_storep, 0, sz);
4870 		if (scsi_debug_num_parts > 0)
4871 			sdebug_build_parts(fake_storep, sz);
4872 	}
4873 
4874 	if (scsi_debug_dix) {
4875 		int dif_size;
4876 
4877 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4878 		dif_storep = vmalloc(dif_size);
4879 
4880 		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4881 
4882 		if (dif_storep == NULL) {
4883 			pr_err("out of mem. (DIX)\n");
4884 			ret = -ENOMEM;
4885 			goto free_vm;
4886 		}
4887 
4888 		memset(dif_storep, 0xff, dif_size);
4889 	}
4890 
4891 	/* Logical Block Provisioning */
4892 	if (scsi_debug_lbp()) {
4893 		scsi_debug_unmap_max_blocks =
4894 			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4895 
4896 		scsi_debug_unmap_max_desc =
4897 			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4898 
4899 		scsi_debug_unmap_granularity =
4900 			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4901 
4902 		if (scsi_debug_unmap_alignment &&
4903 		    scsi_debug_unmap_granularity <=
4904 		    scsi_debug_unmap_alignment) {
4905 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4906 			return -EINVAL;
4907 		}
4908 
4909 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4910 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4911 
4912 		pr_info("%lu provisioning blocks\n", map_size);
4913 
4914 		if (map_storep == NULL) {
4915 			pr_err("out of mem. (MAP)\n");
4916 			ret = -ENOMEM;
4917 			goto free_vm;
4918 		}
4919 
4920 		bitmap_zero(map_storep, map_size);
4921 
4922 		/* Map first 1KB for partition table */
4923 		if (scsi_debug_num_parts)
4924 			map_region(0, 2);
4925 	}
4926 
4927 	pseudo_primary = root_device_register("pseudo_0");
4928 	if (IS_ERR(pseudo_primary)) {
4929 		pr_warn("root_device_register() error\n");
4930 		ret = PTR_ERR(pseudo_primary);
4931 		goto free_vm;
4932 	}
4933 	ret = bus_register(&pseudo_lld_bus);
4934 	if (ret < 0) {
4935 		pr_warn("bus_register error: %d\n", ret);
4936 		goto dev_unreg;
4937 	}
4938 	ret = driver_register(&sdebug_driverfs_driver);
4939 	if (ret < 0) {
4940 		pr_warn("driver_register error: %d\n", ret);
4941 		goto bus_unreg;
4942 	}
4943 
4944 	host_to_add = scsi_debug_add_host;
4945         scsi_debug_add_host = 0;
4946 
4947         for (k = 0; k < host_to_add; k++) {
4948                 if (sdebug_add_adapter()) {
4949 			pr_err("sdebug_add_adapter failed k=%d\n", k);
4950                         break;
4951                 }
4952         }
4953 
4954 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4955 		pr_info("built %d host(s)\n", scsi_debug_add_host);
4956 
4957 	return 0;
4958 
4959 bus_unreg:
4960 	bus_unregister(&pseudo_lld_bus);
4961 dev_unreg:
4962 	root_device_unregister(pseudo_primary);
4963 free_vm:
4964 	vfree(map_storep);
4965 	vfree(dif_storep);
4966 	vfree(fake_storep);
4967 
4968 	return ret;
4969 }
4970 
4971 static void __exit scsi_debug_exit(void)
4972 {
4973 	int k = scsi_debug_add_host;
4974 
4975 	stop_all_queued();
4976 	free_all_queued();
4977 	for (; k; k--)
4978 		sdebug_remove_adapter();
4979 	driver_unregister(&sdebug_driverfs_driver);
4980 	bus_unregister(&pseudo_lld_bus);
4981 	root_device_unregister(pseudo_primary);
4982 
4983 	vfree(dif_storep);
4984 	vfree(fake_storep);
4985 }
4986 
4987 device_initcall(scsi_debug_init);
4988 module_exit(scsi_debug_exit);
4989 
/* Device release callback: frees the host info once its last ref drops. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
4997 
4998 static int sdebug_add_adapter(void)
4999 {
5000 	int k, devs_per_host;
5001         int error = 0;
5002         struct sdebug_host_info *sdbg_host;
5003 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5004 
5005         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5006         if (NULL == sdbg_host) {
5007 		pr_err("out of memory at line %d\n", __LINE__);
5008                 return -ENOMEM;
5009         }
5010 
5011         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5012 
5013 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
5014         for (k = 0; k < devs_per_host; k++) {
5015 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5016 		if (!sdbg_devinfo) {
5017 			pr_err("out of memory at line %d\n", __LINE__);
5018                         error = -ENOMEM;
5019 			goto clean;
5020                 }
5021         }
5022 
5023         spin_lock(&sdebug_host_list_lock);
5024         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5025         spin_unlock(&sdebug_host_list_lock);
5026 
5027         sdbg_host->dev.bus = &pseudo_lld_bus;
5028         sdbg_host->dev.parent = pseudo_primary;
5029         sdbg_host->dev.release = &sdebug_release_adapter;
5030         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
5031 
5032         error = device_register(&sdbg_host->dev);
5033 
5034         if (error)
5035 		goto clean;
5036 
5037 	++scsi_debug_add_host;
5038         return error;
5039 
5040 clean:
5041 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5042 				 dev_list) {
5043 		list_del(&sdbg_devinfo->dev_list);
5044 		kfree(sdbg_devinfo);
5045 	}
5046 
5047 	kfree(sdbg_host);
5048         return error;
5049 }
5050 
5051 static void sdebug_remove_adapter(void)
5052 {
5053         struct sdebug_host_info * sdbg_host = NULL;
5054 
5055         spin_lock(&sdebug_host_list_lock);
5056         if (!list_empty(&sdebug_host_list)) {
5057                 sdbg_host = list_entry(sdebug_host_list.prev,
5058                                        struct sdebug_host_info, host_list);
5059 		list_del(&sdbg_host->host_list);
5060 	}
5061         spin_unlock(&sdebug_host_list_lock);
5062 
5063 	if (!sdbg_host)
5064 		return;
5065 
5066         device_unregister(&sdbg_host->dev);
5067         --scsi_debug_add_host;
5068 }
5069 
5070 static int
5071 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5072 {
5073 	int num_in_q = 0;
5074 	unsigned long iflags;
5075 	struct sdebug_dev_info *devip;
5076 
5077 	spin_lock_irqsave(&queued_arr_lock, iflags);
5078 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5079 	if (NULL == devip) {
5080 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
5081 		return	-ENODEV;
5082 	}
5083 	num_in_q = atomic_read(&devip->num_in_q);
5084 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
5085 
5086 	if (qdepth < 1)
5087 		qdepth = 1;
5088 	/* allow to exceed max host queued_arr elements for testing */
5089 	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
5090 		qdepth = SCSI_DEBUG_CANQUEUE + 10;
5091 	scsi_change_queue_depth(sdev, qdepth);
5092 
5093 	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
5094 		sdev_printk(KERN_INFO, sdev,
5095 			    "%s: qdepth=%d, num_in_q=%d\n",
5096 			    __func__, qdepth, num_in_q);
5097 	}
5098 	return sdev->queue_depth;
5099 }
5100 
/*
 * Periodic error-injection gate, called per command when every_nth != 0.
 * On every abs(every_nth)-th command it either requests a fake timeout
 * (returns 1: caller drops the command) or arms per-command injection
 * flags in the command's private area for later use. Returns 0 otherwise.
 */
static int
check_inject(struct scsi_cmnd *scp)
{
	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

	/* start each command with all injection flags cleared */
	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));

	if (atomic_inc_return(&sdebug_cmnd_count) >=
	    abs(scsi_debug_every_nth)) {
		atomic_set(&sdebug_cmnd_count, 0);
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 1; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
			 scsi_medium_access_command(scp))
			return 1; /* time out reads and writes */
		if (sdebug_any_injecting_opt) {
			int opts = scsi_debug_opts;

			/* only one injection type is armed per trigger */
			if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
				ep->inj_recovered = true;
			else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
				ep->inj_transport = true;
			else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
				ep->inj_dif = true;
			else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
				ep->inj_dix = true;
			else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
				ep->inj_short = true;
		}
	}
	return 0;
}
5135 
/*
 * Main command dispatch. Looks the CDB opcode (plus optional service
 * action) up in opcode_info_arr, performs LUN/unit-attention/strict-mask
 * gating, then invokes the matched resp_* handler and schedules the
 * response (possibly delayed). All exits go through schedule_resp()
 * except a deliberately dropped (injected-timeout) command.
 */
static int
scsi_debug_queuecommand(struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;	/* root entry of an sa chain */
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	int errsts_no_connect = DID_NO_CONNECT << 16;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);

	scsi_set_resid(scp, 0);
	/* optionally hex-dump the CDB for debugging */
	if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* out-of-range LUNs (other than the REPORT LUNS wlun) don't exist */
	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
		return schedule_resp(scp, NULL, errsts_no_connect, 0);

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (!devip) {
		devip = devInfoReg(sdp);
		if (NULL == devip)
			return schedule_resp(scp, NULL, errsts_no_connect, 0);
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action location depends on CDB layout */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no match found in the attached chain */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (F_INV_OP & flags) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
		if (debug)
			sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
				    "0x%x not supported for wlun\n", opcode);
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (scsi_debug_strict) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report pending unit attentions unless the opcode skips them */
	if (!(F_SKIP_UA & flags) &&
	    SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
		errsts = check_readiness(scp, UAS_ONLY, devip);
		if (errsts)
			goto check_cond;
	}
	if ((F_M_ACCESS & flags) && devip->stopped) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (debug)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (scsi_debug_every_nth) {
		if (check_inject(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (oip->pfp)	/* if this command has a resp_* function, call it */
		errsts = oip->pfp(scp, devip);
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
}
5272 
5273 static int
5274 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5275 {
5276 	if (scsi_debug_host_lock) {
5277 		unsigned long iflags;
5278 		int rc;
5279 
5280 		spin_lock_irqsave(shost->host_lock, iflags);
5281 		rc = scsi_debug_queuecommand(cmd);
5282 		spin_unlock_irqrestore(shost->host_lock, iflags);
5283 		return rc;
5284 	} else
5285 		return scsi_debug_queuecommand(cmd);
5286 }
5287 
/* Host template for simulated adapters; cmd_size reserves per-command
 * private space for error-injection state (struct sdebug_scmd_extra_t).
 * can_queue and use_clustering may be overridden in sdebug_driver_probe().
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		sdebug_queuecommand_lock_or_not,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SCSI_MAX_SG_CHAIN_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scmd_extra_t),
};
5315 
5316 static int sdebug_driver_probe(struct device * dev)
5317 {
5318 	int error = 0;
5319 	int opts;
5320 	struct sdebug_host_info *sdbg_host;
5321 	struct Scsi_Host *hpnt;
5322 	int host_prot;
5323 
5324 	sdbg_host = to_sdebug_host(dev);
5325 
5326 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
5327 	if (scsi_debug_clustering)
5328 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5329 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5330 	if (NULL == hpnt) {
5331 		pr_err("scsi_host_alloc failed\n");
5332 		error = -ENODEV;
5333 		return error;
5334 	}
5335 
5336         sdbg_host->shost = hpnt;
5337 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5338 	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5339 		hpnt->max_id = scsi_debug_num_tgts + 1;
5340 	else
5341 		hpnt->max_id = scsi_debug_num_tgts;
5342 	/* = scsi_debug_max_luns; */
5343 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5344 
5345 	host_prot = 0;
5346 
5347 	switch (scsi_debug_dif) {
5348 
5349 	case SD_DIF_TYPE1_PROTECTION:
5350 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
5351 		if (scsi_debug_dix)
5352 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5353 		break;
5354 
5355 	case SD_DIF_TYPE2_PROTECTION:
5356 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
5357 		if (scsi_debug_dix)
5358 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5359 		break;
5360 
5361 	case SD_DIF_TYPE3_PROTECTION:
5362 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
5363 		if (scsi_debug_dix)
5364 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5365 		break;
5366 
5367 	default:
5368 		if (scsi_debug_dix)
5369 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5370 		break;
5371 	}
5372 
5373 	scsi_host_set_prot(hpnt, host_prot);
5374 
5375 	pr_info("host protection%s%s%s%s%s%s%s\n",
5376 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5377 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5378 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5379 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5380 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5381 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5382 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5383 
5384 	if (scsi_debug_guard == 1)
5385 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5386 	else
5387 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5388 
5389 	opts = scsi_debug_opts;
5390 	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5391 		sdebug_any_injecting_opt = true;
5392 	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5393 		sdebug_any_injecting_opt = true;
5394 	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5395 		sdebug_any_injecting_opt = true;
5396 	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5397 		sdebug_any_injecting_opt = true;
5398 	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5399 		sdebug_any_injecting_opt = true;
5400 
5401         error = scsi_add_host(hpnt, &sdbg_host->dev);
5402         if (error) {
5403 		pr_err("scsi_add_host failed\n");
5404                 error = -ENODEV;
5405 		scsi_host_put(hpnt);
5406         } else
5407 		scsi_scan_host(hpnt);
5408 
5409 	return error;
5410 }
5411 
5412 static int sdebug_driver_remove(struct device * dev)
5413 {
5414         struct sdebug_host_info *sdbg_host;
5415 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5416 
5417 	sdbg_host = to_sdebug_host(dev);
5418 
5419 	if (!sdbg_host) {
5420 		pr_err("Unable to locate host info\n");
5421 		return -ENODEV;
5422 	}
5423 
5424         scsi_remove_host(sdbg_host->shost);
5425 
5426 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5427 				 dev_list) {
5428                 list_del(&sdbg_devinfo->dev_list);
5429                 kfree(sdbg_devinfo);
5430         }
5431 
5432         scsi_host_put(sdbg_host->shost);
5433         return 0;
5434 }
5435 
/* On the pseudo bus every device matches every driver (single driver). */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5441 
/* Pseudo bus that hosts the simulated adapter devices and their driver. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5449