xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision b333a819)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SCSI_DEBUG_VERSION "1.86"
63 static const char *sdebug_version_date = "20160422";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define UA_RESET_ASC 0x29
78 #define UA_CHANGED_ASC 0x2a
79 #define TARGET_CHANGED_ASC 0x3f
80 #define LUNS_CHANGED_ASCQ 0x0e
81 #define INSUFF_RES_ASC 0x55
82 #define INSUFF_RES_ASCQ 0x3
83 #define POWER_ON_RESET_ASCQ 0x0
84 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
85 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
86 #define CAPACITY_CHANGED_ASCQ 0x9
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91 #define MISCOMPARE_VERIFY_ASC 0x1d
92 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
93 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
94 
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97 
98 
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_DELAY   1		/* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW	0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DELAY_OVERRIDDEN -9999
140 
141 /* bit mask values for sdebug_opts */
142 #define SDEBUG_OPT_NOISE		1
143 #define SDEBUG_OPT_MEDIUM_ERR		2
144 #define SDEBUG_OPT_TIMEOUT		4
145 #define SDEBUG_OPT_RECOVERED_ERR	8
146 #define SDEBUG_OPT_TRANSPORT_ERR	16
147 #define SDEBUG_OPT_DIF_ERR		32
148 #define SDEBUG_OPT_DIX_ERR		64
149 #define SDEBUG_OPT_MAC_TIMEOUT		128
150 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
151 #define SDEBUG_OPT_Q_NOISE		0x200
152 #define SDEBUG_OPT_ALL_TSF		0x400
153 #define SDEBUG_OPT_RARE_TSF		0x800
154 #define SDEBUG_OPT_N_WCE		0x1000
155 #define SDEBUG_OPT_RESET_NOISE		0x2000
156 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
157 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
158 			      SDEBUG_OPT_RESET_NOISE)
159 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
160 				  SDEBUG_OPT_TRANSPORT_ERR | \
161 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
162 				  SDEBUG_OPT_SHORT_TRANSFER)
163 /* When "every_nth" > 0 then modulo "every_nth" commands:
164  *   - a no response is simulated if SDEBUG_OPT_TIMEOUT is set
165  *   - a RECOVERED_ERROR is simulated on successful read and write
166  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
167  *   - a TRANSPORT_ERROR is simulated on successful read and write
168  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
169  *
170  * When "every_nth" < 0 then after "- every_nth" commands:
171  *   - a no response is simulated if SDEBUG_OPT_TIMEOUT is set
172  *   - a RECOVERED_ERROR is simulated on successful read and write
173  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
174  *   - a TRANSPORT_ERROR is simulated on successful read and write
175  *     commands if _DEBUG_OPT_TRANSPORT_ERR is set.
176  * This will continue on every subsequent command until some other action
177  * occurs (e.g. the user * writing a new value (other than -1 or 1) to
178  * every_nth via sysfs).
179  */
180 
181 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
182  * priority order. In the subset implemented here lower numbers have higher
183  * priority. The UA numbers should be a sequence starting from 0 with
184  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
185 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
186 #define SDEBUG_UA_BUS_RESET 1
187 #define SDEBUG_UA_MODE_CHANGED 2
188 #define SDEBUG_UA_CAPACITY_CHANGED 3
189 #define SDEBUG_UA_LUNS_CHANGED 4
190 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
191 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
192 #define SDEBUG_NUM_UAS 7
193 
194 /* for check_readiness() */
195 #define UAS_ONLY 1	/* check for UAs only */
196 #define UAS_TUR 0	/* if no UAs then check if media access possible */
197 
198 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
199  * sector on read commands: */
200 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
201 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
202 
203 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
204  * or "peripheral device" addressing (value 0) */
205 #define SAM2_LUN_ADDRESS_METHOD 0
206 
207 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
208  * (for response) at one time. Can be reduced by max_queue option. Command
209  * responses are not queued when delay=0 and ndelay=0. The per-device
210  * DEF_CMD_PER_LUN can be changed via sysfs:
211  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
212  * SCSI_DEBUG_CANQUEUE. */
213 #define SCSI_DEBUG_CANQUEUE_WORDS  9	/* a WORD is bits in a long */
214 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
215 #define DEF_CMD_PER_LUN  255
216 
217 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
218 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
219 #endif
220 
221 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
/* Dense internal dispatch indexes: opcode_ind_arr[] maps each cdb[0] opcode
 * byte to one of these, which in turn indexes opcode_info_arr[]. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
};
255 
/* 256-entry map from cdb[0] (the SCSI opcode byte) to a SDEB_I_* index.
 * Entries left 0 resolve to SDEB_I_INVALID_OPCODE (not implemented). */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
	     0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
298 
299 #define F_D_IN			1
300 #define F_D_OUT			2
301 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
302 #define F_D_UNKN		8
303 #define F_RL_WLUN_OK		0x10
304 #define F_SKIP_UA		0x20
305 #define F_DELAY_OVERR		0x40
306 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
307 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
308 #define F_INV_OP		0x200
309 #define F_FAKE_RW		0x400
310 #define F_M_ACCESS		0x800	/* media access */
311 
312 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
313 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
314 #define FF_SA (F_SA_HIGH | F_SA_LOW)
315 
316 struct sdebug_dev_info;
317 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
337 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
338 
/* Describes one supported opcode (or service-action variant): the cdb length
 * and per-byte validity masks, the flags governing its handling, and the
 * resp_*() handler that services it. Tables of these are also scanned for
 * REPORT SUPPORTED OPERATION CODES. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff
				 * for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
				/* ignore cdb bytes after position 15 */
};
350 
/* MODE SENSE(6); attached under the MODE SENSE(10) entry in opcode_info_arr */
static const struct opcode_info_t msense_iarr[1] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
355 
/* MODE SELECT(6); attached under the MODE SELECT(10) entry */
static const struct opcode_info_t mselect_iarr[1] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
360 
/* READ(10), READ(6) and READ(12); attached under the READ(16) entry */
static const struct opcode_info_t read_iarr[3] = {
	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
371 
/* WRITE(10), WRITE(6) and WRITE(12); attached under the WRITE(16) entry */
static const struct opcode_info_t write_iarr[3] = {
	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
382 
/* GET LBA STATUS (service action 0x12); attached under SERVICE ACTION IN(16),
 * whose preferred action is READ CAPACITY(16) */
static const struct opcode_info_t sa_in_iarr[1] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },
};
388 
/* WRITE(32), selected via the high service action of a VARIABLE LENGTH cdb;
 * attached under the VARIABLE LENGTH READ(32) entry */
static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
};
394 
/* REPORT SUPPORTED OPERATION CODES (sa 0xc) and REPORT SUPPORTED TASK
 * MANAGEMENT FUNCTIONS (sa 0xd); attached under the MAINTENANCE IN entry */
static const struct opcode_info_t maint_in_iarr[2] = {
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },
};
403 
/* WRITE SAME(16); attached under the WRITE SAME(10) entry */
static const struct opcode_info_t write_same_iarr[1] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
409 
/* RESERVE(6); attached under the RESERVE(10) entry */
static const struct opcode_info_t reserve_iarr[1] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
414 
/* RELEASE(6); attached under the RELEASE(10) entry */
static const struct opcode_info_t release_iarr[1] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
419 
420 
421 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
422  * plus the terminating elements for logic that scans this table such as
423  * REPORT SUPPORTED OPERATION CODES. */
424 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
425 /* 0 */
426 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
427 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
429 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
430 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
431 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
432 	     0, 0} },
433 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
434 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
435 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
436 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
438 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
439 	     0} },
440 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
441 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
442 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
443 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
444 	     0, 0, 0} },
445 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
446 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
447 	     0, 0} },
448 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
449 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
451 /* 10 */
452 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
453 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
454 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
455 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
456 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
457 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
458 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
459 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
460 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
461 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
462 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
463 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
464 	     0} },
465 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
466 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
467 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
468 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
469 	     0, 0, 0, 0, 0, 0} },
470 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
473 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
474 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
475 	     0} },
476 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
477 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
478 	     0} },
479 /* 20 */
480 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
481 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
485 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
487 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
488 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
489 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
490 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
491 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
492 		   0, 0, 0, 0, 0, 0} },
493 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
494 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
495 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
496 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
497 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
498 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
499 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
500 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
501 	     0, 0, 0, 0} },
502 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
503 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
504 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
505 
506 /* 30 */
507 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
508 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510 
/* Per-command error-injection flags; presumably latched from the
 * SDEBUG_OPT_* injection opts when the command is queued — see
 * SDEBUG_OPT_ALL_INJECTING above. */
struct sdebug_scmd_extra_t {
	bool inj_recovered;	/* SDEBUG_OPT_RECOVERED_ERR */
	bool inj_transport;	/* SDEBUG_OPT_TRANSPORT_ERR */
	bool inj_dif;		/* SDEBUG_OPT_DIF_ERR */
	bool inj_dix;		/* SDEBUG_OPT_DIX_ERR */
	bool inj_short;		/* SDEBUG_OPT_SHORT_TRANSFER */
};
518 
519 static int sdebug_add_host = DEF_NUM_HOST;
520 static int sdebug_ato = DEF_ATO;
521 static int sdebug_delay = DEF_DELAY;	/* in jiffies */
522 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
523 static int sdebug_dif = DEF_DIF;
524 static int sdebug_dix = DEF_DIX;
525 static int sdebug_dsense = DEF_D_SENSE;
526 static int sdebug_every_nth = DEF_EVERY_NTH;
527 static int sdebug_fake_rw = DEF_FAKE_RW;
528 static unsigned int sdebug_guard = DEF_GUARD;
529 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
530 static int sdebug_max_luns = DEF_MAX_LUNS;
531 static int sdebug_max_queue = SCSI_DEBUG_CANQUEUE;
532 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
533 static int sdebug_ndelay = DEF_NDELAY;	/* in nanoseconds */
534 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
535 static int sdebug_no_uld;
536 static int sdebug_num_parts = DEF_NUM_PARTS;
537 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
538 static int sdebug_opt_blks = DEF_OPT_BLKS;
539 static int sdebug_opts = DEF_OPTS;
540 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
541 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
542 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
543 static int sdebug_sector_size = DEF_SECTOR_SIZE;
544 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
545 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
546 static unsigned int sdebug_lbpu = DEF_LBPU;
547 static unsigned int sdebug_lbpws = DEF_LBPWS;
548 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
549 static unsigned int sdebug_lbprz = DEF_LBPRZ;
550 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
551 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
552 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
553 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
554 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
555 static bool sdebug_removable = DEF_REMOVABLE;
556 static bool sdebug_clustering;
557 static bool sdebug_host_lock = DEF_HOST_LOCK;
558 static bool sdebug_strict = DEF_STRICT;
559 static bool sdebug_any_injecting_opt;
560 static bool sdebug_verbose;
561 
562 static atomic_t sdebug_cmnd_count;
563 static atomic_t sdebug_completions;
564 static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
565 
566 #define DEV_READONLY(TGT)      (0)
567 
568 static unsigned int sdebug_store_sectors;
569 static sector_t sdebug_capacity;	/* in sectors */
570 
571 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
572    may still need them */
573 static int sdebug_heads;		/* heads per disk */
574 static int sdebug_cylinders_per;	/* cylinders per surface */
575 static int sdebug_sectors_per;		/* sectors per cylinder */
576 
577 #define SDEBUG_MAX_PARTS 4
578 
579 #define SCSI_DEBUG_MAX_CMD_LEN 32
580 
581 static unsigned int scsi_debug_lbp(void)
582 {
583 	return 0 == sdebug_fake_rw &&
584 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
585 }
586 
/* State for one simulated logical unit (one entry per LUN). */
struct sdebug_dev_info {
	struct list_head dev_list;	/* on sdebug_host_info.dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending Unit Attentions (SDEBUG_UA_*) */
	atomic_t num_in_q;		/* commands outstanding on this device */
	char stopped;		/* TODO: should be atomic */
	bool used;
};
598 
/* One simulated host adapter; owns the devices on dev_info_list. */
struct sdebug_host_info {
	struct list_head host_list;	/* on global sdebug_host_list */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;	/* sdebug_dev_info entries */
};
605 
606 #define to_sdebug_host(d)	\
607 	container_of(d, struct sdebug_host_info, dev)
608 
609 static LIST_HEAD(sdebug_host_list);
610 static DEFINE_SPINLOCK(sdebug_host_list_lock);
611 
612 
struct sdebug_hrtimer {		/* ... is derived from hrtimer */
	struct hrtimer hrt;	/* must be first element */
	int qa_indx;		/* presumably an index into queued_arr[] — verify */
};
617 
/* One slot of the deferred-response queue (queued_arr[]). */
struct sdebug_queued_cmd {
	/* in_use flagged by a bit in queued_in_use_bm[] */
	struct tasklet_struct *tletp;	/* deferred completion via tasklet */
	struct sdebug_hrtimer *sd_hrtp;	/* deferred completion via hrtimer */
	struct scsi_cmnd * a_cmnd;	/* the command awaiting its response */
};
624 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
625 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
626 
627 
628 static unsigned char * fake_storep;	/* ramdisk storage */
629 static struct sd_dif_tuple *dif_storep;	/* protection info */
630 static void *map_storep;		/* provisioning map */
631 
632 static unsigned long map_size;
633 static int num_aborts;
634 static int num_dev_resets;
635 static int num_target_resets;
636 static int num_bus_resets;
637 static int num_host_resets;
638 static int dix_writes;
639 static int dix_reads;
640 static int dif_errors;
641 
642 static DEFINE_SPINLOCK(queued_arr_lock);
643 static DEFINE_RWLOCK(atomic_rw);
644 
645 static char sdebug_proc_name[] = MY_NAME;
646 static const char *my_name = MY_NAME;
647 
648 static struct bus_type pseudo_lld_bus;
649 
650 static struct device_driver sdebug_driverfs_driver = {
651 	.name 		= sdebug_proc_name,
652 	.bus		= &pseudo_lld_bus,
653 };
654 
655 static const int check_condition_result =
656 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
657 
658 static const int illegal_condition_result =
659 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
660 
661 static const int device_qfull_result =
662 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
663 
664 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
665 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
666 				     0, 0, 0, 0};
667 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
668 				    0, 0, 0x2, 0x4b};
669 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
670 			           0, 0, 0x0, 0x0};
671 
672 static void *fake_store(unsigned long long lba)
673 {
674 	lba = do_div(lba, sdebug_store_sectors);
675 
676 	return fake_storep + lba * sdebug_sector_size;
677 }
678 
679 static struct sd_dif_tuple *dif_store(sector_t sector)
680 {
681 	sector = sector_div(sector, sdebug_store_sectors);
682 
683 	return dif_storep + sector;
684 }
685 
686 static int sdebug_add_adapter(void);
687 static void sdebug_remove_adapter(void);
688 
689 static void sdebug_max_tgts_luns(void)
690 {
691 	struct sdebug_host_info *sdbg_host;
692 	struct Scsi_Host *hpnt;
693 
694 	spin_lock(&sdebug_host_list_lock);
695 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
696 		hpnt = sdbg_host->shost;
697 		if ((hpnt->this_id >= 0) &&
698 		    (sdebug_num_tgts > hpnt->this_id))
699 			hpnt->max_id = sdebug_num_tgts + 1;
700 		else
701 			hpnt->max_id = sdebug_num_tgts;
702 		/* sdebug_max_luns; */
703 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
704 	}
705 	spin_unlock(&sdebug_host_list_lock);
706 }
707 
/* Where an invalid field was found: in the data-out parameter list
 * (SDEB_IN_DATA) or in the cdb itself (SDEB_IN_CDB). */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
709 
/* Build an ILLEGAL REQUEST sense buffer reporting an invalid field, adding a
 * Sense Key Specific (SKS) field that points at the offending byte and bit.
 * c_d selects the ASC: invalid field in cdb vs. in the parameter list.
 * Set in_bit to -1 to indicate no bit position of invalid field */
static void
mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
		     int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes (field pointer format) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: the error is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer (byte index) */
	if (sdebug_dsense) {
		/* descriptor format: append an SKS descriptor (type 0x2) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS occupies bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
750 
751 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
752 {
753 	unsigned char *sbuff;
754 
755 	sbuff = scp->sense_buffer;
756 	if (!sbuff) {
757 		sdev_printk(KERN_ERR, scp->device,
758 			    "%s: sense_buffer is NULL\n", __func__);
759 		return;
760 	}
761 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
762 
763 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
764 
765 	if (sdebug_verbose)
766 		sdev_printk(KERN_INFO, scp->device,
767 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
768 			    my_name, key, asc, asq);
769 }
770 
/* Convenience wrapper: ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE */
static void
mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
776 
777 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
778 {
779 	if (sdebug_verbose) {
780 		if (0x1261 == cmd)
781 			sdev_printk(KERN_INFO, dev,
782 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
783 		else if (0x5331 == cmd)
784 			sdev_printk(KERN_INFO, dev,
785 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
786 				    __func__);
787 		else
788 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
789 				    __func__, cmd);
790 	}
791 	return -EINVAL;
792 	/* return -ENOTTY; // correct return but upsets fdisk */
793 }
794 
795 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
796 {
797 	struct sdebug_host_info *sdhp;
798 	struct sdebug_dev_info *dp;
799 
800 	spin_lock(&sdebug_host_list_lock);
801 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
802 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
803 			if ((devip->sdbg_host == dp->sdbg_host) &&
804 			    (devip->target == dp->target))
805 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
806 		}
807 	}
808 	spin_unlock(&sdebug_host_list_lock);
809 }
810 
/*
 * Check the device for pending unit attention (UA) conditions and, when
 * uas_only is UAS_TUR, for a stopped (not ready) state.  The lowest set
 * bit in devip->uas_bm is treated as the highest priority pending UA;
 * it is converted to sense data on SCpnt and then cleared, so repeated
 * commands drain the UAs one at a time.  Returns 0 when the device is
 * ready, else check_condition_result with sense data already set.
 */
static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
			   struct sdebug_dev_info * devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_RESET_ASC, BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
				 TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* UA is reported exactly once: clear it now that sense is set */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, SCpnt->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	/* no UA pending; TEST UNIT READY additionally fails while stopped */
	if ((UAS_TUR == uas_only) && devip->stopped) {
		mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s reports: Not ready: %s\n", my_name,
				    "initializing command required");
		return check_condition_result;
	}
	return 0;
}
899 
900 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
901 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
902 				int arr_len)
903 {
904 	int act_len;
905 	struct scsi_data_buffer *sdb = scsi_in(scp);
906 
907 	if (!sdb->length)
908 		return 0;
909 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
910 		return DID_ERROR << 16;
911 
912 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
913 				      arr, arr_len);
914 	sdb->resid = scsi_bufflen(scp) - act_len;
915 
916 	return 0;
917 }
918 
919 /* Returns number of bytes fetched into 'arr' or -1 if error. */
920 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
921 			       int arr_len)
922 {
923 	if (!scsi_bufflen(scp))
924 		return 0;
925 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
926 		return -1;
927 
928 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
929 }
930 
931 
static const char * inq_vendor_id = "Linux   ";	/* 8 byte T10 vendor id */
static const char * inq_product_id = "scsi_debug      ";	/* 16 bytes */
static const char *inq_product_rev = "0186";	/* version less '.' */
/* NAA-5 bases used to fabricate world wide names; low-order digits are
 * filled in with per-port/per-device numbers by the VPD page builders. */
static const u64 naa5_comp_a = 0x5222222000000000ULL;
static const u64 naa5_comp_b = 0x5333333000000000ULL;
static const u64 naa5_comp_c = 0x5111111000000000ULL;
938 
/* Device identification VPD page (0x83).  Emits, in order: a T10 vendor
 * id designator, then (unless dev_id_num is negative, e.g. for a well
 * known LU) an NAA-5 logical unit id and relative target port, then
 * NAA-5 target port, target port group and target device designators,
 * and finally a SCSI name string.  Returns number of bytes placed in
 * arr. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
			   int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
		num += 8;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* name string zero padded to 24 */
	num += 4;
	return num;
}
1014 
/* Canned body (from the 4th byte onward) of the Software interface
 * identification VPD page: three 6 byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1020 
1021 /*  Software interface identification VPD page */
1022 static int inquiry_evpd_84(unsigned char * arr)
1023 {
1024 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1025 	return sizeof(vpd84_data);
1026 }
1027 
/* Append one network services descriptor to arr: a 4 byte header
 * (association/service type, two reserved bytes, length) followed by
 * the URL, null terminated and zero padded to a multiple of 4 bytes.
 * Returns the number of bytes written.  Factored out of
 * inquiry_evpd_85() which previously open-coded it twice. */
static int inquiry_evpd_85_desc(unsigned char *arr, int assoc_service,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* room for the trailing null */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to 4 byte multiple */
	arr[num++] = assoc_service;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page (0x85): one storage
 * configuration URL and one logging URL, both associated with the
 * logical unit.  Returns the number of bytes placed in arr. */
static int inquiry_evpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += inquiry_evpd_85_desc(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* lu, logging */
	num += inquiry_evpd_85_desc(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1062 
/* SCSI ports VPD page (0x88).  Reports two relative target ports, each
 * carrying one NAA-5 target port designator; port B is a fake second
 * port (see resp_report_tgtpgs() which marks its group unavailable).
 * Returns the number of bytes placed in arr. */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1104 
1105 
/* Canned body (from the 4th byte onward) of the ATA Information VPD
 * page: SAT vendor/product strings followed by a 512 byte ATA IDENTIFY
 * DEVICE data block. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1149 
1150 /* ATA Information VPD page */
1151 static int inquiry_evpd_89(unsigned char * arr)
1152 {
1153 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1154 	return sizeof(vpd89_data);
1155 }
1156 
1157 
/* Template body (from the 4th byte onward) of the Block limits VPD
 * page; inquiry_evpd_b0() overwrites most fields from module params. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1164 
1165 /* Block limits VPD page (SBC-3) */
1166 static int inquiry_evpd_b0(unsigned char * arr)
1167 {
1168 	unsigned int gran;
1169 
1170 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1171 
1172 	/* Optimal transfer length granularity */
1173 	gran = 1 << sdebug_physblk_exp;
1174 	put_unaligned_be16(gran, arr + 2);
1175 
1176 	/* Maximum Transfer Length */
1177 	if (sdebug_store_sectors > 0x400)
1178 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1179 
1180 	/* Optimal Transfer Length */
1181 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1182 
1183 	if (sdebug_lbpu) {
1184 		/* Maximum Unmap LBA Count */
1185 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1186 
1187 		/* Maximum Unmap Block Descriptor Count */
1188 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1189 	}
1190 
1191 	/* Unmap Granularity Alignment */
1192 	if (sdebug_unmap_alignment) {
1193 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1194 		arr[28] |= 0x80; /* UGAVALID */
1195 	}
1196 
1197 	/* Optimal Unmap Granularity */
1198 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1199 
1200 	/* Maximum WRITE SAME Length */
1201 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1202 
1203 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1204 
1205 	return sizeof(vpdb0_data);
1206 }
1207 
1208 /* Block device characteristics VPD page (SBC-3) */
1209 static int inquiry_evpd_b1(unsigned char *arr)
1210 {
1211 	memset(arr, 0, 0x3c);
1212 	arr[0] = 0;
1213 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1214 	arr[2] = 0;
1215 	arr[3] = 5;	/* less than 1.8" */
1216 
1217 	return 0x3c;
1218 }
1219 
1220 /* Logical block provisioning VPD page (SBC-3) */
1221 static int inquiry_evpd_b2(unsigned char *arr)
1222 {
1223 	memset(arr, 0, 0x4);
1224 	arr[0] = 0;			/* threshold exponent */
1225 
1226 	if (sdebug_lbpu)
1227 		arr[1] = 1 << 7;
1228 
1229 	if (sdebug_lbpws)
1230 		arr[1] |= 1 << 6;
1231 
1232 	if (sdebug_lbpws10)
1233 		arr[1] |= 1 << 5;
1234 
1235 	if (sdebug_lbprz)
1236 		arr[1] |= 1 << 2;
1237 
1238 	return 0x4;
1239 }
1240 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * INQUIRY command response.  Rejects CMDDT, dispatches EVPD page
 * requests (0x0, 0x80, 0x83-0x89, 0xb0-0xb2) to the inquiry_evpd_*
 * builders, otherwise returns a standard 96 byte INQUIRY response.
 * Returns 0, check_condition_result, or an error shifted into the
 * host byte.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
	if (have_wlun)
		pq_pdt = 0x1e;	/* present, wlun */
	else if (sdebug_no_lun_0 && (0 == devip->lun))
		pq_pdt = 0x7f;	/* not present, no device type */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		/* fabricate ids from host/channel/target/lun numbers */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (0 == sdebug_vpd_use_hostno)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			arr[n++] = 0x89;  /* ATA information */
			arr[n++] = 0xb0;  /* Block limits (SBC) */
			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
				arr[n++] = 0xb2;
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
						 target_dev_id, lu_id_num,
						 lu_id_str, len);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (sdebug_dif)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
		} else if (0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_evpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b0(&arr[4]);
		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b1(&arr[4]);
		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clip response to page length and CDB allocation length */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = sdebug_dif ? 1 : 0; /* PROTECT bit */
	if (0 == sdebug_vpd_use_hostno)
		arr[5] = 0x10; /* claim: implicit TGPS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
	n = 62;
	if (sdebug_ptype == 0) {
		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
	} else if (sdebug_ptype == 1) {
		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
	}
	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1387 
/*
 * REQUEST SENSE command response.  If informational exceptions are
 * armed with MRIE==6, fabricate a THRESHOLD EXCEEDED response;
 * otherwise replay the command's current sense data, converting
 * between fixed (0x70) and descriptor (0x72) format when the DESC bit
 * in the CDB disagrees with the stored format.  The stored sense is
 * then reset to NO SENSE per SPC semantics.
 */
static int resp_requests(struct scsi_cmnd * scp,
			 struct sdebug_dev_info * devip)
{
	unsigned char * sbuff;
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
	bool dsense;
	int len = 18;

	memset(arr, 0, sizeof(arr));
	dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format wanted */
	sbuff = scp->sense_buffer;
	/* TEST bit set in IE mode page and MRIE==6: report threshold */
	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* TEST set and MRIE==6 */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;   	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* TEST set and MRIE==6 */
		}
	} else {
		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
			;	/* have sense and formats match */
		else if (arr[0] <= 0x70) {
			/* no valid sense stored; emit empty requested format */
			if (dsense) {
				memset(arr, 0, 8);
				arr[0] = 0x72;
				len = 8;
			} else {
				memset(arr, 0, 18);
				arr[0] = 0x70;
				arr[7] = 0xa;
			}
		} else if (dsense) {
			/* convert stored fixed format to descriptor format */
			memset(arr, 0, 8);
			arr[0] = 0x72;
			arr[1] = sbuff[2];     /* sense key */
			arr[2] = sbuff[12];    /* asc */
			arr[3] = sbuff[13];    /* ascq */
			len = 8;
		} else {
			/* convert stored descriptor format to fixed format */
			memset(arr, 0, 18);
			arr[0] = 0x70;
			arr[2] = sbuff[1];
			arr[7] = 0xa;
			arr[12] = sbuff[1];
			arr[13] = sbuff[3];
		}

	}
	/* REQUEST SENSE consumes the sense data: reset to NO SENSE */
	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
	return fill_from_dev_buffer(scp, arr, len);
}
1448 
1449 static int resp_start_stop(struct scsi_cmnd * scp,
1450 			   struct sdebug_dev_info * devip)
1451 {
1452 	unsigned char *cmd = scp->cmnd;
1453 	int power_cond, start;
1454 
1455 	power_cond = (cmd[4] & 0xf0) >> 4;
1456 	if (power_cond) {
1457 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1458 		return check_condition_result;
1459 	}
1460 	start = cmd[4] & 1;
1461 	if (start == devip->stopped)
1462 		devip->stopped = !start;
1463 	return 0;
1464 }
1465 
1466 static sector_t get_sdebug_capacity(void)
1467 {
1468 	static const unsigned int gibibyte = 1073741824;
1469 
1470 	if (sdebug_virtual_gb > 0)
1471 		return (sector_t)sdebug_virtual_gb *
1472 			(gibibyte / sdebug_sector_size);
1473 	else
1474 		return sdebug_store_sectors;
1475 }
1476 
1477 #define SDEBUG_READCAP_ARR_SZ 8
1478 static int resp_readcap(struct scsi_cmnd * scp,
1479 			struct sdebug_dev_info * devip)
1480 {
1481 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1482 	unsigned int capac;
1483 
1484 	/* following just in case virtual_gb changed */
1485 	sdebug_capacity = get_sdebug_capacity();
1486 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1487 	if (sdebug_capacity < 0xffffffff) {
1488 		capac = (unsigned int)sdebug_capacity - 1;
1489 		put_unaligned_be32(capac, arr + 0);
1490 	} else
1491 		put_unaligned_be32(0xffffffff, arr + 0);
1492 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1493 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1494 }
1495 
#define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY(16) response: 8 byte last LBA, 4 byte block size, plus
 * physical block exponent, lowest aligned LBA, provisioning (LBPME/
 * LBPRZ) and protection (P_TYPE/PROT_EN) fields.
 */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		if (sdebug_lbprz)
			arr[14] |= 0x40; /* LBPRZ */
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (sdebug_dif) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1529 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * REPORT TARGET PORT GROUPS response.  Describes the two relative
 * ports advertised by VPD page 0x88 as two single-port groups; group B
 * is always reported unavailable.  The asymmetric access state of
 * group A depends on the vpd_use_hostno module parameter.
 */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (0 == sdebug_vpd_use_hostno) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1607 
/*
 * REPORT SUPPORTED OPERATION CODES (SPC-4) response.  Reporting
 * options 0 lists every supported (non F_INV_OP) opcode, including
 * attached service-action variants, from opcode_info_arr; options 1-3
 * report a single command's support status and CDB usage mask.  When
 * RCTD is set a command timeouts descriptor is appended to each entry.
 */
static int
resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each entry is 8 bytes, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* also emit the commands attached to this entry */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode has service actions; must
					 * use reporting option 2 or 3 */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				/* scan attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* scan attached entries for the sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {	/* supported: emit CDB usage */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1758 
1759 static int
1760 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1761 {
1762 	bool repd;
1763 	u32 alloc_len, len;
1764 	u8 arr[16];
1765 	u8 *cmd = scp->cmnd;
1766 
1767 	memset(arr, 0, sizeof(arr));
1768 	repd = !!(cmd[2] & 0x80);
1769 	alloc_len = get_unaligned_be32(cmd + 6);
1770 	if (alloc_len < 4) {
1771 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772 		return check_condition_result;
1773 	}
1774 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1775 	arr[1] = 0x1;		/* ITNRS */
1776 	if (repd) {
1777 		arr[3] = 0xc;
1778 		len = 16;
1779 	} else
1780 		len = 4;
1781 
1782 	len = (len < alloc_len) ? len : alloc_len;
1783 	return fill_from_dev_buffer(scp, arr, len);
1784 }
1785 
1786 /* <<Following mode page info copied from ST318451LW>> */
1787 
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for mode_sense */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff,
						     0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1798 
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for mode_sense */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0, 0, 0,
						      0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1809 
1810 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1811 {       /* Format device page for mode_sense */
1812 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1813 				     0, 0, 0, 0, 0, 0, 0, 0,
1814 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1815 
1816 	memcpy(p, format_pg, sizeof(format_pg));
1817 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1818 	put_unaligned_be16(sdebug_sector_size, p + 12);
1819 	if (sdebug_removable)
1820 		p[20] |= 0x20; /* should agree with INQUIRY */
1821 	if (1 == pcontrol)
1822 		memset(p + 2, 0, sizeof(format_pg) - 2);
1823 	return sizeof(format_pg);
1824 }
1825 
1826 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1827 { 	/* Caching page for mode_sense */
1828 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1829 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1830 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1831 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1832 
1833 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1834 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1835 	memcpy(p, caching_pg, sizeof(caching_pg));
1836 	if (1 == pcontrol)
1837 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1838 	else if (2 == pcontrol)
1839 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1840 	return sizeof(caching_pg);
1841 }
1842 
1843 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1844 { 	/* Control mode page for mode_sense */
1845 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1846 				        0, 0, 0, 0};
1847 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1848 				     0, 0, 0x2, 0x4b};
1849 
1850 	if (sdebug_dsense)
1851 		ctrl_m_pg[2] |= 0x4;
1852 	else
1853 		ctrl_m_pg[2] &= ~0x4;
1854 
1855 	if (sdebug_ato)
1856 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1857 
1858 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1859 	if (1 == pcontrol)
1860 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1861 	else if (2 == pcontrol)
1862 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1863 	return sizeof(ctrl_m_pg);
1864 }
1865 
1866 
1867 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1868 {	/* Informational Exceptions control mode page for mode_sense */
1869 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1870 				       0, 0, 0x0, 0x0};
1871 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1872 				      0, 0, 0x0, 0x0};
1873 
1874 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1875 	if (1 == pcontrol)
1876 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1877 	else if (2 == pcontrol)
1878 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1879 	return sizeof(iec_m_pg);
1880 }
1881 
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page [0x19] - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
1892 
1893 
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	/* Template describing two phys; the SAS address fields (marked
	 * below) and the attached-phy-id fields are patched in before the
	 * copy to p. The template lives on the stack so the patching does
	 * not persist across calls. */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* SAS address + attached SAS address for phy 0 and phy 1 */
	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* phy identifier fields for the two phy descriptors */
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: zero the descriptors */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
1926 
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage [0x59,0x2] */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0,
						     0x6, 0x10, 0, 0, 0, 0, 0,
						     0, 0, 0, 0};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: zero the body */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
1938 
1939 #define SDEBUG_MAX_MSENSE_SZ 256
1940 
/*
 * MODE SENSE (6 and 10 byte cdbs). Builds the response in arr[]: mode
 * parameter header, optional block descriptor(s) for disks, then the
 * requested mode page(s). 'offset' tracks the running response length.
 */
static int
resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char dbd, llbaa;
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, msense_6, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	dbd = !!(cmd[1] & 0x8);		/* DBD: disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6; /* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* mode page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10); /* LLBAA only in 10 byte cdb */
	/* block descriptors only for direct access devices (ptype 0) */
	if ((0 == sdebug_ptype) && (0 == dbd))
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* set DPOFUA bit for disks */
	if (0 == sdebug_ptype)
		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
	else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes (6 byte cdb) or 8 bytes (10 byte) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;	/* ap: where the next chunk is written */
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append short (8 byte) or long-lba (16 byte) block descriptor */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* only the SAS page set (0x19) supports non-zero subpages here */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		len = resp_format_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x8:	/* Caching page, direct access */
		len = resp_caching_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
	        }
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		offset += len;
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in the mode data length field now the length is known */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2081 
2082 #define SDEBUG_MAX_MSELECT_SZ 512
2083 
/*
 * MODE SELECT (6 and 10 byte cdbs). Fetches the parameter list from the
 * data-out buffer; for the caching [0x8], control [0xa] and informational
 * exceptions [0x1c] pages, copies the supplied values into the
 * corresponding current-values template and raises a MODE CHANGED unit
 * attention. Only PF=1, SP=0 is supported.
 */
static int
resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* NOTE(review): mode data length is reserved for MODE SELECT, so
	 * anything other than a zero field (md_len <= 2) is rejected */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* off: start of the mode page, past header + block descriptor(s) */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* pick up a changed D_SENSE bit immediately */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2161 
static int resp_temp_l_pg(unsigned char * arr)
{	/* Temperature log page [0xd] body: two 6 byte parameters */
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0,
						  65};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2171 
2172 static int resp_ie_l_pg(unsigned char * arr)
2173 {
2174 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2175 		};
2176 
2177         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2178 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2179 		arr[4] = THRESHOLD_EXCEEDED;
2180 		arr[5] = 0xff;
2181 	}
2182         return sizeof(ie_l_pg);
2183 }
2184 
2185 #define SDEBUG_MAX_LSENSE_SZ 512
2186 
2187 static int resp_log_sense(struct scsi_cmnd * scp,
2188                           struct sdebug_dev_info * devip)
2189 {
2190 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2191 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2192 	unsigned char *cmd = scp->cmnd;
2193 
2194 	memset(arr, 0, sizeof(arr));
2195 	ppc = cmd[1] & 0x2;
2196 	sp = cmd[1] & 0x1;
2197 	if (ppc || sp) {
2198 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2199 		return check_condition_result;
2200 	}
2201 	pcontrol = (cmd[2] & 0xc0) >> 6;
2202 	pcode = cmd[2] & 0x3f;
2203 	subpcode = cmd[3] & 0xff;
2204 	alloc_len = get_unaligned_be16(cmd + 7);
2205 	arr[0] = pcode;
2206 	if (0 == subpcode) {
2207 		switch (pcode) {
2208 		case 0x0:	/* Supported log pages log page */
2209 			n = 4;
2210 			arr[n++] = 0x0;		/* this page */
2211 			arr[n++] = 0xd;		/* Temperature */
2212 			arr[n++] = 0x2f;	/* Informational exceptions */
2213 			arr[3] = n - 4;
2214 			break;
2215 		case 0xd:	/* Temperature log page */
2216 			arr[3] = resp_temp_l_pg(arr + 4);
2217 			break;
2218 		case 0x2f:	/* Informational exceptions log page */
2219 			arr[3] = resp_ie_l_pg(arr + 4);
2220 			break;
2221 		default:
2222 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2223 			return check_condition_result;
2224 		}
2225 	} else if (0xff == subpcode) {
2226 		arr[0] |= 0x40;
2227 		arr[1] = subpcode;
2228 		switch (pcode) {
2229 		case 0x0:	/* Supported log pages and subpages log page */
2230 			n = 4;
2231 			arr[n++] = 0x0;
2232 			arr[n++] = 0x0;		/* 0,0 page */
2233 			arr[n++] = 0x0;
2234 			arr[n++] = 0xff;	/* this page */
2235 			arr[n++] = 0xd;
2236 			arr[n++] = 0x0;		/* Temperature */
2237 			arr[n++] = 0x2f;
2238 			arr[n++] = 0x0;	/* Informational exceptions */
2239 			arr[3] = n - 4;
2240 			break;
2241 		case 0xd:	/* Temperature subpages */
2242 			n = 4;
2243 			arr[n++] = 0xd;
2244 			arr[n++] = 0x0;		/* Temperature */
2245 			arr[3] = n - 4;
2246 			break;
2247 		case 0x2f:	/* Informational exceptions subpages */
2248 			n = 4;
2249 			arr[n++] = 0x2f;
2250 			arr[n++] = 0x0;		/* Informational exceptions */
2251 			arr[3] = n - 4;
2252 			break;
2253 		default:
2254 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2255 			return check_condition_result;
2256 		}
2257 	} else {
2258 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2259 		return check_condition_result;
2260 	}
2261 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2262 	return fill_from_dev_buffer(scp, arr,
2263 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2264 }
2265 
2266 static int check_device_access_params(struct scsi_cmnd *scp,
2267 				      unsigned long long lba, unsigned int num)
2268 {
2269 	if (lba + num > sdebug_capacity) {
2270 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2271 		return check_condition_result;
2272 	}
2273 	/* transfer length excessive (tie in to block limits VPD page) */
2274 	if (num > sdebug_store_sectors) {
2275 		/* needs work to find which cdb byte 'num' comes from */
2276 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2277 		return check_condition_result;
2278 	}
2279 	return 0;
2280 }
2281 
/* Returns number of bytes copied or -1 if error. Copies 'num' sectors
 * starting at 'lba' between the command's data sgl and the fake store.
 * The fake store is treated as circular: lba is reduced modulo the store
 * size and a span past the end wraps back to sector 0. */
static int
do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* the command's data direction must match unless bidirectional */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* block: starting sector within the store (lba mod store size) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors; /* wrapped part */

	/* first (possibly only) fragment, up to the end of the store */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped fragment starts at the beginning of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2322 
2323 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2324  * arr into fake_store(lba,num) and return true. If comparison fails then
2325  * return false. */
2326 static bool
2327 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2328 {
2329 	bool res;
2330 	u64 block, rest = 0;
2331 	u32 store_blks = sdebug_store_sectors;
2332 	u32 lb_size = sdebug_sector_size;
2333 
2334 	block = do_div(lba, store_blks);
2335 	if (block + num > store_blks)
2336 		rest = block + num - store_blks;
2337 
2338 	res = !memcmp(fake_storep + (block * lb_size), arr,
2339 		      (num - rest) * lb_size);
2340 	if (!res)
2341 		return res;
2342 	if (rest)
2343 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2344 			     rest * lb_size);
2345 	if (!res)
2346 		return res;
2347 	arr += num * lb_size;
2348 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2349 	if (rest)
2350 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2351 		       rest * lb_size);
2352 	return res;
2353 }
2354 
2355 static __be16 dif_compute_csum(const void *buf, int len)
2356 {
2357 	__be16 csum;
2358 
2359 	if (sdebug_guard)
2360 		csum = (__force __be16)ip_compute_csum(buf, len);
2361 	else
2362 		csum = cpu_to_be16(crc_t10dif(buf, len));
2363 
2364 	return csum;
2365 }
2366 
/*
 * Verify one sector of data against its protection tuple. Returns 0 on
 * success; 0x01 on guard tag mismatch or 0x03 on reference tag mismatch
 * (callers use the value as the ascq with asc 0x10 in the sense data).
 */
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: reference tag is the low 32 bits of the sector number */
	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: reference tag is the expected initial lba from the cdb */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2393 
/*
 * Copy protection information for 'sectors' sectors starting at 'sector'
 * between the command's protection sgl and dif_storep. Direction: when
 * 'read' is true copy dif_storep -> sgl, else sgl -> dif_storep. Handles
 * wrap-around at the end of dif_storep.
 */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* rest: bytes that wrap past the end of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped bytes continue at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2436 
2437 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2438 			    unsigned int sectors, u32 ei_lba)
2439 {
2440 	unsigned int i;
2441 	struct sd_dif_tuple *sdt;
2442 	sector_t sector;
2443 
2444 	for (i = 0; i < sectors; i++, ei_lba++) {
2445 		int ret;
2446 
2447 		sector = start_sec + i;
2448 		sdt = dif_store(sector);
2449 
2450 		if (sdt->app_tag == cpu_to_be16(0xffff))
2451 			continue;
2452 
2453 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2454 		if (ret) {
2455 			dif_errors++;
2456 			return ret;
2457 		}
2458 	}
2459 
2460 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2461 	dix_reads++;
2462 
2463 	return 0;
2464 }
2465 
/*
 * READ(6/10/12/16/32) and the read half of XDWRITEREAD(10). Decodes the
 * cdb variant, performs bounds and protection checks, optional error
 * injection, then copies from the fake store to the data-in buffer.
 */
static int
resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial lba (type 2 protection) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* extract lba and transfer length from the cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* 21 bit lba spread over bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* validate the RDPROTECT field against the configured DIF type */
	if (check_prot) {
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		/* short-read injection: return only half the sectors */
		if (ep->inj_short)
			num /= 2;
	}

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional simulated medium error over a fixed lba window */
	if ((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (sdebug_dix && scsi_prot_sg_count(scp)) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (ret == -1)
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injection, per module options */
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2605 
/* Hex/ASCII dump of 'len' bytes of buf via pr_err, 16 bytes per row. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/* bound the row at len so a partial final row does not
		 * read past the end of buf (was a fixed 16 byte row) */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2627 
/*
 * Verify the PI tuples supplied with a protected WRITE against the data
 * being written, walking the data and protection sgls in lockstep, then
 * store the PI into dif_storep. Returns 0 on success or the dif_verify()
 * error code (dif_errors bumped) on mismatch.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct sd_dif_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* position within current protection page */
	int dpage_offset;	/* position within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* running out of data pages before PI is a malformed cmd */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct sd_dif_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: persist the PI for later READs */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2699 
2700 static unsigned long lba_to_map_index(sector_t lba)
2701 {
2702 	if (sdebug_unmap_alignment)
2703 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2704 	sector_div(lba, sdebug_unmap_granularity);
2705 	return lba;
2706 }
2707 
2708 static sector_t map_index_to_lba(unsigned long index)
2709 {
2710 	sector_t lba = index * sdebug_unmap_granularity;
2711 
2712 	if (sdebug_unmap_alignment)
2713 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2714 	return lba;
2715 }
2716 
2717 static unsigned int map_state(sector_t lba, unsigned int *num)
2718 {
2719 	sector_t end;
2720 	unsigned int mapped;
2721 	unsigned long index;
2722 	unsigned long next;
2723 
2724 	index = lba_to_map_index(lba);
2725 	mapped = test_bit(index, map_storep);
2726 
2727 	if (mapped)
2728 		next = find_next_zero_bit(map_storep, map_size, index);
2729 	else
2730 		next = find_next_bit(map_storep, map_size, index);
2731 
2732 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2733 	*num = end - lba;
2734 	return mapped;
2735 }
2736 
2737 static void map_region(sector_t lba, unsigned int len)
2738 {
2739 	sector_t end = lba + len;
2740 
2741 	while (lba < end) {
2742 		unsigned long index = lba_to_map_index(lba);
2743 
2744 		if (index < map_size)
2745 			set_bit(index, map_storep);
2746 
2747 		lba = map_index_to_lba(index + 1);
2748 	}
2749 }
2750 
2751 static void unmap_region(sector_t lba, unsigned int len)
2752 {
2753 	sector_t end = lba + len;
2754 
2755 	while (lba < end) {
2756 		unsigned long index = lba_to_map_index(lba);
2757 
2758 		if (lba == map_index_to_lba(index) &&
2759 		    lba + sdebug_unmap_granularity <= end &&
2760 		    index < map_size) {
2761 			clear_bit(index, map_storep);
2762 			if (sdebug_lbprz) {
2763 				memset(fake_storep +
2764 				       lba * sdebug_sector_size, 0,
2765 				       sdebug_sector_size *
2766 				       sdebug_unmap_granularity);
2767 			}
2768 			if (dif_storep) {
2769 				memset(dif_storep + lba, 0xff,
2770 				       sizeof(*dif_storep) *
2771 				       sdebug_unmap_granularity);
2772 			}
2773 		}
2774 		lba = map_index_to_lba(index + 1);
2775 	}
2776 }
2777 
/*
 * Respond to the WRITE command family: WRITE(6), WRITE(10), WRITE(12),
 * WRITE(16), WRITE(32) and the write phase of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, range checks them, optionally verifies
 * DIX protection data, then copies the data-out buffer to the fake
 * store under the atomic_rw write lock.  May also inject
 * recovered/dif/dix errors when error-injection options are active.
 *
 * Returns 0 on success, a check/illegal condition result, or
 * DID_ERROR << 16 if the data transfer itself failed.
 */
static int
resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;		/* expected initial LBA; only WRITE(32) sets it */
	unsigned long iflags;
	int ret;
	bool check_prot;	/* cdb carries WRPROTECT bits to validate */

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* in WRITE(6) a transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (check_prot) {
		/* type 2 protection requires WRPROTECT == 0 */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (sdebug_dix && scsi_prot_sg_count(scp)) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	/* a successful write provisions the blocks it touched */
	if (scsi_debug_lbp())
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (-1 == ret)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection after an otherwise successful write */
	if (sdebug_any_injecting_opt) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2895 
/*
 * Common WRITE SAME(10/16) back end.  If @unmap is set (and logical
 * block provisioning is active) the range is simply deallocated.
 * Otherwise one logical block is obtained -- zeroes when @ndob (no
 * data-out buffer) is set, else fetched from the data-out buffer --
 * and replicated across the remaining num-1 blocks of the fake store.
 * NOTE(review): @ei_lba is accepted but unused here -- confirm intent.
 */
static int
resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
		bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of @lba within the fake store */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		/* NOTE(review): only one block is fetched above, so this
		 * compares against num blocks and may warn spuriously for
		 * num > 1 -- confirm whether that is intended */
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* the written range is now provisioned */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
2947 
2948 static int
2949 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2950 {
2951 	u8 *cmd = scp->cmnd;
2952 	u32 lba;
2953 	u16 num;
2954 	u32 ei_lba = 0;
2955 	bool unmap = false;
2956 
2957 	if (cmd[1] & 0x8) {
2958 		if (sdebug_lbpws10 == 0) {
2959 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2960 			return check_condition_result;
2961 		} else
2962 			unmap = true;
2963 	}
2964 	lba = get_unaligned_be32(cmd + 2);
2965 	num = get_unaligned_be16(cmd + 7);
2966 	if (num > sdebug_write_same_length) {
2967 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2968 		return check_condition_result;
2969 	}
2970 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
2971 }
2972 
2973 static int
2974 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2975 {
2976 	u8 *cmd = scp->cmnd;
2977 	u64 lba;
2978 	u32 num;
2979 	u32 ei_lba = 0;
2980 	bool unmap = false;
2981 	bool ndob = false;
2982 
2983 	if (cmd[1] & 0x8) {	/* UNMAP */
2984 		if (sdebug_lbpws == 0) {
2985 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2986 			return check_condition_result;
2987 		} else
2988 			unmap = true;
2989 	}
2990 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
2991 		ndob = true;
2992 	lba = get_unaligned_be64(cmd + 2);
2993 	num = get_unaligned_be32(cmd + 10);
2994 	if (num > sdebug_write_same_length) {
2995 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2996 		return check_condition_result;
2997 	}
2998 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
2999 }
3000 
3001 /* Note the mode field is in the same position as the (lower) service action
3002  * field. For the Report supported operation codes command, SPC-4 suggests
3003  * each mode of this command should be reported separately; for future. */
3004 static int
3005 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3006 {
3007 	u8 *cmd = scp->cmnd;
3008 	struct scsi_device *sdp = scp->device;
3009 	struct sdebug_dev_info *dp;
3010 	u8 mode;
3011 
3012 	mode = cmd[1] & 0x1f;
3013 	switch (mode) {
3014 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3015 		/* set UAs on this device only */
3016 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3017 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3018 		break;
3019 	case 0x5:	/* download MC, save and ACT */
3020 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3021 		break;
3022 	case 0x6:	/* download MC with offsets and ACT */
3023 		/* set UAs on most devices (LUs) in this target */
3024 		list_for_each_entry(dp,
3025 				    &devip->sdbg_host->dev_info_list,
3026 				    dev_list)
3027 			if (dp->target == sdp->id) {
3028 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3029 				if (devip != dp)
3030 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3031 						dp->uas_bm);
3032 			}
3033 		break;
3034 	case 0x7:	/* download MC with offsets, save, and ACT */
3035 		/* set UA on all devices (LUs) in this target */
3036 		list_for_each_entry(dp,
3037 				    &devip->sdbg_host->dev_info_list,
3038 				    dev_list)
3039 			if (dp->target == sdp->id)
3040 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3041 					dp->uas_bm);
3042 		break;
3043 	default:
3044 		/* do nothing for this command for other mode values */
3045 		break;
3046 	}
3047 	return 0;
3048 }
3049 
/*
 * COMPARE AND WRITE (opcode 0x89).  The data-out buffer carries
 * 2 * num logical blocks: the first half is the verify (compare) data,
 * the second half the data to write.  Both halves are fetched into a
 * temporary buffer by briefly redirecting fake_storep at it, then
 * comp_write_worker() performs the actual compare-and-write against
 * the real store (presumably: compare first half, write second half on
 * match -- confirm in its definition).
 */
static int
resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;	/* total blocks to fetch: compare half + write half */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* type 2 protection requires WRPROTECT == 0 */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* a successful compare-and-write provisions the blocks */
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3125 
/* One block descriptor from an UNMAP parameter list (big-endian). */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to deallocate */
	__be32	blocks;		/* number of LBAs in this range */
	__be32	__reserved;
};
3131 
3132 static int
3133 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3134 {
3135 	unsigned char *buf;
3136 	struct unmap_block_desc *desc;
3137 	unsigned int i, payload_len, descriptors;
3138 	int ret;
3139 	unsigned long iflags;
3140 
3141 
3142 	if (!scsi_debug_lbp())
3143 		return 0;	/* fib and say its done */
3144 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3145 	BUG_ON(scsi_bufflen(scp) != payload_len);
3146 
3147 	descriptors = (payload_len - 8) / 16;
3148 	if (descriptors > sdebug_unmap_max_desc) {
3149 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3150 		return check_condition_result;
3151 	}
3152 
3153 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3154 	if (!buf) {
3155 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3156 				INSUFF_RES_ASCQ);
3157 		return check_condition_result;
3158 	}
3159 
3160 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3161 
3162 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3163 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3164 
3165 	desc = (void *)&buf[8];
3166 
3167 	write_lock_irqsave(&atomic_rw, iflags);
3168 
3169 	for (i = 0 ; i < descriptors ; i++) {
3170 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3171 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3172 
3173 		ret = check_device_access_params(scp, lba, num);
3174 		if (ret)
3175 			goto out;
3176 
3177 		unmap_region(lba, num);
3178 	}
3179 
3180 	ret = 0;
3181 
3182 out:
3183 	write_unlock_irqrestore(&atomic_rw, iflags);
3184 	kfree(buf);
3185 
3186 	return ret;
3187 }
3188 
3189 #define SDEBUG_GET_LBA_STATUS_LEN 32
3190 
3191 static int
3192 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3193 {
3194 	u8 *cmd = scp->cmnd;
3195 	u64 lba;
3196 	u32 alloc_len, mapped, num;
3197 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3198 	int ret;
3199 
3200 	lba = get_unaligned_be64(cmd + 2);
3201 	alloc_len = get_unaligned_be32(cmd + 10);
3202 
3203 	if (alloc_len < 24)
3204 		return 0;
3205 
3206 	ret = check_device_access_params(scp, lba, 1);
3207 	if (ret)
3208 		return ret;
3209 
3210 	if (scsi_debug_lbp())
3211 		mapped = map_state(lba, &num);
3212 	else {
3213 		mapped = 1;
3214 		/* following just in case virtual_gb changed */
3215 		sdebug_capacity = get_sdebug_capacity();
3216 		if (sdebug_capacity - lba <= 0xffffffff)
3217 			num = sdebug_capacity - lba;
3218 		else
3219 			num = 0xffffffff;
3220 	}
3221 
3222 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3223 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3224 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3225 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3226 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3227 
3228 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3229 }
3230 
3231 #define SDEBUG_RLUN_ARR_SZ 256
3232 
/*
 * REPORT LUNS.  Builds a LUN list for this target: select_report 0
 * reports the normal LUs (optionally a well-known LU too for values
 * 1 and 2), select_report 1 reports only the REPORT LUNS well-known
 * LU.  The response is truncated to SDEBUG_RLUN_ARR_SZ; the header's
 * LUN LIST LENGTH still reflects the untruncated count.
 */
static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, want_wlun, shortish;
	u64 lun;
	unsigned char *cmd = scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	clear_luns_changed_on_target(devip);
	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	shortish = (alloc_len < 4);
	if (shortish || (select_report > 2)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = sdebug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;	/* well-known LUs only */
	else if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN 0 suppressed; numbering starts at 1 */
	want_wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + want_wlun;
	/* LUN LIST LENGTH header bytes (untruncated count) */
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	/* clamp to what fits in the response buffer */
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
			    sizeof(struct scsi_lun)), num);
	if (n < num) {
		want_wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (sdebug_no_lun_0 ? 1 : 0);
             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (want_wlun) {
		/* append the REPORT LUNS well-known LU */
		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
3289 
3290 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3291 			    unsigned int num, struct sdebug_dev_info *devip)
3292 {
3293 	int j;
3294 	unsigned char *kaddr, *buf;
3295 	unsigned int offset;
3296 	struct scsi_data_buffer *sdb = scsi_in(scp);
3297 	struct sg_mapping_iter miter;
3298 
3299 	/* better not to use temporary buffer. */
3300 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3301 	if (!buf) {
3302 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3303 				INSUFF_RES_ASCQ);
3304 		return check_condition_result;
3305 	}
3306 
3307 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3308 
3309 	offset = 0;
3310 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3311 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3312 
3313 	while (sg_miter_next(&miter)) {
3314 		kaddr = miter.addr;
3315 		for (j = 0; j < miter.length; j++)
3316 			*(kaddr + j) ^= *(buf + offset + j);
3317 
3318 		offset += miter.length;
3319 	}
3320 	sg_miter_stop(&miter);
3321 	kfree(buf);
3322 
3323 	return 0;
3324 }
3325 
3326 static int
3327 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3328 {
3329 	u8 *cmd = scp->cmnd;
3330 	u64 lba;
3331 	u32 num;
3332 	int errsts;
3333 
3334 	if (!scsi_bidi_cmnd(scp)) {
3335 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3336 				INSUFF_RES_ASCQ);
3337 		return check_condition_result;
3338 	}
3339 	errsts = resp_read_dt0(scp, devip);
3340 	if (errsts)
3341 		return errsts;
3342 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3343 		errsts = resp_write_dt0(scp, devip);
3344 		if (errsts)
3345 			return errsts;
3346 	}
3347 	lba = get_unaligned_be32(cmd + 2);
3348 	num = get_unaligned_be16(cmd + 7);
3349 	return resp_xdwriteread(scp, lba, num, devip);
3350 }
3351 
3352 /* When tasklet goes off this function is called. */
static void sdebug_q_cmd_complete(unsigned long indx)
{
	int qa_indx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	atomic_inc(&sdebug_completions);
	qa_indx = indx;
	/* the index must address a slot in the fixed queued_arr[] */
	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
		pr_err("wild qa_indx=%d\n", qa_indx);
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[qa_indx];
	scp = sqcp->a_cmnd;
	if (NULL == scp) {
		/* slot already emptied (e.g. by an abort) */
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("scp is NULL\n");
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (devip)
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* positive retired_max_queue => max_queue was reduced while
	 * commands above the new limit were still outstanding */
	if (atomic_read(&retired_max_queue) > 0)
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qa_indx >= retval) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no slot above the reduced limit remains busy,
		 * the retired limit can be dropped */
		k = find_last_bit(queued_in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3409 
3410 /* When high resolution timer goes off this function is called. */
static enum hrtimer_restart
sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	int qa_indx;
	int retiring = 0;
	unsigned long iflags;
	/* timer is the first member, so the cast recovers the wrapper */
	struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	atomic_inc(&sdebug_completions);
	qa_indx = sd_hrtp->qa_indx;
	/* the index must address a slot in the fixed queued_arr[] */
	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
		pr_err("wild qa_indx=%d\n", qa_indx);
		goto the_end;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[qa_indx];
	scp = sqcp->a_cmnd;
	if (NULL == scp) {
		/* slot already emptied (e.g. by an abort) */
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("scp is NULL\n");
		goto the_end;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (devip)
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* positive retired_max_queue => max_queue was reduced while
	 * commands above the new limit were still outstanding */
	if (atomic_read(&retired_max_queue) > 0)
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("Unexpected completion\n");
		goto the_end;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qa_indx >= retval) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			pr_err("index %d too large\n", retval);
			goto the_end;
		}
		/* once no slot above the reduced limit remains busy,
		 * the retired limit can be dropped */
		k = find_last_bit(queued_in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
the_end:
	return HRTIMER_NORESTART;
}
3471 
3472 static struct sdebug_dev_info *
3473 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3474 {
3475 	struct sdebug_dev_info *devip;
3476 
3477 	devip = kzalloc(sizeof(*devip), flags);
3478 	if (devip) {
3479 		devip->sdbg_host = sdbg_host;
3480 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3481 	}
3482 	return devip;
3483 }
3484 
3485 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3486 {
3487 	struct sdebug_host_info * sdbg_host;
3488 	struct sdebug_dev_info * open_devip = NULL;
3489 	struct sdebug_dev_info * devip =
3490 			(struct sdebug_dev_info *)sdev->hostdata;
3491 
3492 	if (devip)
3493 		return devip;
3494 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3495 	if (!sdbg_host) {
3496 		pr_err("Host info NULL\n");
3497 		return NULL;
3498         }
3499 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3500 		if ((devip->used) && (devip->channel == sdev->channel) &&
3501                     (devip->target == sdev->id) &&
3502                     (devip->lun == sdev->lun))
3503                         return devip;
3504 		else {
3505 			if ((!devip->used) && (!open_devip))
3506 				open_devip = devip;
3507 		}
3508 	}
3509 	if (!open_devip) { /* try and make a new one */
3510 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3511 		if (!open_devip) {
3512 			pr_err("out of memory at line %d\n", __LINE__);
3513 			return NULL;
3514 		}
3515 	}
3516 
3517 	open_devip->channel = sdev->channel;
3518 	open_devip->target = sdev->id;
3519 	open_devip->lun = sdev->lun;
3520 	open_devip->sdbg_host = sdbg_host;
3521 	atomic_set(&open_devip->num_in_q, 0);
3522 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3523 	open_devip->used = true;
3524 	return open_devip;
3525 }
3526 
3527 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3528 {
3529 	if (sdebug_verbose)
3530 		pr_info("slave_alloc <%u %u %u %llu>\n",
3531 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3532 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3533 	return 0;
3534 }
3535 
3536 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3537 {
3538 	struct sdebug_dev_info *devip;
3539 
3540 	if (sdebug_verbose)
3541 		pr_info("slave_configure <%u %u %u %llu>\n",
3542 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3543 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3544 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3545 	devip = devInfoReg(sdp);
3546 	if (NULL == devip)
3547 		return 1;	/* no resources, will be marked offline */
3548 	sdp->hostdata = devip;
3549 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3550 	if (sdebug_no_uld)
3551 		sdp->no_uld_attach = 1;
3552 	return 0;
3553 }
3554 
3555 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3556 {
3557 	struct sdebug_dev_info *devip =
3558 		(struct sdebug_dev_info *)sdp->hostdata;
3559 
3560 	if (sdebug_verbose)
3561 		pr_info("slave_destroy <%u %u %u %llu>\n",
3562 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3563 	if (devip) {
3564 		/* make this slot available for re-use */
3565 		devip->used = false;
3566 		sdp->hostdata = NULL;
3567 	}
3568 }
3569 
3570 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k, qmax, r_qmax;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	qmax = sdebug_max_queue;
	r_qmax = atomic_read(&retired_max_queue);
	/* scan up to the larger of the live and retired queue depths */
	if (r_qmax > qmax)
		qmax = r_qmax;
	for (k = 0; k < qmax; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (cmnd == sqcp->a_cmnd) {
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* drop the lock before cancelling: the
				 * completion handlers take it too, so
				 * cancelling under it could deadlock */
				spin_unlock_irqrestore(&queued_arr_lock,
						       iflags);
				if ((sdebug_delay > 0) ||
				    (sdebug_ndelay > 0)) {
					if (sqcp->sd_hrtp)
						hrtimer_cancel(
							&sqcp->sd_hrtp->hrt);
				} else if (sdebug_delay < 0) {
					if (sqcp->tletp)
						tasklet_kill(sqcp->tletp);
				}
				/* atomic, safe outside the spinlock */
				clear_bit(k, queued_in_use_bm);
				return 1;
			}
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return 0;
}
3611 
3612 /* Deletes (stops) timers or tasklets of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (sqcp->a_cmnd) {
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* drop the lock before cancelling: the
				 * completion handlers take it too, so
				 * cancelling under it could deadlock */
				spin_unlock_irqrestore(&queued_arr_lock,
						       iflags);
				if ((sdebug_delay > 0) ||
				    (sdebug_ndelay > 0)) {
					if (sqcp->sd_hrtp)
						hrtimer_cancel(
							&sqcp->sd_hrtp->hrt);
				} else if (sdebug_delay < 0) {
					if (sqcp->tletp)
						tasklet_kill(sqcp->tletp);
				}
				clear_bit(k, queued_in_use_bm);
				/* re-acquire before continuing the scan */
				spin_lock_irqsave(&queued_arr_lock, iflags);
			}
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
3648 
3649 /* Free queued command memory on heap */
3650 static void free_all_queued(void)
3651 {
3652 	unsigned long iflags;
3653 	int k;
3654 	struct sdebug_queued_cmd *sqcp;
3655 
3656 	spin_lock_irqsave(&queued_arr_lock, iflags);
3657 	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3658 		sqcp = &queued_arr[k];
3659 		kfree(sqcp->tletp);
3660 		sqcp->tletp = NULL;
3661 		kfree(sqcp->sd_hrtp);
3662 		sqcp->sd_hrtp = NULL;
3663 	}
3664 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3665 }
3666 
3667 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3668 {
3669 	++num_aborts;
3670 	if (SCpnt) {
3671 		if (SCpnt->device &&
3672 		    (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3673 			sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3674 				    __func__);
3675 		stop_queued_cmnd(SCpnt);
3676 	}
3677 	return SUCCESS;
3678 }
3679 
3680 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3681 {
3682 	struct sdebug_dev_info * devip;
3683 
3684 	++num_dev_resets;
3685 	if (SCpnt && SCpnt->device) {
3686 		struct scsi_device *sdp = SCpnt->device;
3687 
3688 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3689 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3690 		devip = devInfoReg(sdp);
3691 		if (devip)
3692 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3693 	}
3694 	return SUCCESS;
3695 }
3696 
3697 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3698 {
3699 	struct sdebug_host_info *sdbg_host;
3700 	struct sdebug_dev_info *devip;
3701 	struct scsi_device *sdp;
3702 	struct Scsi_Host *hp;
3703 	int k = 0;
3704 
3705 	++num_target_resets;
3706 	if (!SCpnt)
3707 		goto lie;
3708 	sdp = SCpnt->device;
3709 	if (!sdp)
3710 		goto lie;
3711 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3712 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3713 	hp = sdp->host;
3714 	if (!hp)
3715 		goto lie;
3716 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3717 	if (sdbg_host) {
3718 		list_for_each_entry(devip,
3719 				    &sdbg_host->dev_info_list,
3720 				    dev_list)
3721 			if (devip->target == sdp->id) {
3722 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3723 				++k;
3724 			}
3725 	}
3726 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3727 		sdev_printk(KERN_INFO, sdp,
3728 			    "%s: %d device(s) found in target\n", __func__, k);
3729 lie:
3730 	return SUCCESS;
3731 }
3732 
3733 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3734 {
3735 	struct sdebug_host_info *sdbg_host;
3736 	struct sdebug_dev_info *devip;
3737         struct scsi_device * sdp;
3738         struct Scsi_Host * hp;
3739 	int k = 0;
3740 
3741 	++num_bus_resets;
3742 	if (!(SCpnt && SCpnt->device))
3743 		goto lie;
3744 	sdp = SCpnt->device;
3745 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3746 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3747 	hp = sdp->host;
3748 	if (hp) {
3749 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3750 		if (sdbg_host) {
3751 			list_for_each_entry(devip,
3752                                             &sdbg_host->dev_info_list,
3753 					    dev_list) {
3754 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3755 				++k;
3756 			}
3757 		}
3758 	}
3759 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3760 		sdev_printk(KERN_INFO, sdp,
3761 			    "%s: %d device(s) found in host\n", __func__, k);
3762 lie:
3763 	return SUCCESS;
3764 }
3765 
3766 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3767 {
3768 	struct sdebug_host_info * sdbg_host;
3769 	struct sdebug_dev_info *devip;
3770 	int k = 0;
3771 
3772 	++num_host_resets;
3773 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3774 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3775         spin_lock(&sdebug_host_list_lock);
3776         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3777 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3778 				    dev_list) {
3779 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3780 			++k;
3781 		}
3782         }
3783         spin_unlock(&sdebug_host_list_lock);
3784 	stop_all_queued();
3785 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3786 		sdev_printk(KERN_INFO, SCpnt->device,
3787 			    "%s: %d device(s) found\n", __func__, k);
3788 	return SUCCESS;
3789 }
3790 
/* Write an MBR-style partition table into the first block of the ram store
 * so the simulated disk appears pre-partitioned. Needs at least 1 MiB of
 * store and sdebug_num_parts >= 1; otherwise a no-op. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	/* partition start sectors; [num_parts] holds the end sector,
	 * [num_parts + 1] is a 0 terminator for the build loop below */
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* the first track (sdebug_sectors_per sectors) is reserved for
	 * the MBR itself, so it is excluded from the partitioned space */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors per cylinder */
        starts[0] = sdebug_sectors_per;
	/* round each subsequent start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);	/* first MBR entry */
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert the LBA bounds to CHS; sector field is 1-based */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3840 
3841 static int
3842 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3843 	      int scsi_result, int delta_jiff)
3844 {
3845 	unsigned long iflags;
3846 	int k, num_in_q, qdepth, inject;
3847 	struct sdebug_queued_cmd *sqcp = NULL;
3848 	struct scsi_device *sdp;
3849 
3850 	/* this should never happen */
3851 	if (WARN_ON(!cmnd))
3852 		return SCSI_MLQUEUE_HOST_BUSY;
3853 
3854 	if (NULL == devip) {
3855 		pr_warn("called devip == NULL\n");
3856 		/* no particularly good error to report back */
3857 		return SCSI_MLQUEUE_HOST_BUSY;
3858 	}
3859 
3860 	sdp = cmnd->device;
3861 
3862 	if (sdebug_verbose && scsi_result)
3863 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3864 			    __func__, scsi_result);
3865 	if (delta_jiff == 0)
3866 		goto respond_in_thread;
3867 
3868 	/* schedule the response at a later time if resources permit */
3869 	spin_lock_irqsave(&queued_arr_lock, iflags);
3870 	num_in_q = atomic_read(&devip->num_in_q);
3871 	qdepth = cmnd->device->queue_depth;
3872 	inject = 0;
3873 	if ((qdepth > 0) && (num_in_q >= qdepth)) {
3874 		if (scsi_result) {
3875 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3876 			goto respond_in_thread;
3877 		} else
3878 			scsi_result = device_qfull_result;
3879 	} else if ((sdebug_every_nth != 0) &&
3880 		   (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
3881 		   (scsi_result == 0)) {
3882 		if ((num_in_q == (qdepth - 1)) &&
3883 		    (atomic_inc_return(&sdebug_a_tsf) >=
3884 		     abs(sdebug_every_nth))) {
3885 			atomic_set(&sdebug_a_tsf, 0);
3886 			inject = 1;
3887 			scsi_result = device_qfull_result;
3888 		}
3889 	}
3890 
3891 	k = find_first_zero_bit(queued_in_use_bm, sdebug_max_queue);
3892 	if (k >= sdebug_max_queue) {
3893 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3894 		if (scsi_result)
3895 			goto respond_in_thread;
3896 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
3897 			scsi_result = device_qfull_result;
3898 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
3899 			sdev_printk(KERN_INFO, sdp,
3900 				    "%s: max_queue=%d exceeded, %s\n",
3901 				    __func__, sdebug_max_queue,
3902 				    (scsi_result ?  "status: TASK SET FULL" :
3903 						    "report: host busy"));
3904 		if (scsi_result)
3905 			goto respond_in_thread;
3906 		else
3907 			return SCSI_MLQUEUE_HOST_BUSY;
3908 	}
3909 	__set_bit(k, queued_in_use_bm);
3910 	atomic_inc(&devip->num_in_q);
3911 	sqcp = &queued_arr[k];
3912 	sqcp->a_cmnd = cmnd;
3913 	cmnd->result = scsi_result;
3914 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3915 	if ((delta_jiff > 0) || (sdebug_ndelay > 0)) {
3916 		struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3917 		ktime_t kt;
3918 
3919 		if (delta_jiff > 0) {
3920 			struct timespec ts;
3921 
3922 			jiffies_to_timespec(delta_jiff, &ts);
3923 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
3924 		} else
3925 			kt = ktime_set(0, sdebug_ndelay);
3926 		if (NULL == sd_hp) {
3927 			sd_hp = kzalloc(sizeof(*sd_hp), GFP_ATOMIC);
3928 			if (NULL == sd_hp)
3929 				return SCSI_MLQUEUE_HOST_BUSY;
3930 			sqcp->sd_hrtp = sd_hp;
3931 			hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3932 				     HRTIMER_MODE_REL);
3933 			sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3934 			sd_hp->qa_indx = k;
3935 		}
3936 		hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3937 	} else {	/* delay < 0 */
3938 		if (NULL == sqcp->tletp) {
3939 			sqcp->tletp = kzalloc(sizeof(*sqcp->tletp),
3940 					      GFP_ATOMIC);
3941 			if (NULL == sqcp->tletp)
3942 				return SCSI_MLQUEUE_HOST_BUSY;
3943 			tasklet_init(sqcp->tletp,
3944 				     sdebug_q_cmd_complete, k);
3945 		}
3946 		if (-1 == delta_jiff)
3947 			tasklet_hi_schedule(sqcp->tletp);
3948 		else
3949 			tasklet_schedule(sqcp->tletp);
3950 	}
3951 	if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
3952 	    (scsi_result == device_qfull_result))
3953 		sdev_printk(KERN_INFO, sdp,
3954 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
3955 			    num_in_q, (inject ? "<inject> " : ""),
3956 			    "status: TASK SET FULL");
3957 	return 0;
3958 
3959 respond_in_thread:	/* call back to mid-layer using invocation thread */
3960 	cmnd->result = scsi_result;
3961 	cmnd->scsi_done(cmnd);
3962 	return 0;
3963 }
3964 
3965 /* Note: The following macros create attribute files in the
3966    /sys/module/scsi_debug/parameters directory. Unfortunately this
3967    driver is unaware of a change and cannot trigger auxiliary actions
3968    as it can when the corresponding attribute in the
3969    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3970  */
/* Parameters are listed alphabetically. Those marked S_IWUSR may also be
 * changed by root at run time through
 * /sys/module/scsi_debug/parameters/<name>; the rest are load-time only. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

/* One-line description per parameter, surfaced by modinfo(8). */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4057 
4058 static char sdebug_info[256];
4059 
4060 static const char * scsi_debug_info(struct Scsi_Host * shp)
4061 {
4062 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4063 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4064 		sdebug_version_date, sdebug_dev_size_mb, sdebug_opts);
4065 	return sdebug_info;
4066 }
4067 
4068 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4069 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4070 {
4071 	char arr[16];
4072 	int opts;
4073 	int minLen = length > 15 ? 15 : length;
4074 
4075 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4076 		return -EACCES;
4077 	memcpy(arr, buffer, minLen);
4078 	arr[minLen] = '\0';
4079 	if (1 != sscanf(arr, "%d", &opts))
4080 		return -EINVAL;
4081 	sdebug_opts = opts;
4082 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4083 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4084 	if (sdebug_every_nth != 0)
4085 		atomic_set(&sdebug_cmnd_count, 0);
4086 	return length;
4087 }
4088 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, l;
	char b[32];	/* optional " (curr:N)" suffix for every_nth */

	if (sdebug_every_nth > 0)
		snprintf(b, sizeof(b), " (curr:%d)",
			 ((SDEBUG_OPT_RARE_TSF & sdebug_opts) ?
				atomic_read(&sdebug_a_tsf) :
				atomic_read(&sdebug_cmnd_count)));
	else
		b[0] = '\0';

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
		"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
		"every_nth=%d%s\n"
		"delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
		"sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
		"command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
		"host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
		"usec_in_jiffy=%lu\n",
		SCSI_DEBUG_VERSION, sdebug_version_date,
		sdebug_num_tgts, sdebug_dev_size_mb, sdebug_opts,
		sdebug_every_nth, b, sdebug_delay, sdebug_ndelay,
		sdebug_max_luns, atomic_read(&sdebug_completions),
		sdebug_sector_size, sdebug_cylinders_per, sdebug_heads,
		sdebug_sectors_per, num_aborts, num_dev_resets,
		num_target_resets, num_bus_resets, num_host_resets,
		dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);

	/* report the span of busy queue slots, if any are in use */
	f = find_first_bit(queued_in_use_bm, sdebug_max_queue);
	if (f != sdebug_max_queue) {
		l = find_last_bit(queued_in_use_bm, sdebug_max_queue);
		seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
			   "queued_in_use_bm", f, l);
	}
	return 0;
}
4130 
4131 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4132 {
4133 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_delay);
4134 }
4135 /* Returns -EBUSY if delay is being changed and commands are queued */
4136 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4137 			   size_t count)
4138 {
4139 	int delay, res;
4140 
4141 	if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4142 		res = count;
4143 		if (sdebug_delay != delay) {
4144 			unsigned long iflags;
4145 			int k;
4146 
4147 			spin_lock_irqsave(&queued_arr_lock, iflags);
4148 			k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4149 			if (k != sdebug_max_queue)
4150 				res = -EBUSY;	/* have queued commands */
4151 			else {
4152 				sdebug_delay = delay;
4153 				sdebug_ndelay = 0;
4154 			}
4155 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4156 		}
4157 		return res;
4158 	}
4159 	return -EINVAL;
4160 }
4161 static DRIVER_ATTR_RW(delay);
4162 
4163 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4164 {
4165 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4166 }
4167 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4168 /* If > 0 and accepted then sdebug_delay is set to DELAY_OVERRIDDEN */
4169 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4170 			   size_t count)
4171 {
4172 	unsigned long iflags;
4173 	int ndelay, res, k;
4174 
4175 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4176 	    (ndelay >= 0) && (ndelay < 1000000000)) {
4177 		res = count;
4178 		if (sdebug_ndelay != ndelay) {
4179 			spin_lock_irqsave(&queued_arr_lock, iflags);
4180 			k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4181 			if (k != sdebug_max_queue)
4182 				res = -EBUSY;	/* have queued commands */
4183 			else {
4184 				sdebug_ndelay = ndelay;
4185 				sdebug_delay = ndelay ? DELAY_OVERRIDDEN
4186 							  : DEF_DELAY;
4187 			}
4188 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4189 		}
4190 		return res;
4191 	}
4192 	return -EINVAL;
4193 }
4194 static DRIVER_ATTR_RW(ndelay);
4195 
4196 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4197 {
4198 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4199 }
4200 
4201 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4202 			  size_t count)
4203 {
4204         int opts;
4205 	char work[20];
4206 
4207         if (1 == sscanf(buf, "%10s", work)) {
4208 		if (0 == strncasecmp(work,"0x", 2)) {
4209 			if (1 == sscanf(&work[2], "%x", &opts))
4210 				goto opts_done;
4211 		} else {
4212 			if (1 == sscanf(work, "%d", &opts))
4213 				goto opts_done;
4214 		}
4215 	}
4216 	return -EINVAL;
4217 opts_done:
4218 	sdebug_opts = opts;
4219 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4220 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4221 	atomic_set(&sdebug_cmnd_count, 0);
4222 	atomic_set(&sdebug_a_tsf, 0);
4223 	return count;
4224 }
4225 static DRIVER_ATTR_RW(opts);
4226 
4227 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4228 {
4229 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4230 }
4231 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4232 			   size_t count)
4233 {
4234         int n;
4235 
4236 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4237 		sdebug_ptype = n;
4238 		return count;
4239 	}
4240 	return -EINVAL;
4241 }
4242 static DRIVER_ATTR_RW(ptype);
4243 
4244 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4245 {
4246 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4247 }
4248 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4249 			    size_t count)
4250 {
4251         int n;
4252 
4253 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4254 		sdebug_dsense = n;
4255 		return count;
4256 	}
4257 	return -EINVAL;
4258 }
4259 static DRIVER_ATTR_RW(dsense);
4260 
4261 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4262 {
4263 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4264 }
4265 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4266 			     size_t count)
4267 {
4268         int n;
4269 
4270 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4271 		n = (n > 0);
4272 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4273 		if (sdebug_fake_rw != n) {
4274 			if ((0 == n) && (NULL == fake_storep)) {
4275 				unsigned long sz =
4276 					(unsigned long)sdebug_dev_size_mb *
4277 					1048576;
4278 
4279 				fake_storep = vmalloc(sz);
4280 				if (NULL == fake_storep) {
4281 					pr_err("out of memory, 9\n");
4282 					return -ENOMEM;
4283 				}
4284 				memset(fake_storep, 0, sz);
4285 			}
4286 			sdebug_fake_rw = n;
4287 		}
4288 		return count;
4289 	}
4290 	return -EINVAL;
4291 }
4292 static DRIVER_ATTR_RW(fake_rw);
4293 
4294 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4295 {
4296 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4297 }
4298 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4299 			      size_t count)
4300 {
4301         int n;
4302 
4303 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4304 		sdebug_no_lun_0 = n;
4305 		return count;
4306 	}
4307 	return -EINVAL;
4308 }
4309 static DRIVER_ATTR_RW(no_lun_0);
4310 
4311 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4312 {
4313 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4314 }
4315 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4316 			      size_t count)
4317 {
4318         int n;
4319 
4320 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4321 		sdebug_num_tgts = n;
4322 		sdebug_max_tgts_luns();
4323 		return count;
4324 	}
4325 	return -EINVAL;
4326 }
4327 static DRIVER_ATTR_RW(num_tgts);
4328 
4329 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4330 {
4331 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4332 }
4333 static DRIVER_ATTR_RO(dev_size_mb);
4334 
4335 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4336 {
4337 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4338 }
4339 static DRIVER_ATTR_RO(num_parts);
4340 
4341 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4342 {
4343 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4344 }
4345 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4346 			       size_t count)
4347 {
4348         int nth;
4349 
4350 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4351 		sdebug_every_nth = nth;
4352 		atomic_set(&sdebug_cmnd_count, 0);
4353 		return count;
4354 	}
4355 	return -EINVAL;
4356 }
4357 static DRIVER_ATTR_RW(every_nth);
4358 
4359 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4360 {
4361 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4362 }
4363 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4364 			      size_t count)
4365 {
4366         int n;
4367 	bool changed;
4368 
4369 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4370 		changed = (sdebug_max_luns != n);
4371 		sdebug_max_luns = n;
4372 		sdebug_max_tgts_luns();
4373 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4374 			struct sdebug_host_info *sdhp;
4375 			struct sdebug_dev_info *dp;
4376 
4377 			spin_lock(&sdebug_host_list_lock);
4378 			list_for_each_entry(sdhp, &sdebug_host_list,
4379 					    host_list) {
4380 				list_for_each_entry(dp, &sdhp->dev_info_list,
4381 						    dev_list) {
4382 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4383 						dp->uas_bm);
4384 				}
4385 			}
4386 			spin_unlock(&sdebug_host_list_lock);
4387 		}
4388 		return count;
4389 	}
4390 	return -EINVAL;
4391 }
4392 static DRIVER_ATTR_RW(max_luns);
4393 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	unsigned long iflags;
	int n, k;

	/* accept only 1 .. SCSI_DEBUG_CANQUEUE */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SCSI_DEBUG_CANQUEUE)) {
		spin_lock_irqsave(&queued_arr_lock, iflags);
		/* k = highest busy slot, or SCSI_DEBUG_CANQUEUE if none */
		k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
		sdebug_max_queue = n;
		if (SCSI_DEBUG_CANQUEUE == k)
			/* no slots busy: nothing needs draining */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* slots at/above the new limit are still in flight;
			 * remember the old high-water mark so they drain */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4423 
4424 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4425 {
4426 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4427 }
4428 static DRIVER_ATTR_RO(no_uld);
4429 
4430 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4431 {
4432 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4433 }
4434 static DRIVER_ATTR_RO(scsi_level);
4435 
4436 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4437 {
4438 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4439 }
4440 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4441 				size_t count)
4442 {
4443         int n;
4444 	bool changed;
4445 
4446 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4447 		changed = (sdebug_virtual_gb != n);
4448 		sdebug_virtual_gb = n;
4449 		sdebug_capacity = get_sdebug_capacity();
4450 		if (changed) {
4451 			struct sdebug_host_info *sdhp;
4452 			struct sdebug_dev_info *dp;
4453 
4454 			spin_lock(&sdebug_host_list_lock);
4455 			list_for_each_entry(sdhp, &sdebug_host_list,
4456 					    host_list) {
4457 				list_for_each_entry(dp, &sdhp->dev_info_list,
4458 						    dev_list) {
4459 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4460 						dp->uas_bm);
4461 				}
4462 			}
4463 			spin_unlock(&sdebug_host_list_lock);
4464 		}
4465 		return count;
4466 	}
4467 	return -EINVAL;
4468 }
4469 static DRIVER_ATTR_RW(virtual_gb);
4470 
4471 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4472 {
4473 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4474 }
4475 
4476 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4477 			      size_t count)
4478 {
4479 	int delta_hosts;
4480 
4481 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4482 		return -EINVAL;
4483 	if (delta_hosts > 0) {
4484 		do {
4485 			sdebug_add_adapter();
4486 		} while (--delta_hosts);
4487 	} else if (delta_hosts < 0) {
4488 		do {
4489 			sdebug_remove_adapter();
4490 		} while (++delta_hosts);
4491 	}
4492 	return count;
4493 }
4494 static DRIVER_ATTR_RW(add_host);
4495 
4496 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4497 {
4498 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4499 }
4500 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4501 				    size_t count)
4502 {
4503 	int n;
4504 
4505 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4506 		sdebug_vpd_use_hostno = n;
4507 		return count;
4508 	}
4509 	return -EINVAL;
4510 }
4511 static DRIVER_ATTR_RW(vpd_use_hostno);
4512 
4513 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4514 {
4515 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4516 }
4517 static DRIVER_ATTR_RO(sector_size);
4518 
4519 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4520 {
4521 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4522 }
4523 static DRIVER_ATTR_RO(dix);
4524 
4525 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4526 {
4527 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4528 }
4529 static DRIVER_ATTR_RO(dif);
4530 
4531 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4532 {
4533 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4534 }
4535 static DRIVER_ATTR_RO(guard);
4536 
4537 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4538 {
4539 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4540 }
4541 static DRIVER_ATTR_RO(ato);
4542 
/* Show the provision map as a range list; when logical block provisioning
 * is off, report the whole store as one range. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* "%*pbl" formats the bitmap as a comma-separated bit-range list;
	 * PAGE_SIZE - 1 leaves room for the trailing newline + NUL */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4559 
4560 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4561 {
4562 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4563 }
4564 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4565 			       size_t count)
4566 {
4567 	int n;
4568 
4569 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4570 		sdebug_removable = (n > 0);
4571 		return count;
4572 	}
4573 	return -EINVAL;
4574 }
4575 static DRIVER_ATTR_RW(removable);
4576 
4577 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4578 {
4579 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4580 }
4581 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4582 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4583 			       size_t count)
4584 {
4585 	int n;
4586 
4587 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4588 		sdebug_host_lock = (n > 0);
4589 		return count;
4590 	}
4591 	return -EINVAL;
4592 }
4593 static DRIVER_ATTR_RW(host_lock);
4594 
4595 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4596 {
4597 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4598 }
4599 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4600 			    size_t count)
4601 {
4602 	int n;
4603 
4604 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4605 		sdebug_strict = (n > 0);
4606 		return count;
4607 	}
4608 	return -EINVAL;
4609 }
4610 static DRIVER_ATTR_RW(strict);
4611 
4612 
4613 /* Note: The following array creates attribute files in the
4614    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4615    files (over those found in the /sys/module/scsi_debug/parameters
4616    directory) is that auxiliary actions can be triggered when an attribute
4617    is changed. For example see: sdebug_add_host_store() above.
4618  */
4619 
/* Driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug/.
 * Registered via ATTRIBUTE_GROUPS() below and hooked up through
 * pseudo_lld_bus.drv_groups. The trailing NULL terminates the array as
 * sysfs requires. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
4651 
4652 static struct device *pseudo_primary;
4653 
4654 static int __init scsi_debug_init(void)
4655 {
4656 	unsigned long sz;
4657 	int host_to_add;
4658 	int k;
4659 	int ret;
4660 
4661 	atomic_set(&sdebug_cmnd_count, 0);
4662 	atomic_set(&sdebug_completions, 0);
4663 	atomic_set(&retired_max_queue, 0);
4664 
4665 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4666 		pr_warn("ndelay must be less than 1 second, ignored\n");
4667 		sdebug_ndelay = 0;
4668 	} else if (sdebug_ndelay > 0)
4669 		sdebug_delay = DELAY_OVERRIDDEN;
4670 
4671 	switch (sdebug_sector_size) {
4672 	case  512:
4673 	case 1024:
4674 	case 2048:
4675 	case 4096:
4676 		break;
4677 	default:
4678 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
4679 		return -EINVAL;
4680 	}
4681 
4682 	switch (sdebug_dif) {
4683 
4684 	case SD_DIF_TYPE0_PROTECTION:
4685 	case SD_DIF_TYPE1_PROTECTION:
4686 	case SD_DIF_TYPE2_PROTECTION:
4687 	case SD_DIF_TYPE3_PROTECTION:
4688 		break;
4689 
4690 	default:
4691 		pr_err("dif must be 0, 1, 2 or 3\n");
4692 		return -EINVAL;
4693 	}
4694 
4695 	if (sdebug_guard > 1) {
4696 		pr_err("guard must be 0 or 1\n");
4697 		return -EINVAL;
4698 	}
4699 
4700 	if (sdebug_ato > 1) {
4701 		pr_err("ato must be 0 or 1\n");
4702 		return -EINVAL;
4703 	}
4704 
4705 	if (sdebug_physblk_exp > 15) {
4706 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4707 		return -EINVAL;
4708 	}
4709 
4710 	if (sdebug_lowest_aligned > 0x3fff) {
4711 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4712 		return -EINVAL;
4713 	}
4714 
4715 	if (sdebug_dev_size_mb < 1)
4716 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4717 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4718 	sdebug_store_sectors = sz / sdebug_sector_size;
4719 	sdebug_capacity = get_sdebug_capacity();
4720 
4721 	/* play around with geometry, don't waste too much on track 0 */
4722 	sdebug_heads = 8;
4723 	sdebug_sectors_per = 32;
4724 	if (sdebug_dev_size_mb >= 256)
4725 		sdebug_heads = 64;
4726 	else if (sdebug_dev_size_mb >= 16)
4727 		sdebug_heads = 32;
4728 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4729 			       (sdebug_sectors_per * sdebug_heads);
4730 	if (sdebug_cylinders_per >= 1024) {
4731 		/* other LLDs do this; implies >= 1GB ram disk ... */
4732 		sdebug_heads = 255;
4733 		sdebug_sectors_per = 63;
4734 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4735 			       (sdebug_sectors_per * sdebug_heads);
4736 	}
4737 
4738 	if (0 == sdebug_fake_rw) {
4739 		fake_storep = vmalloc(sz);
4740 		if (NULL == fake_storep) {
4741 			pr_err("out of memory, 1\n");
4742 			return -ENOMEM;
4743 		}
4744 		memset(fake_storep, 0, sz);
4745 		if (sdebug_num_parts > 0)
4746 			sdebug_build_parts(fake_storep, sz);
4747 	}
4748 
4749 	if (sdebug_dix) {
4750 		int dif_size;
4751 
4752 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4753 		dif_storep = vmalloc(dif_size);
4754 
4755 		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4756 
4757 		if (dif_storep == NULL) {
4758 			pr_err("out of mem. (DIX)\n");
4759 			ret = -ENOMEM;
4760 			goto free_vm;
4761 		}
4762 
4763 		memset(dif_storep, 0xff, dif_size);
4764 	}
4765 
4766 	/* Logical Block Provisioning */
4767 	if (scsi_debug_lbp()) {
4768 		sdebug_unmap_max_blocks =
4769 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
4770 
4771 		sdebug_unmap_max_desc =
4772 			clamp(sdebug_unmap_max_desc, 0U, 256U);
4773 
4774 		sdebug_unmap_granularity =
4775 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
4776 
4777 		if (sdebug_unmap_alignment &&
4778 		    sdebug_unmap_granularity <=
4779 		    sdebug_unmap_alignment) {
4780 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4781 			return -EINVAL;
4782 		}
4783 
4784 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4785 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4786 
4787 		pr_info("%lu provisioning blocks\n", map_size);
4788 
4789 		if (map_storep == NULL) {
4790 			pr_err("out of mem. (MAP)\n");
4791 			ret = -ENOMEM;
4792 			goto free_vm;
4793 		}
4794 
4795 		bitmap_zero(map_storep, map_size);
4796 
4797 		/* Map first 1KB for partition table */
4798 		if (sdebug_num_parts)
4799 			map_region(0, 2);
4800 	}
4801 
4802 	pseudo_primary = root_device_register("pseudo_0");
4803 	if (IS_ERR(pseudo_primary)) {
4804 		pr_warn("root_device_register() error\n");
4805 		ret = PTR_ERR(pseudo_primary);
4806 		goto free_vm;
4807 	}
4808 	ret = bus_register(&pseudo_lld_bus);
4809 	if (ret < 0) {
4810 		pr_warn("bus_register error: %d\n", ret);
4811 		goto dev_unreg;
4812 	}
4813 	ret = driver_register(&sdebug_driverfs_driver);
4814 	if (ret < 0) {
4815 		pr_warn("driver_register error: %d\n", ret);
4816 		goto bus_unreg;
4817 	}
4818 
4819 	host_to_add = sdebug_add_host;
4820 	sdebug_add_host = 0;
4821 
4822         for (k = 0; k < host_to_add; k++) {
4823                 if (sdebug_add_adapter()) {
4824 			pr_err("sdebug_add_adapter failed k=%d\n", k);
4825                         break;
4826                 }
4827         }
4828 
4829 	if (sdebug_verbose)
4830 		pr_info("built %d host(s)\n", sdebug_add_host);
4831 
4832 	return 0;
4833 
4834 bus_unreg:
4835 	bus_unregister(&pseudo_lld_bus);
4836 dev_unreg:
4837 	root_device_unregister(pseudo_primary);
4838 free_vm:
4839 	vfree(map_storep);
4840 	vfree(dif_storep);
4841 	vfree(fake_storep);
4842 
4843 	return ret;
4844 }
4845 
4846 static void __exit scsi_debug_exit(void)
4847 {
4848 	int k = sdebug_add_host;
4849 
4850 	stop_all_queued();
4851 	free_all_queued();
4852 	for (; k; k--)
4853 		sdebug_remove_adapter();
4854 	driver_unregister(&sdebug_driverfs_driver);
4855 	bus_unregister(&pseudo_lld_bus);
4856 	root_device_unregister(pseudo_primary);
4857 
4858 	vfree(dif_storep);
4859 	vfree(fake_storep);
4860 }
4861 
4862 device_initcall(scsi_debug_init);
4863 module_exit(scsi_debug_exit);
4864 
/* .release callback for an emulated adapter device: frees the embedding
 * sdebug_host_info allocated in sdebug_add_adapter(). */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
4872 
4873 static int sdebug_add_adapter(void)
4874 {
4875 	int k, devs_per_host;
4876         int error = 0;
4877         struct sdebug_host_info *sdbg_host;
4878 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4879 
4880         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4881         if (NULL == sdbg_host) {
4882 		pr_err("out of memory at line %d\n", __LINE__);
4883                 return -ENOMEM;
4884         }
4885 
4886         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4887 
4888 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
4889         for (k = 0; k < devs_per_host; k++) {
4890 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4891 		if (!sdbg_devinfo) {
4892 			pr_err("out of memory at line %d\n", __LINE__);
4893                         error = -ENOMEM;
4894 			goto clean;
4895                 }
4896         }
4897 
4898         spin_lock(&sdebug_host_list_lock);
4899         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4900         spin_unlock(&sdebug_host_list_lock);
4901 
4902         sdbg_host->dev.bus = &pseudo_lld_bus;
4903         sdbg_host->dev.parent = pseudo_primary;
4904         sdbg_host->dev.release = &sdebug_release_adapter;
4905 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
4906 
4907         error = device_register(&sdbg_host->dev);
4908 
4909         if (error)
4910 		goto clean;
4911 
4912 	++sdebug_add_host;
4913         return error;
4914 
4915 clean:
4916 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4917 				 dev_list) {
4918 		list_del(&sdbg_devinfo->dev_list);
4919 		kfree(sdbg_devinfo);
4920 	}
4921 
4922 	kfree(sdbg_host);
4923         return error;
4924 }
4925 
4926 static void sdebug_remove_adapter(void)
4927 {
4928         struct sdebug_host_info * sdbg_host = NULL;
4929 
4930         spin_lock(&sdebug_host_list_lock);
4931         if (!list_empty(&sdebug_host_list)) {
4932                 sdbg_host = list_entry(sdebug_host_list.prev,
4933                                        struct sdebug_host_info, host_list);
4934 		list_del(&sdbg_host->host_list);
4935 	}
4936         spin_unlock(&sdebug_host_list_lock);
4937 
4938 	if (!sdbg_host)
4939 		return;
4940 
4941 	device_unregister(&sdbg_host->dev);
4942 	--sdebug_add_host;
4943 }
4944 
4945 static int
4946 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4947 {
4948 	int num_in_q = 0;
4949 	unsigned long iflags;
4950 	struct sdebug_dev_info *devip;
4951 
4952 	spin_lock_irqsave(&queued_arr_lock, iflags);
4953 	devip = (struct sdebug_dev_info *)sdev->hostdata;
4954 	if (NULL == devip) {
4955 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
4956 		return	-ENODEV;
4957 	}
4958 	num_in_q = atomic_read(&devip->num_in_q);
4959 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
4960 
4961 	if (qdepth < 1)
4962 		qdepth = 1;
4963 	/* allow to exceed max host queued_arr elements for testing */
4964 	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4965 		qdepth = SCSI_DEBUG_CANQUEUE + 10;
4966 	scsi_change_queue_depth(sdev, qdepth);
4967 
4968 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
4969 		sdev_printk(KERN_INFO, sdev,
4970 			    "%s: qdepth=%d, num_in_q=%d\n",
4971 			    __func__, qdepth, num_in_q);
4972 	}
4973 	return sdev->queue_depth;
4974 }
4975 
4976 static int
4977 check_inject(struct scsi_cmnd *scp)
4978 {
4979 	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4980 
4981 	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
4982 
4983 	if (atomic_inc_return(&sdebug_cmnd_count) >= abs(sdebug_every_nth)) {
4984 		atomic_set(&sdebug_cmnd_count, 0);
4985 		if (sdebug_every_nth < -1)
4986 			sdebug_every_nth = -1;
4987 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
4988 			return 1; /* ignore command causing timeout */
4989 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
4990 			 scsi_medium_access_command(scp))
4991 			return 1; /* time out reads and writes */
4992 		if (sdebug_any_injecting_opt) {
4993 			if (SDEBUG_OPT_RECOVERED_ERR & sdebug_opts)
4994 				ep->inj_recovered = true;
4995 			if (SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts)
4996 				ep->inj_transport = true;
4997 			if (SDEBUG_OPT_DIF_ERR & sdebug_opts)
4998 				ep->inj_dif = true;
4999 			if (SDEBUG_OPT_DIX_ERR & sdebug_opts)
5000 				ep->inj_dix = true;
5001 			if (SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts)
5002 				ep->inj_short = true;
5003 		}
5004 	}
5005 	return 0;
5006 }
5007 
/*
 * queuecommand entry point: decode the CDB through opcode_ind_arr /
 * opcode_info_arr, apply option-driven checks (strict CDB-mask check,
 * pending unit attentions, stopped state, error injection) and then call
 * the matching resp_* handler. All outcomes funnel through
 * schedule_resp(), which mimics completion (possibly delayed by
 * sdebug_delay / sdebug_ndelay).
 */
static int
scsi_debug_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	/* optionally hex-dump the CDB bytes to the kernel log */
	if (sdebug_verbose && !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts)) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
	}
	/* LUNs beyond max_luns are rejected, except the REPORT LUNS
	 * well-known LUN */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if ((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)
		return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (!devip) {
		devip = devInfoReg(sdp);
		if (NULL == devip)
			return schedule_resp(scp, NULL, DID_NO_CONNECT << 16,
					     0);
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		/* scan the attached array for a (opcode, service action)
		 * or plain opcode match */
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no match found: bad SA or opcode */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (F_INV_OP & flags) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (sdebug_strict) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find the highest offending bit for the
				 * sense data's field pointer */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the command skips UAs */
	if (!(F_SKIP_UA & flags) &&
	    SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
		errsts = check_readiness(scp, UAS_ONLY, devip);
		if (errsts)
			goto check_cond;
	}
	/* medium-access commands fail while the unit is "stopped" */
	if ((F_M_ACCESS & flags) && devip->stopped) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (sdebug_every_nth) {
		if (check_inject(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (oip->pfp)	/* if this command has a resp_* function, call it */
		errsts = oip->pfp(scp, devip);
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_delay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
}
5143 
/* Host template shared by all emulated adapters. can_queue and
 * use_clustering are adjusted from module parameters in
 * sdebug_driver_probe() before scsi_host_alloc(). cmd_size reserves
 * per-command private space used by check_inject(). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scmd_extra_t),
};
5171 
5172 static int sdebug_driver_probe(struct device * dev)
5173 {
5174 	int error = 0;
5175 	struct sdebug_host_info *sdbg_host;
5176 	struct Scsi_Host *hpnt;
5177 	int host_prot;
5178 
5179 	sdbg_host = to_sdebug_host(dev);
5180 
5181 	sdebug_driver_template.can_queue = sdebug_max_queue;
5182 	if (sdebug_clustering)
5183 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5184 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5185 	if (NULL == hpnt) {
5186 		pr_err("scsi_host_alloc failed\n");
5187 		error = -ENODEV;
5188 		return error;
5189 	}
5190 
5191         sdbg_host->shost = hpnt;
5192 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5193 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5194 		hpnt->max_id = sdebug_num_tgts + 1;
5195 	else
5196 		hpnt->max_id = sdebug_num_tgts;
5197 	/* = sdebug_max_luns; */
5198 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5199 
5200 	host_prot = 0;
5201 
5202 	switch (sdebug_dif) {
5203 
5204 	case SD_DIF_TYPE1_PROTECTION:
5205 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
5206 		if (sdebug_dix)
5207 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5208 		break;
5209 
5210 	case SD_DIF_TYPE2_PROTECTION:
5211 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
5212 		if (sdebug_dix)
5213 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5214 		break;
5215 
5216 	case SD_DIF_TYPE3_PROTECTION:
5217 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
5218 		if (sdebug_dix)
5219 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5220 		break;
5221 
5222 	default:
5223 		if (sdebug_dix)
5224 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5225 		break;
5226 	}
5227 
5228 	scsi_host_set_prot(hpnt, host_prot);
5229 
5230 	pr_info("host protection%s%s%s%s%s%s%s\n",
5231 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5232 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5233 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5234 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5235 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5236 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5237 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5238 
5239 	if (sdebug_guard == 1)
5240 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5241 	else
5242 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5243 
5244 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5245 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5246         error = scsi_add_host(hpnt, &sdbg_host->dev);
5247         if (error) {
5248 		pr_err("scsi_add_host failed\n");
5249                 error = -ENODEV;
5250 		scsi_host_put(hpnt);
5251         } else
5252 		scsi_scan_host(hpnt);
5253 
5254 	return error;
5255 }
5256 
5257 static int sdebug_driver_remove(struct device * dev)
5258 {
5259         struct sdebug_host_info *sdbg_host;
5260 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5261 
5262 	sdbg_host = to_sdebug_host(dev);
5263 
5264 	if (!sdbg_host) {
5265 		pr_err("Unable to locate host info\n");
5266 		return -ENODEV;
5267 	}
5268 
5269         scsi_remove_host(sdbg_host->shost);
5270 
5271 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5272 				 dev_list) {
5273                 list_del(&sdbg_devinfo->dev_list);
5274                 kfree(sdbg_devinfo);
5275         }
5276 
5277         scsi_host_put(sdbg_host->shost);
5278         return 0;
5279 }
5280 
/* Bus match callback: every driver matches every device on the pseudo
 * bus, so registering an adapter device always invokes the probe. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5286 
/* The "pseudo" bus that carries the emulated adapter devices; drv_groups
 * publishes the sdebug_drv sysfs attributes for the driver. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5294