xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision c4837394)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "1.86"
63 static const char *sdebug_version_date = "20160430";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define UA_RESET_ASC 0x29
78 #define UA_CHANGED_ASC 0x2a
79 #define TARGET_CHANGED_ASC 0x3f
80 #define LUNS_CHANGED_ASCQ 0x0e
81 #define INSUFF_RES_ASC 0x55
82 #define INSUFF_RES_ASCQ 0x3
83 #define POWER_ON_RESET_ASCQ 0x0
84 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
85 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
86 #define CAPACITY_CHANGED_ASCQ 0x9
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91 #define MISCOMPARE_VERIFY_ASC 0x1d
92 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
93 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
94 
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97 
98 /* Default values for driver parameters */
99 #define DEF_NUM_HOST   1
100 #define DEF_NUM_TGTS   1
101 #define DEF_MAX_LUNS   1
102 /* With these defaults, this driver will make 1 host with 1 target
103  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
104  */
105 #define DEF_ATO 1
106 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
107 #define DEF_DEV_SIZE_MB   8
108 #define DEF_DIF 0
109 #define DEF_DIX 0
110 #define DEF_D_SENSE   0
111 #define DEF_EVERY_NTH   0
112 #define DEF_FAKE_RW	0
113 #define DEF_GUARD 0
114 #define DEF_HOST_LOCK 0
115 #define DEF_LBPU 0
116 #define DEF_LBPWS 0
117 #define DEF_LBPWS10 0
118 #define DEF_LBPRZ 1
119 #define DEF_LOWEST_ALIGNED 0
120 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
121 #define DEF_NO_LUN_0   0
122 #define DEF_NUM_PARTS   0
123 #define DEF_OPTS   0
124 #define DEF_OPT_BLKS 1024
125 #define DEF_PHYSBLK_EXP 0
126 #define DEF_PTYPE   TYPE_DISK
127 #define DEF_REMOVABLE false
128 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
129 #define DEF_SECTOR_SIZE 512
130 #define DEF_UNMAP_ALIGNMENT 0
131 #define DEF_UNMAP_GRANULARITY 1
132 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
133 #define DEF_UNMAP_MAX_DESC 256
134 #define DEF_VIRTUAL_GB   0
135 #define DEF_VPD_USE_HOSTNO 1
136 #define DEF_WRITESAME_LENGTH 0xFFFF
137 #define DEF_STRICT 0
138 #define DEF_STATISTICS false
139 #define DEF_SUBMIT_QUEUES 1
140 #define JDELAY_OVERRIDDEN -9999
141 
142 #define SDEBUG_LUN_0_VAL 0
143 
144 /* bit mask values for sdebug_opts */
145 #define SDEBUG_OPT_NOISE		1
146 #define SDEBUG_OPT_MEDIUM_ERR		2
147 #define SDEBUG_OPT_TIMEOUT		4
148 #define SDEBUG_OPT_RECOVERED_ERR	8
149 #define SDEBUG_OPT_TRANSPORT_ERR	16
150 #define SDEBUG_OPT_DIF_ERR		32
151 #define SDEBUG_OPT_DIX_ERR		64
152 #define SDEBUG_OPT_MAC_TIMEOUT		128
153 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
154 #define SDEBUG_OPT_Q_NOISE		0x200
155 #define SDEBUG_OPT_ALL_TSF		0x400
156 #define SDEBUG_OPT_RARE_TSF		0x800
157 #define SDEBUG_OPT_N_WCE		0x1000
158 #define SDEBUG_OPT_RESET_NOISE		0x2000
159 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
160 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
161 			      SDEBUG_OPT_RESET_NOISE)
162 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
163 				  SDEBUG_OPT_TRANSPORT_ERR | \
164 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
165 				  SDEBUG_OPT_SHORT_TRANSFER)
166 /* When "every_nth" > 0 then modulo "every_nth" commands:
167  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
168  *   - a RECOVERED_ERROR is simulated on successful read and write
169  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
170  *   - a TRANSPORT_ERROR is simulated on successful read and write
171  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
172  *
173  * When "every_nth" < 0 then after "- every_nth" commands:
174  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
175  *   - a RECOVERED_ERROR is simulated on successful read and write
176  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
177  *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
179  * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
181  * every_nth via sysfs).
182  */
183 
184 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
185  * priority order. In the subset implemented here lower numbers have higher
186  * priority. The UA numbers should be a sequence starting from 0 with
187  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
188 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
189 #define SDEBUG_UA_BUS_RESET 1
190 #define SDEBUG_UA_MODE_CHANGED 2
191 #define SDEBUG_UA_CAPACITY_CHANGED 3
192 #define SDEBUG_UA_LUNS_CHANGED 4
193 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
194 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
195 #define SDEBUG_NUM_UAS 7
196 
197 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
198  * sector on read commands: */
199 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
200 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
201 
202 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
203  * or "peripheral device" addressing (value 0) */
204 #define SAM2_LUN_ADDRESS_METHOD 0
205 
206 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
207  * (for response) per submit queue at one time. Can be reduced by max_queue
208  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
209  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
210  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
211  * but cannot exceed SDEBUG_CANQUEUE .
212  */
213 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
214 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
215 #define DEF_CMD_PER_LUN  255
216 
217 #define F_D_IN			1
218 #define F_D_OUT			2
219 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
220 #define F_D_UNKN		8
221 #define F_RL_WLUN_OK		0x10
222 #define F_SKIP_UA		0x20
223 #define F_DELAY_OVERR		0x40
224 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
225 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
226 #define F_INV_OP		0x200
227 #define F_FAKE_RW		0x400
228 #define F_M_ACCESS		0x800	/* media access */
229 
230 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
231 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
232 #define FF_SA (F_SA_HIGH | F_SA_LOW)
233 
234 #define SDEBUG_MAX_PARTS 4
235 
236 #define SDEBUG_MAX_CMD_LEN 32
237 
238 
/* State for one simulated logical unit (SCSI device). */
struct sdebug_dev_info {
	struct list_head dev_list;	/* on owning host's dev_info_list */
	unsigned int channel;		/* SCSI nexus: channel number */
	unsigned int target;		/* SCSI nexus: target id */
	u64 lun;			/* SCSI nexus: logical unit number */
	struct sdebug_host_info *sdbg_host;	/* owning simulated host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit
					 * positions are the SDEBUG_UA_*
					 * values (see make_ua()) */
	atomic_t num_in_q;		/* commands currently queued on LU */
	atomic_t stopped;	/* NOTE(review): presumably set by START STOP
				 * UNIT handling — confirm in resp_start_stop */
	bool used;			/* slot in use (device exists) */
};
250 
/* State for one simulated host adapter and its attached devices. */
struct sdebug_host_info {
	struct list_head host_list;	/* on global sdebug_host_list */
	struct Scsi_Host *shost;	/* mid-layer host object */
	struct device dev;		/* pseudo device for sysfs/driver core */
	struct list_head dev_info_list;	/* list of sdebug_dev_info */
};
257 
258 #define to_sdebug_host(d)	\
259 	container_of(d, struct sdebug_host_info, dev)
260 
/* Deferred-response state for one queued command. Holds both an hrtimer
 * and a workqueue item; which mechanism fires the completion is chosen by
 * the issuing path (not visible in this chunk). */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on;
				 * presumably compared with the completion cpu
				 * for the sdebug_miss_cpus statistic */
};
268 
/* One slot in a submit queue's qc_arr[]. */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* command occupying this slot */
	/* per-command error-injection flags; they mirror the
	 * SDEBUG_OPT_* injection option bits */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
};
281 
/* One submit queue: a fixed array of command slots plus a bitmap marking
 * which slots are occupied. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;	/* NOTE(review): presumably guards qc_arr[]
				 * and in_use_bm[] — confirm at use sites */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
288 
289 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
290 static atomic_t sdebug_completions;  /* count of deferred completions */
291 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
292 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
293 
/* Describes one supported SCSI command (or a CDB-size variant of one):
 * how to validate its CDB and which response function services it. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of the F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
				/* response handler; may be NULL */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
				/* ignore cdb bytes after position 15 */
};
305 
306 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Row indexes into opcode_info_arr[]; opcode_ind_arr[] maps each SCSI
 * opcode byte onto one of these values. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
};
340 
341 
/* Maps cdb[0] (the SCSI opcode byte) to a SDEB_I_* index. Entries of 0
 * mean the opcode is unsupported (SDEB_I_INVALID_OPCODE). */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
	     0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
384 
385 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
386 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
387 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
388 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
389 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
390 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
391 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
392 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
393 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
394 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
406 
/* MODE SENSE(6) variant, attached to the MODE SENSE(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t msense_iarr[1] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
411 
/* MODE SELECT(6) variant, attached to the MODE SELECT(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t mselect_iarr[1] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
416 
/* READ(10)/(6)/(12) variants, attached to the READ(16) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t read_iarr[3] = {
	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
427 
/* WRITE(10)/(6)/(12) variants, attached to the WRITE(16) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t write_iarr[3] = {
	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
438 
/* GET LBA STATUS (SERVICE ACTION IN(16), sa 0x12), attached to the
 * READ CAPACITY(16) entry in opcode_info_arr[]. */
static const struct opcode_info_t sa_in_iarr[1] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },
};
444 
/* WRITE(32) variant, attached to the VARIABLE LENGTH (READ(32)) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
};
450 
/* REPORT SUPPORTED OPERATION CODES (sa 0xc) and REPORT SUPPORTED TASK
 * MANAGEMENT FUNCTIONS (sa 0xd), attached to the MAINTENANCE IN entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t maint_in_iarr[2] = {
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },
};
459 
/* WRITE SAME(16) variant, attached to the WRITE SAME(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t write_same_iarr[1] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
465 
/* RESERVE(6) variant, attached to the RESERVE(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t reserve_iarr[1] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
470 
/* RELEASE(6) variant, attached to the RELEASE(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t release_iarr[1] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
475 
476 
477 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
478  * plus the terminating elements for logic that scans this table such as
479  * REPORT SUPPORTED OPERATION CODES. */
480 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
481 /* 0 */
482 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
483 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
485 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
487 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
488 	     0, 0} },
489 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
490 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
491 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
492 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
493 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
494 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
495 	     0} },
496 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
497 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
498 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
499 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
500 	     0, 0, 0} },
501 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
502 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
503 	     0, 0} },
504 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
505 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
506 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
507 /* 10 */
508 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
509 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
510 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
511 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
512 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
513 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
514 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
516 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
517 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
519 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
520 	     0} },
521 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
522 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
524 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
525 	     0, 0, 0, 0, 0, 0} },
526 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
527 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
528 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
529 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
530 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
531 	     0} },
532 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
533 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
534 	     0} },
535 /* 20 */
536 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
537 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
538 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
539 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
540 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
541 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
542 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
543 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
544 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
545 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
546 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
547 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
548 		   0, 0, 0, 0, 0, 0} },
549 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
550 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
551 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
552 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
553 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
554 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
555 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
556 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
557 	     0, 0, 0, 0} },
558 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
559 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
560 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
561 
562 /* 30 */
563 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
564 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
565 };
566 
567 static int sdebug_add_host = DEF_NUM_HOST;
568 static int sdebug_ato = DEF_ATO;
569 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
570 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
571 static int sdebug_dif = DEF_DIF;
572 static int sdebug_dix = DEF_DIX;
573 static int sdebug_dsense = DEF_D_SENSE;
574 static int sdebug_every_nth = DEF_EVERY_NTH;
575 static int sdebug_fake_rw = DEF_FAKE_RW;
576 static unsigned int sdebug_guard = DEF_GUARD;
577 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
578 static int sdebug_max_luns = DEF_MAX_LUNS;
579 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
580 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
581 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
582 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
583 static int sdebug_no_uld;
584 static int sdebug_num_parts = DEF_NUM_PARTS;
585 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
586 static int sdebug_opt_blks = DEF_OPT_BLKS;
587 static int sdebug_opts = DEF_OPTS;
588 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
589 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
590 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
591 static int sdebug_sector_size = DEF_SECTOR_SIZE;
592 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
593 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
594 static unsigned int sdebug_lbpu = DEF_LBPU;
595 static unsigned int sdebug_lbpws = DEF_LBPWS;
596 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
597 static unsigned int sdebug_lbprz = DEF_LBPRZ;
598 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
599 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
600 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
601 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
602 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
603 static bool sdebug_removable = DEF_REMOVABLE;
604 static bool sdebug_clustering;
605 static bool sdebug_host_lock = DEF_HOST_LOCK;
606 static bool sdebug_strict = DEF_STRICT;
607 static bool sdebug_any_injecting_opt;
608 static bool sdebug_verbose;
609 static bool have_dif_prot;
610 static bool sdebug_statistics = DEF_STATISTICS;
611 static bool sdebug_mq_active;
612 
613 static unsigned int sdebug_store_sectors;
614 static sector_t sdebug_capacity;	/* in sectors */
615 
616 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
617    may still need them */
618 static int sdebug_heads;		/* heads per disk */
619 static int sdebug_cylinders_per;	/* cylinders per surface */
620 static int sdebug_sectors_per;		/* sectors per cylinder */
621 
622 static LIST_HEAD(sdebug_host_list);
623 static DEFINE_SPINLOCK(sdebug_host_list_lock);
624 
625 static unsigned char *fake_storep;	/* ramdisk storage */
626 static struct sd_dif_tuple *dif_storep;	/* protection info */
627 static void *map_storep;		/* provisioning map */
628 
629 static unsigned long map_size;
630 static int num_aborts;
631 static int num_dev_resets;
632 static int num_target_resets;
633 static int num_bus_resets;
634 static int num_host_resets;
635 static int dix_writes;
636 static int dix_reads;
637 static int dif_errors;
638 
639 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
640 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
641 
642 static DEFINE_RWLOCK(atomic_rw);
643 
644 static char sdebug_proc_name[] = MY_NAME;
645 static const char *my_name = MY_NAME;
646 
647 static struct bus_type pseudo_lld_bus;
648 
649 static struct device_driver sdebug_driverfs_driver = {
650 	.name 		= sdebug_proc_name,
651 	.bus		= &pseudo_lld_bus,
652 };
653 
654 static const int check_condition_result =
655 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
656 
657 static const int illegal_condition_result =
658 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
659 
660 static const int device_qfull_result =
661 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
662 
663 
664 static inline unsigned int scsi_debug_lbp(void)
665 {
666 	return 0 == sdebug_fake_rw &&
667 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
668 }
669 
/* Map an LBA to its backing address in the fake_storep ramdisk. do_div()
 * replaces its first argument with the quotient and returns the remainder,
 * so lba becomes lba % sdebug_store_sectors: a virtual capacity larger than
 * the allocated store wraps around within it. */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
676 
/* Map a sector number to its protection-information tuple in dif_storep.
 * sector_div() replaces its first argument with the quotient and returns
 * the remainder, so sector becomes sector % sdebug_store_sectors (same
 * wrap-around scheme as fake_store()). */
static struct sd_dif_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
683 
684 static void sdebug_max_tgts_luns(void)
685 {
686 	struct sdebug_host_info *sdbg_host;
687 	struct Scsi_Host *hpnt;
688 
689 	spin_lock(&sdebug_host_list_lock);
690 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
691 		hpnt = sdbg_host->shost;
692 		if ((hpnt->this_id >= 0) &&
693 		    (sdebug_num_tgts > hpnt->this_id))
694 			hpnt->max_id = sdebug_num_tgts + 1;
695 		else
696 			hpnt->max_id = sdebug_num_tgts;
697 		/* sdebug_max_luns; */
698 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
699 	}
700 	spin_unlock(&sdebug_host_list_lock);
701 }
702 
/* Whether an invalid-field error refers to the data-out (parameter list)
 * buffer or to the CDB itself; see mk_sense_invalid_fld(). */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
704 
/* Build an ILLEGAL REQUEST sense response with asc INVALID FIELD IN CDB
 * (when c_d == SDEB_IN_CDB) or INVALID FIELD IN PARAMETER LIST, including
 * sense-key specific data that points at the offending byte (in_byte) and,
 * optionally, the offending bit within it.
 * Set in_bit to -1 to indicate no bit position of invalid field. */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes, per SPC-4 */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense-key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer, big endian */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor
		 * (type 0x2, length 0x6) and bump the additional length */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15-17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
745 
746 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
747 {
748 	unsigned char *sbuff;
749 
750 	sbuff = scp->sense_buffer;
751 	if (!sbuff) {
752 		sdev_printk(KERN_ERR, scp->device,
753 			    "%s: sense_buffer is NULL\n", __func__);
754 		return;
755 	}
756 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
757 
758 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
759 
760 	if (sdebug_verbose)
761 		sdev_printk(KERN_INFO, scp->device,
762 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
763 			    my_name, key, asc, asq);
764 }
765 
/* Respond to an unsupported opcode: ILLEGAL REQUEST with asc
 * INVALID COMMAND OPERATION CODE (0x20), ascq 0. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
770 
771 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
772 {
773 	if (sdebug_verbose) {
774 		if (0x1261 == cmd)
775 			sdev_printk(KERN_INFO, dev,
776 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
777 		else if (0x5331 == cmd)
778 			sdev_printk(KERN_INFO, dev,
779 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
780 				    __func__);
781 		else
782 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
783 				    __func__, cmd);
784 	}
785 	return -EINVAL;
786 	/* return -ENOTTY; // correct return but upsets fdisk */
787 }
788 
/*
 * Clear the LUNS_CHANGED unit attention on every LUN sharing devip's
 * simulated host and target.  Called from make_ua() for SPC-4 and above,
 * where "reported luns data has changed" is to be reported only once per
 * target.  Walks all simulated hosts under sdebug_host_list_lock.
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
804 
/*
 * If a unit attention (UA) condition is pending for this device, consume
 * the lowest-numbered one from devip->uas_bm, set the matching sense data
 * on scp and return check_condition_result.  Returns 0 when no UA is
 * pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	/* lowest set bit is serviced (and cleared) first */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
884 
/*
 * Copy up to arr_len bytes from arr into scp's data-in scatter list.
 * Returns 0 if ok else (DID_ERROR << 16).  Sets scp->resid to the number
 * of requested bytes that were not supplied.
 */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* no data-in buffer: nothing to fill */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* not a read-like command */

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
903 
/*
 * Copy up to arr_len bytes from scp's data-out buffer into arr.
 * Returns number of bytes fetched into 'arr' or -1 if error (i.e. the
 * command is not a write-like command).
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
915 
916 
/* INQUIRY strings: vendor id is 8 bytes and product id 16 bytes, space
 * padded.  inq_product_rev must correspond to SDEBUG_VERSION (see top of
 * file). */
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0186";	/* version less '.' */
/* NAA-5 identifier templates; per-device/port numbers are added in */
static const u64 naa5_comp_a = 0x5222222000000000ULL;
static const u64 naa5_comp_b = 0x5333333000000000ULL;
static const u64 naa5_comp_c = 0x5111111000000000ULL;
923 
/* Device identification VPD page (0x83). Returns number of bytes placed
 * in arr.  Emits a faked T10 vendor-id designator, NAA-5 designators for
 * the logical unit, target port, target port group and target device,
 * and a SCSI name string, all derived from the numeric ids supplied. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
			   int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
		num += 8;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." prefix + 8 fixed + 8 hex digits + 4 NUL pad = 24 bytes */
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
999 
/* Canned payload for the Software interface identification VPD page
 * (0x84), starting at its 4th byte: three 6-byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page; returns payload length */
static int inquiry_evpd_84(unsigned char * arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1012 
/*
 * Management network addresses VPD page (0x85).  Emits two network
 * service descriptors (a storage-configuration URL and a logging URL),
 * each NUL terminated and zero padded to a multiple of 4 bytes.
 * Returns the number of bytes placed in arr.
 */
static int inquiry_evpd_85(unsigned char * arr)
{
	static const char *cfg_url = "https://www.kernel.org/config";
	static const char *log_url = "http://www.kernel.org/log";
	int n = 0;
	int olen, plen;

	/* descriptor 1: lu, storage configuration service */
	arr[n++] = 0x1;
	arr[n++] = 0x0;		/* reserved */
	arr[n++] = 0x0;
	olen = strlen(cfg_url);
	plen = (olen + 1 + 3) & ~3;	/* NUL terminated, padded to 4 */
	arr[n++] = plen;
	memcpy(arr + n, cfg_url, olen);
	memset(arr + n + olen, 0, plen - olen);
	n += plen;

	/* descriptor 2: lu, logging service */
	arr[n++] = 0x4;
	arr[n++] = 0x0;		/* reserved */
	arr[n++] = 0x0;
	olen = strlen(log_url);
	plen = (olen + 1 + 3) & ~3;	/* NUL terminated, padded to 4 */
	arr[n++] = plen;
	memcpy(arr + n, log_url, olen);
	memset(arr + n + olen, 0, plen - olen);
	n += plen;

	return n;
}
1047 
/* SCSI ports VPD page (0x88).  Describes two relative target ports
 * (primary and secondary) with NAA-5 target port identifiers derived
 * from target_dev_id.  Returns the number of bytes placed in arr. */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1089 
1090 
1091 static unsigned char vpd89_data[] = {
1092 /* from 4th byte */ 0,0,0,0,
1093 'l','i','n','u','x',' ',' ',' ',
1094 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1095 '1','2','3','4',
1096 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1097 0xec,0,0,0,
1098 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1099 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1100 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1101 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1102 0x53,0x41,
1103 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1104 0x20,0x20,
1105 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1106 0x10,0x80,
1107 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1108 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1109 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1110 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1111 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1112 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1113 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1118 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1119 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1120 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1122 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1123 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1124 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1125 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1126 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1127 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1128 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1129 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1130 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1131 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1132 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1133 };
1134 
/* ATA Information VPD page (0x89): copies the canned SAT identify-device
 * payload in vpd89_data and returns its length. */
static int inquiry_evpd_89(unsigned char * arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1141 
1142 
/* Block limits VPD page (0xb0) template, from its 4th byte onward; the
 * configurable limit fields are patched in by inquiry_evpd_b0(). */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1149 
1150 /* Block limits VPD page (SBC-3) */
1151 static int inquiry_evpd_b0(unsigned char * arr)
1152 {
1153 	unsigned int gran;
1154 
1155 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1156 
1157 	/* Optimal transfer length granularity */
1158 	gran = 1 << sdebug_physblk_exp;
1159 	put_unaligned_be16(gran, arr + 2);
1160 
1161 	/* Maximum Transfer Length */
1162 	if (sdebug_store_sectors > 0x400)
1163 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1164 
1165 	/* Optimal Transfer Length */
1166 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1167 
1168 	if (sdebug_lbpu) {
1169 		/* Maximum Unmap LBA Count */
1170 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1171 
1172 		/* Maximum Unmap Block Descriptor Count */
1173 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1174 	}
1175 
1176 	/* Unmap Granularity Alignment */
1177 	if (sdebug_unmap_alignment) {
1178 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1179 		arr[28] |= 0x80; /* UGAVALID */
1180 	}
1181 
1182 	/* Optimal Unmap Granularity */
1183 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1184 
1185 	/* Maximum WRITE SAME Length */
1186 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1187 
1188 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1189 
1190 	return sizeof(vpdb0_data);
1191 }
1192 
/*
 * Block device characteristics VPD page (SBC-3, 0xb1).  Reports a
 * non-rotating (solid state) medium with a small nominal form factor.
 * Returns the page length (0x3c).
 */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* bytes 0 and 2 stay zero (cleared above) */
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1204 
1205 /* Logical block provisioning VPD page (SBC-3) */
1206 static int inquiry_evpd_b2(unsigned char *arr)
1207 {
1208 	memset(arr, 0, 0x4);
1209 	arr[0] = 0;			/* threshold exponent */
1210 
1211 	if (sdebug_lbpu)
1212 		arr[1] = 1 << 7;
1213 
1214 	if (sdebug_lbpws)
1215 		arr[1] |= 1 << 6;
1216 
1217 	if (sdebug_lbpws10)
1218 		arr[1] |= 1 << 5;
1219 
1220 	if (sdebug_lbprz)
1221 		arr[1] |= 1 << 2;
1222 
1223 	return 0x4;
1224 }
1225 
1226 #define SDEBUG_LONG_INQ_SZ 96
1227 #define SDEBUG_MAX_INQ_ARR_SZ 584
1228 
/*
 * INQUIRY.  Handles the standard inquiry response and the EVPD pages
 * advertised by page 0x00 (0x80, 0x83..0x89, 0xb0..0xb2).  The obsolete
 * CMDDT bit is rejected with an illegal-field sense.  The response is
 * truncated to the CDB's allocation length.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			arr[n++] = 0x89;  /* ATA information */
			arr[n++] = 0xb0;  /* Block limits (SBC) */
			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
				arr[n++] = 0xb2;
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
						 target_dev_id, lu_id_num,
						 lu_id_str, len);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (sdebug_dif)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
		} else if (0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			/* page 0x89 is the only one with a 2 byte length */
			n = inquiry_evpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b0(&arr[4]);
		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b1(&arr[4]);
		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* be16 at arr+2 covers both 1 and 2 byte length pages */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] = 0x10; /* claim: implicit TPGS; NOTE(review): this
				* overwrites the PROTECT bit set just above;
				* confirm whether "|=" was intended */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
	n = 62;
	if (sdebug_ptype == TYPE_DISK) {
		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
	} else if (sdebug_ptype == TYPE_TAPE) {
		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
	}
	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1372 
/* Informational exceptions control mode page (0x1c) current values;
 * resp_requests() consults its TEST bit (0x4 in byte 2) and MRIE field
 * (low nibble of byte 3). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1375 
1376 static int resp_requests(struct scsi_cmnd * scp,
1377 			 struct sdebug_dev_info * devip)
1378 {
1379 	unsigned char * sbuff;
1380 	unsigned char *cmd = scp->cmnd;
1381 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1382 	bool dsense;
1383 	int len = 18;
1384 
1385 	memset(arr, 0, sizeof(arr));
1386 	dsense = !!(cmd[1] & 1);
1387 	sbuff = scp->sense_buffer;
1388 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1389 		if (dsense) {
1390 			arr[0] = 0x72;
1391 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1392 			arr[2] = THRESHOLD_EXCEEDED;
1393 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1394 			len = 8;
1395 		} else {
1396 			arr[0] = 0x70;
1397 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1398 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1399 			arr[12] = THRESHOLD_EXCEEDED;
1400 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1401 		}
1402 	} else {
1403 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1404 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1405 			;	/* have sense and formats match */
1406 		else if (arr[0] <= 0x70) {
1407 			if (dsense) {
1408 				memset(arr, 0, 8);
1409 				arr[0] = 0x72;
1410 				len = 8;
1411 			} else {
1412 				memset(arr, 0, 18);
1413 				arr[0] = 0x70;
1414 				arr[7] = 0xa;
1415 			}
1416 		} else if (dsense) {
1417 			memset(arr, 0, 8);
1418 			arr[0] = 0x72;
1419 			arr[1] = sbuff[2];     /* sense key */
1420 			arr[2] = sbuff[12];    /* asc */
1421 			arr[3] = sbuff[13];    /* ascq */
1422 			len = 8;
1423 		} else {
1424 			memset(arr, 0, 18);
1425 			arr[0] = 0x70;
1426 			arr[2] = sbuff[1];
1427 			arr[7] = 0xa;
1428 			arr[12] = sbuff[1];
1429 			arr[13] = sbuff[3];
1430 		}
1431 
1432 	}
1433 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1434 	return fill_from_dev_buffer(scp, arr, len);
1435 }
1436 
1437 static int resp_start_stop(struct scsi_cmnd * scp,
1438 			   struct sdebug_dev_info * devip)
1439 {
1440 	unsigned char *cmd = scp->cmnd;
1441 	int power_cond, stop;
1442 
1443 	power_cond = (cmd[4] & 0xf0) >> 4;
1444 	if (power_cond) {
1445 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1446 		return check_condition_result;
1447 	}
1448 	stop = !(cmd[4] & 1);
1449 	atomic_xchg(&devip->stopped, stop);
1450 	return 0;
1451 }
1452 
1453 static sector_t get_sdebug_capacity(void)
1454 {
1455 	static const unsigned int gibibyte = 1073741824;
1456 
1457 	if (sdebug_virtual_gb > 0)
1458 		return (sector_t)sdebug_virtual_gb *
1459 			(gibibyte / sdebug_sector_size);
1460 	else
1461 		return sdebug_store_sectors;
1462 }
1463 
1464 #define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY(10).  Returns the last LBA (capacity - 1) and sector
 * size; reports 0xffffffff when the capacity does not fit in 32 bits,
 * directing the initiator to READ CAPACITY(16).
 */
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
1482 
1483 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY(16).  Returns the last LBA, sector size, physical block
 * exponent, lowest aligned LBA and, when configured, the logical block
 * provisioning (LBPME/LBPRZ) and protection (P_TYPE/PROT_EN) fields.
 * Response is truncated to the CDB's allocation length.
 */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		if (sdebug_lbprz)
			arr[14] |= 0x40; /* LBPRZ */
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (sdebug_dif) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1516 
1517 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1518 
/*
 * REPORT TARGET PORT GROUPS (MAINTENANCE IN).  Presents the two ports
 * claimed by EVPD page 0x88 as two single-port target port groups; the
 * group containing port B is reported unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1594 
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).  With
 * reporting_opts==0 every supported (non-F_INV_OP) entry of
 * opcode_info_arr, including attached variants, is listed; with 1, 2 or
 * 3 a single opcode (optionally qualified by service action) is
 * described.  The RCTD bit adds a dummy command timeouts descriptor to
 * each entry.  supp codes: 1 == not supported, 3 == supported per spec.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* allocate with slack beyond the (capped) allocation length */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, or 20 with timeouts (RCTD) */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit this opcode's attached variants, if any */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer loop cursor */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode needs a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				/* scan attached variants for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* scan attached variants for the sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* append the usage data (CDB mask) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1745 
1746 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1747 			  struct sdebug_dev_info *devip)
1748 {
1749 	bool repd;
1750 	u32 alloc_len, len;
1751 	u8 arr[16];
1752 	u8 *cmd = scp->cmnd;
1753 
1754 	memset(arr, 0, sizeof(arr));
1755 	repd = !!(cmd[2] & 0x80);
1756 	alloc_len = get_unaligned_be32(cmd + 6);
1757 	if (alloc_len < 4) {
1758 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1759 		return check_condition_result;
1760 	}
1761 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1762 	arr[1] = 0x1;		/* ITNRS */
1763 	if (repd) {
1764 		arr[3] = 0xc;
1765 		len = 16;
1766 	} else
1767 		len = 4;
1768 
1769 	len = (len < alloc_len) ? len : alloc_len;
1770 	return fill_from_dev_buffer(scp, arr, len);
1771 }
1772 
1773 /* <<Following mode page info copied from ST318451LW>> */
1774 
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char def_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0,
					       0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
1785 
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char def_pg[] = {0x2, 0xe, 128, 128, 0, 10,
					       0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
1796 
1797 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1798 {       /* Format device page for mode_sense */
1799 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1800 				     0, 0, 0, 0, 0, 0, 0, 0,
1801 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1802 
1803 	memcpy(p, format_pg, sizeof(format_pg));
1804 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1805 	put_unaligned_be16(sdebug_sector_size, p + 12);
1806 	if (sdebug_removable)
1807 		p[20] |= 0x20; /* should agree with INQUIRY */
1808 	if (1 == pcontrol)
1809 		memset(p + 2, 0, sizeof(format_pg) - 2);
1810 	return sizeof(format_pg);
1811 }
1812 
/* Current values of the Caching mode page (0x8). File scope and not const
 * because resp_mode_select() writes user-supplied changes back into it. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1816 
1817 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1818 { 	/* Caching page for mode_sense */
1819 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1820 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1821 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1822 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1823 
1824 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1825 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1826 	memcpy(p, caching_pg, sizeof(caching_pg));
1827 	if (1 == pcontrol)
1828 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1829 	else if (2 == pcontrol)
1830 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1831 	return sizeof(caching_pg);
1832 }
1833 
/* Current values of the Control mode page (0xa). File scope and not const
 * so resp_mode_select() can persist changes such as the D_SENSE bit. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1836 
1837 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1838 { 	/* Control mode page for mode_sense */
1839 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1840 				        0, 0, 0, 0};
1841 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1842 				     0, 0, 0x2, 0x4b};
1843 
1844 	if (sdebug_dsense)
1845 		ctrl_m_pg[2] |= 0x4;
1846 	else
1847 		ctrl_m_pg[2] &= ~0x4;
1848 
1849 	if (sdebug_ato)
1850 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1851 
1852 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1853 	if (1 == pcontrol)
1854 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1855 	else if (2 == pcontrol)
1856 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1857 	return sizeof(ctrl_m_pg);
1858 }
1859 
1860 
1861 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1862 {	/* Informational Exceptions control mode page for mode_sense */
1863 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1864 				       0, 0, 0x0, 0x0};
1865 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1866 				      0, 0, 0x0, 0x0};
1867 
1868 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1869 	if (1 == pcontrol)
1870 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1871 	else if (2 == pcontrol)
1872 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1873 	return sizeof(iec_m_pg);
1874 }
1875 
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP short-format Protocol Specific Port mode page (0x19) */
	static const unsigned char sf_pg[] = {0x19, 0x6, 0x6, 0x0,
					      0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sf_pg, sizeof(sf_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(sf_pg) - 2);
	return sizeof(sf_pg);
}
1886 
1887 
1888 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1889 			      int target_dev_id)
1890 {	/* SAS phy control and discover mode page for mode_sense */
1891 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1892 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1893 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1894 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1895 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1896 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1897 		    0, 0, 0, 0, 0, 0, 0, 0,
1898 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1899 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1900 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1901 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1902 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1903 		    0, 0, 0, 0, 0, 0, 0, 0,
1904 		};
1905 	int port_a, port_b;
1906 
1907 	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
1908 	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
1909 	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
1910 	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
1911 	port_a = target_dev_id + 1;
1912 	port_b = port_a + 1;
1913 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1914 	put_unaligned_be32(port_a, p + 20);
1915 	put_unaligned_be32(port_b, p + 48 + 20);
1916 	if (1 == pcontrol)
1917 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1918 	return sizeof(sas_pcd_m_pg);
1919 }
1920 
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x59, 0x2) */
	static const unsigned char sha_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6,
					       0x10, 0, 0, 0, 0, 0, 0, 0,
					       0, 0};

	memcpy(p, sha_pg, sizeof(sha_pg));
	if (pcontrol == 1)	/* changeable: zero all but the 4 byte header */
		memset(p + 4, 0, sizeof(sha_pg) - 4);
	return sizeof(sha_pg);
}
1932 
1933 #define SDEBUG_MAX_MSENSE_SZ 256
1934 
1935 static int resp_mode_sense(struct scsi_cmnd *scp,
1936 			   struct sdebug_dev_info *devip)
1937 {
1938 	unsigned char dbd, llbaa;
1939 	int pcontrol, pcode, subpcode, bd_len;
1940 	unsigned char dev_spec;
1941 	int alloc_len, msense_6, offset, len, target_dev_id;
1942 	int target = scp->device->id;
1943 	unsigned char * ap;
1944 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1945 	unsigned char *cmd = scp->cmnd;
1946 
1947 	dbd = !!(cmd[1] & 0x8);
1948 	pcontrol = (cmd[2] & 0xc0) >> 6;
1949 	pcode = cmd[2] & 0x3f;
1950 	subpcode = cmd[3];
1951 	msense_6 = (MODE_SENSE == cmd[0]);
1952 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1953 	if ((sdebug_ptype == TYPE_DISK) && (dbd == 0))
1954 		bd_len = llbaa ? 16 : 8;
1955 	else
1956 		bd_len = 0;
1957 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1958 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1959 	if (0x3 == pcontrol) {  /* Saving values not supported */
1960 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1961 		return check_condition_result;
1962 	}
1963 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1964 			(devip->target * 1000) - 3;
1965 	/* for disks set DPOFUA bit and clear write protect (WP) bit */
1966 	if (sdebug_ptype == TYPE_DISK)
1967 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
1968 	else
1969 		dev_spec = 0x0;
1970 	if (msense_6) {
1971 		arr[2] = dev_spec;
1972 		arr[3] = bd_len;
1973 		offset = 4;
1974 	} else {
1975 		arr[3] = dev_spec;
1976 		if (16 == bd_len)
1977 			arr[4] = 0x1;	/* set LONGLBA bit */
1978 		arr[7] = bd_len;	/* assume 255 or less */
1979 		offset = 8;
1980 	}
1981 	ap = arr + offset;
1982 	if ((bd_len > 0) && (!sdebug_capacity))
1983 		sdebug_capacity = get_sdebug_capacity();
1984 
1985 	if (8 == bd_len) {
1986 		if (sdebug_capacity > 0xfffffffe)
1987 			put_unaligned_be32(0xffffffff, ap + 0);
1988 		else
1989 			put_unaligned_be32(sdebug_capacity, ap + 0);
1990 		put_unaligned_be16(sdebug_sector_size, ap + 6);
1991 		offset += bd_len;
1992 		ap = arr + offset;
1993 	} else if (16 == bd_len) {
1994 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
1995 		put_unaligned_be32(sdebug_sector_size, ap + 12);
1996 		offset += bd_len;
1997 		ap = arr + offset;
1998 	}
1999 
2000 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2001 		/* TODO: Control Extension page */
2002 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2003 		return check_condition_result;
2004 	}
2005 	switch (pcode) {
2006 	case 0x1:	/* Read-Write error recovery page, direct access */
2007 		len = resp_err_recov_pg(ap, pcontrol, target);
2008 		offset += len;
2009 		break;
2010 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2011 		len = resp_disconnect_pg(ap, pcontrol, target);
2012 		offset += len;
2013 		break;
2014         case 0x3:       /* Format device page, direct access */
2015                 len = resp_format_pg(ap, pcontrol, target);
2016                 offset += len;
2017                 break;
2018 	case 0x8:	/* Caching page, direct access */
2019 		len = resp_caching_pg(ap, pcontrol, target);
2020 		offset += len;
2021 		break;
2022 	case 0xa:	/* Control Mode page, all devices */
2023 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2024 		offset += len;
2025 		break;
2026 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2027 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2028 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2029 			return check_condition_result;
2030 	        }
2031 		len = 0;
2032 		if ((0x0 == subpcode) || (0xff == subpcode))
2033 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2034 		if ((0x1 == subpcode) || (0xff == subpcode))
2035 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2036 						  target_dev_id);
2037 		if ((0x2 == subpcode) || (0xff == subpcode))
2038 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2039 		offset += len;
2040 		break;
2041 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2042 		len = resp_iec_m_pg(ap, pcontrol, target);
2043 		offset += len;
2044 		break;
2045 	case 0x3f:	/* Read all Mode pages */
2046 		if ((0 == subpcode) || (0xff == subpcode)) {
2047 			len = resp_err_recov_pg(ap, pcontrol, target);
2048 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2049 			len += resp_format_pg(ap + len, pcontrol, target);
2050 			len += resp_caching_pg(ap + len, pcontrol, target);
2051 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2052 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2053 			if (0xff == subpcode) {
2054 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2055 						  target, target_dev_id);
2056 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2057 			}
2058 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2059 		} else {
2060 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2061 			return check_condition_result;
2062                 }
2063 		offset += len;
2064 		break;
2065 	default:
2066 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2067 		return check_condition_result;
2068 	}
2069 	if (msense_6)
2070 		arr[0] = offset - 1;
2071 	else
2072 		put_unaligned_be16((offset - 2), arr + 0);
2073 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2074 }
2075 
2076 #define SDEBUG_MAX_MSELECT_SZ 512
2077 
2078 static int resp_mode_select(struct scsi_cmnd *scp,
2079 			    struct sdebug_dev_info *devip)
2080 {
2081 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2082 	int param_len, res, mpage;
2083 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2084 	unsigned char *cmd = scp->cmnd;
2085 	int mselect6 = (MODE_SELECT == cmd[0]);
2086 
2087 	memset(arr, 0, sizeof(arr));
2088 	pf = cmd[1] & 0x10;
2089 	sp = cmd[1] & 0x1;
2090 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2091 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2092 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2093 		return check_condition_result;
2094 	}
2095         res = fetch_to_dev_buffer(scp, arr, param_len);
2096         if (-1 == res)
2097 		return DID_ERROR << 16;
2098 	else if (sdebug_verbose && (res < param_len))
2099 		sdev_printk(KERN_INFO, scp->device,
2100 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2101 			    __func__, param_len, res);
2102 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2103 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2104 	if (md_len > 2) {
2105 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2106 		return check_condition_result;
2107 	}
2108 	off = bd_len + (mselect6 ? 4 : 8);
2109 	mpage = arr[off] & 0x3f;
2110 	ps = !!(arr[off] & 0x80);
2111 	if (ps) {
2112 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2113 		return check_condition_result;
2114 	}
2115 	spf = !!(arr[off] & 0x40);
2116 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2117 		       (arr[off + 1] + 2);
2118 	if ((pg_len + off) > param_len) {
2119 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2120 				PARAMETER_LIST_LENGTH_ERR, 0);
2121 		return check_condition_result;
2122 	}
2123 	switch (mpage) {
2124 	case 0x8:      /* Caching Mode page */
2125 		if (caching_pg[1] == arr[off + 1]) {
2126 			memcpy(caching_pg + 2, arr + off + 2,
2127 			       sizeof(caching_pg) - 2);
2128 			goto set_mode_changed_ua;
2129 		}
2130 		break;
2131 	case 0xa:      /* Control Mode page */
2132 		if (ctrl_m_pg[1] == arr[off + 1]) {
2133 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2134 			       sizeof(ctrl_m_pg) - 2);
2135 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2136 			goto set_mode_changed_ua;
2137 		}
2138 		break;
2139 	case 0x1c:      /* Informational Exceptions Mode page */
2140 		if (iec_m_pg[1] == arr[off + 1]) {
2141 			memcpy(iec_m_pg + 2, arr + off + 2,
2142 			       sizeof(iec_m_pg) - 2);
2143 			goto set_mode_changed_ua;
2144 		}
2145 		break;
2146 	default:
2147 		break;
2148 	}
2149 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2150 	return check_condition_result;
2151 set_mode_changed_ua:
2152 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2153 	return 0;
2154 }
2155 
/* Temperature log page (0xd) payload: current temperature parameter
 * (38 C) and reference temperature parameter (65 C). Returns the payload
 * length. (Space-indented body lines normalized to tabs.) */
static int resp_temp_l_pg(unsigned char * arr)
{
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2165 
2166 static int resp_ie_l_pg(unsigned char * arr)
2167 {
2168 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2169 		};
2170 
2171         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2172 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2173 		arr[4] = THRESHOLD_EXCEEDED;
2174 		arr[5] = 0xff;
2175 	}
2176         return sizeof(ie_l_pg);
2177 }
2178 
2179 #define SDEBUG_MAX_LSENSE_SZ 512
2180 
2181 static int resp_log_sense(struct scsi_cmnd * scp,
2182                           struct sdebug_dev_info * devip)
2183 {
2184 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2185 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2186 	unsigned char *cmd = scp->cmnd;
2187 
2188 	memset(arr, 0, sizeof(arr));
2189 	ppc = cmd[1] & 0x2;
2190 	sp = cmd[1] & 0x1;
2191 	if (ppc || sp) {
2192 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2193 		return check_condition_result;
2194 	}
2195 	pcontrol = (cmd[2] & 0xc0) >> 6;
2196 	pcode = cmd[2] & 0x3f;
2197 	subpcode = cmd[3] & 0xff;
2198 	alloc_len = get_unaligned_be16(cmd + 7);
2199 	arr[0] = pcode;
2200 	if (0 == subpcode) {
2201 		switch (pcode) {
2202 		case 0x0:	/* Supported log pages log page */
2203 			n = 4;
2204 			arr[n++] = 0x0;		/* this page */
2205 			arr[n++] = 0xd;		/* Temperature */
2206 			arr[n++] = 0x2f;	/* Informational exceptions */
2207 			arr[3] = n - 4;
2208 			break;
2209 		case 0xd:	/* Temperature log page */
2210 			arr[3] = resp_temp_l_pg(arr + 4);
2211 			break;
2212 		case 0x2f:	/* Informational exceptions log page */
2213 			arr[3] = resp_ie_l_pg(arr + 4);
2214 			break;
2215 		default:
2216 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2217 			return check_condition_result;
2218 		}
2219 	} else if (0xff == subpcode) {
2220 		arr[0] |= 0x40;
2221 		arr[1] = subpcode;
2222 		switch (pcode) {
2223 		case 0x0:	/* Supported log pages and subpages log page */
2224 			n = 4;
2225 			arr[n++] = 0x0;
2226 			arr[n++] = 0x0;		/* 0,0 page */
2227 			arr[n++] = 0x0;
2228 			arr[n++] = 0xff;	/* this page */
2229 			arr[n++] = 0xd;
2230 			arr[n++] = 0x0;		/* Temperature */
2231 			arr[n++] = 0x2f;
2232 			arr[n++] = 0x0;	/* Informational exceptions */
2233 			arr[3] = n - 4;
2234 			break;
2235 		case 0xd:	/* Temperature subpages */
2236 			n = 4;
2237 			arr[n++] = 0xd;
2238 			arr[n++] = 0x0;		/* Temperature */
2239 			arr[3] = n - 4;
2240 			break;
2241 		case 0x2f:	/* Informational exceptions subpages */
2242 			n = 4;
2243 			arr[n++] = 0x2f;
2244 			arr[n++] = 0x0;		/* Informational exceptions */
2245 			arr[3] = n - 4;
2246 			break;
2247 		default:
2248 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2249 			return check_condition_result;
2250 		}
2251 	} else {
2252 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2253 		return check_condition_result;
2254 	}
2255 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2256 	return fill_from_dev_buffer(scp, arr,
2257 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2258 }
2259 
2260 static int check_device_access_params(struct scsi_cmnd *scp,
2261 				      unsigned long long lba, unsigned int num)
2262 {
2263 	if (lba + num > sdebug_capacity) {
2264 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2265 		return check_condition_result;
2266 	}
2267 	/* transfer length excessive (tie in to block limits VPD page) */
2268 	if (num > sdebug_store_sectors) {
2269 		/* needs work to find which cdb byte 'num' comes from */
2270 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2271 		return check_condition_result;
2272 	}
2273 	return 0;
2274 }
2275 
/* Copy 'num' sectors between scmd's data buffer and the fake store,
 * starting at logical block 'lba' modulo the store size (accesses wrap
 * around the end of the store). Direction is selected by do_write.
 * Returns the number of bytes copied, or -1 when the command's declared
 * data direction does not match. */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* reject a command whose direction disagrees (bidi is allowed) */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* block = lba % store size; 'rest' is the tail past the end */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* second copy for the wrapped portion at the store start */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2316 
2317 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2318  * arr into fake_store(lba,num) and return true. If comparison fails then
2319  * return false. */
2320 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2321 {
2322 	bool res;
2323 	u64 block, rest = 0;
2324 	u32 store_blks = sdebug_store_sectors;
2325 	u32 lb_size = sdebug_sector_size;
2326 
2327 	block = do_div(lba, store_blks);
2328 	if (block + num > store_blks)
2329 		rest = block + num - store_blks;
2330 
2331 	res = !memcmp(fake_storep + (block * lb_size), arr,
2332 		      (num - rest) * lb_size);
2333 	if (!res)
2334 		return res;
2335 	if (rest)
2336 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2337 			     rest * lb_size);
2338 	if (!res)
2339 		return res;
2340 	arr += num * lb_size;
2341 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2342 	if (rest)
2343 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2344 		       rest * lb_size);
2345 	return res;
2346 }
2347 
2348 static __be16 dif_compute_csum(const void *buf, int len)
2349 {
2350 	__be16 csum;
2351 
2352 	if (sdebug_guard)
2353 		csum = (__force __be16)ip_compute_csum(buf, len);
2354 	else
2355 		csum = cpu_to_be16(crc_t10dif(buf, len));
2356 
2357 	return csum;
2358 }
2359 
/* Verify one protection tuple against a sector of data. Returns 0 on
 * success, 0x01 on a guard (checksum) mismatch, 0x03 on a reference tag
 * mismatch (type 1 checks the low 32 bits of the LBA, type 2 the expected
 * initial LBA). The value is used by callers as the sense ASCQ. */
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2386 
/* Copy the protection tuples for 'sectors' sectors starting at 'sector'
 * between the command's protection sgl and dif_storep. read==true copies
 * store -> sgl, read==false copies sgl -> store. Handles wrap-around at
 * the end of the protection store. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the part of this chunk past the store's end */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at the store's start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2429 
/* Verify the stored protection tuples for a READ of 'sectors' sectors at
 * start_sec, then copy them into the command's protection sgl. Returns 0
 * on success or the non-zero dif_verify() code of the first failure. */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct sd_dif_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		/* an app tag of 0xffff means "do not check this block" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2458 
/* Respond to the READ family of commands (READ 6/10/12/16/32 and the read
 * half of XDWRITEREAD(10)): decode lba/num from the cdb, perform optional
 * protection-information checks and error injection, then copy data from
 * the fake store into the command's buffer. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA, used by DIF type 2 */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting lba and block count per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 requires RDPROTECT to be zero */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)	/* inject a short read */
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optionally simulate a medium error in a fixed LBA window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* apply any remaining injected error options after a good read */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2599 
/* Hex/ASCII dump of a sector's contents to the kernel log, 16 bytes per
 * output line (printable ASCII shown as " c ", others as two hex digits). */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char line[128];

		n = 0;
		for (j = 0; j < 16; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(line + n, sizeof(line) - n,
					       " %c ", c);
			else
				n += scnprintf(line + n, sizeof(line) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, line);
	}
}
2621 
/* Verify the protection tuples supplied with a WRITE against the data
 * buffer, walking the protection and data scatterlists in lock step, then
 * persist the tuples via dif_copy_prot(). Returns 0 on success or a
 * non-zero code (used as sense ASCQ) on a verify/iterator failure. */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct sd_dif_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct sd_dif_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: persist them in the protection store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2693 
2694 static unsigned long lba_to_map_index(sector_t lba)
2695 {
2696 	if (sdebug_unmap_alignment)
2697 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2698 	sector_div(lba, sdebug_unmap_granularity);
2699 	return lba;
2700 }
2701 
2702 static sector_t map_index_to_lba(unsigned long index)
2703 {
2704 	sector_t lba = index * sdebug_unmap_granularity;
2705 
2706 	if (sdebug_unmap_alignment)
2707 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2708 	return lba;
2709 }
2710 
2711 static unsigned int map_state(sector_t lba, unsigned int *num)
2712 {
2713 	sector_t end;
2714 	unsigned int mapped;
2715 	unsigned long index;
2716 	unsigned long next;
2717 
2718 	index = lba_to_map_index(lba);
2719 	mapped = test_bit(index, map_storep);
2720 
2721 	if (mapped)
2722 		next = find_next_zero_bit(map_storep, map_size, index);
2723 	else
2724 		next = find_next_bit(map_storep, map_size, index);
2725 
2726 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2727 	*num = end - lba;
2728 	return mapped;
2729 }
2730 
2731 static void map_region(sector_t lba, unsigned int len)
2732 {
2733 	sector_t end = lba + len;
2734 
2735 	while (lba < end) {
2736 		unsigned long index = lba_to_map_index(lba);
2737 
2738 		if (index < map_size)
2739 			set_bit(index, map_storep);
2740 
2741 		lba = map_index_to_lba(index + 1);
2742 	}
2743 }
2744 
2745 static void unmap_region(sector_t lba, unsigned int len)
2746 {
2747 	sector_t end = lba + len;
2748 
2749 	while (lba < end) {
2750 		unsigned long index = lba_to_map_index(lba);
2751 
2752 		if (lba == map_index_to_lba(index) &&
2753 		    lba + sdebug_unmap_granularity <= end &&
2754 		    index < map_size) {
2755 			clear_bit(index, map_storep);
2756 			if (sdebug_lbprz) {
2757 				memset(fake_storep +
2758 				       lba * sdebug_sector_size, 0,
2759 				       sdebug_sector_size *
2760 				       sdebug_unmap_granularity);
2761 			}
2762 			if (dif_storep) {
2763 				memset(dif_storep + lba, 0xff,
2764 				       sizeof(*dif_storep) *
2765 				       sdebug_unmap_granularity);
2766 			}
2767 		}
2768 		lba = map_index_to_lba(index + 1);
2769 	}
2770 }
2771 
/*
 * Respond to the WRITE family of commands: WRITE(6/10/12/16), the write
 * leg of XDWRITEREAD(10), and (by default) WRITE(32). Decodes LBA and
 * transfer length from the CDB, optionally verifies DIF/DIX protection
 * information, then copies the data-out buffer into the fake store
 * under the atomic_rw write lock. May also inject errors when the
 * corresponding sdebug options are set.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* per-opcode CDB field layout */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection rejects non-zero WRPROTECT bits */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection per queued-command flags */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
2892 
/*
 * Core of WRITE SAME(10/16): replicate one logical block of data (or
 * zeroes when @ndob is set) across @num blocks starting at @lba, or
 * deallocate the range when @unmap is set and logical block
 * provisioning is enabled. @ei_lba is accepted for interface symmetry
 * with the other resp_* helpers but is not used here.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	/* UNMAP set: deallocate instead of writing */
	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	/* byte offset of the first block in the fake store */
	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* written blocks become mapped under provisioning */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
2943 
2944 static int resp_write_same_10(struct scsi_cmnd *scp,
2945 			      struct sdebug_dev_info *devip)
2946 {
2947 	u8 *cmd = scp->cmnd;
2948 	u32 lba;
2949 	u16 num;
2950 	u32 ei_lba = 0;
2951 	bool unmap = false;
2952 
2953 	if (cmd[1] & 0x8) {
2954 		if (sdebug_lbpws10 == 0) {
2955 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2956 			return check_condition_result;
2957 		} else
2958 			unmap = true;
2959 	}
2960 	lba = get_unaligned_be32(cmd + 2);
2961 	num = get_unaligned_be16(cmd + 7);
2962 	if (num > sdebug_write_same_length) {
2963 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2964 		return check_condition_result;
2965 	}
2966 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
2967 }
2968 
2969 static int resp_write_same_16(struct scsi_cmnd *scp,
2970 			      struct sdebug_dev_info *devip)
2971 {
2972 	u8 *cmd = scp->cmnd;
2973 	u64 lba;
2974 	u32 num;
2975 	u32 ei_lba = 0;
2976 	bool unmap = false;
2977 	bool ndob = false;
2978 
2979 	if (cmd[1] & 0x8) {	/* UNMAP */
2980 		if (sdebug_lbpws == 0) {
2981 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2982 			return check_condition_result;
2983 		} else
2984 			unmap = true;
2985 	}
2986 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
2987 		ndob = true;
2988 	lba = get_unaligned_be64(cmd + 2);
2989 	num = get_unaligned_be32(cmd + 10);
2990 	if (num > sdebug_write_same_length) {
2991 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2992 		return check_condition_result;
2993 	}
2994 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
2995 }
2996 
2997 /* Note the mode field is in the same position as the (lower) service action
2998  * field. For the Report supported operation codes command, SPC-4 suggests
2999  * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	/* WRITE BUFFER is simulated only to the extent of raising the
	 * unit attentions a real microcode download would cause; no
	 * data is actually transferred or stored. */
	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				/* issuing LU gets only the bus reset UA */
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3045 
/*
 * Respond to COMPARE AND WRITE. The data-out buffer carries the compare
 * data followed by the write data (2 * num blocks total); the write is
 * only performed when the compare data matches the current medium
 * contents, all under the atomic_rw write lock.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	/* scratch buffer holds both the compare and write halves */
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare fails -> MISCOMPARE sense, medium left untouched */
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3121 
/* One UNMAP block descriptor as laid out in the UNMAP parameter list:
 * starting LBA, number of blocks, then 4 reserved bytes (16 bytes per
 * descriptor, all fields big-endian). */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3127 
3128 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3129 {
3130 	unsigned char *buf;
3131 	struct unmap_block_desc *desc;
3132 	unsigned int i, payload_len, descriptors;
3133 	int ret;
3134 	unsigned long iflags;
3135 
3136 
3137 	if (!scsi_debug_lbp())
3138 		return 0;	/* fib and say its done */
3139 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3140 	BUG_ON(scsi_bufflen(scp) != payload_len);
3141 
3142 	descriptors = (payload_len - 8) / 16;
3143 	if (descriptors > sdebug_unmap_max_desc) {
3144 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3145 		return check_condition_result;
3146 	}
3147 
3148 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3149 	if (!buf) {
3150 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3151 				INSUFF_RES_ASCQ);
3152 		return check_condition_result;
3153 	}
3154 
3155 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3156 
3157 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3158 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3159 
3160 	desc = (void *)&buf[8];
3161 
3162 	write_lock_irqsave(&atomic_rw, iflags);
3163 
3164 	for (i = 0 ; i < descriptors ; i++) {
3165 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3166 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3167 
3168 		ret = check_device_access_params(scp, lba, num);
3169 		if (ret)
3170 			goto out;
3171 
3172 		unmap_region(lba, num);
3173 	}
3174 
3175 	ret = 0;
3176 
3177 out:
3178 	write_unlock_irqrestore(&atomic_rw, iflags);
3179 	kfree(buf);
3180 
3181 	return ret;
3182 }
3183 
3184 #define SDEBUG_GET_LBA_STATUS_LEN 32
3185 
3186 static int resp_get_lba_status(struct scsi_cmnd *scp,
3187 			       struct sdebug_dev_info *devip)
3188 {
3189 	u8 *cmd = scp->cmnd;
3190 	u64 lba;
3191 	u32 alloc_len, mapped, num;
3192 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3193 	int ret;
3194 
3195 	lba = get_unaligned_be64(cmd + 2);
3196 	alloc_len = get_unaligned_be32(cmd + 10);
3197 
3198 	if (alloc_len < 24)
3199 		return 0;
3200 
3201 	ret = check_device_access_params(scp, lba, 1);
3202 	if (ret)
3203 		return ret;
3204 
3205 	if (scsi_debug_lbp())
3206 		mapped = map_state(lba, &num);
3207 	else {
3208 		mapped = 1;
3209 		/* following just in case virtual_gb changed */
3210 		sdebug_capacity = get_sdebug_capacity();
3211 		if (sdebug_capacity - lba <= 0xffffffff)
3212 			num = sdebug_capacity - lba;
3213 		else
3214 			num = 0xffffffff;
3215 	}
3216 
3217 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3218 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3219 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3220 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3221 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3222 
3223 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3224 }
3225 
3226 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3227  * (W-LUN), the normal Linux scanning logic does not associate it with a
3228  * device (e.g. /dev/sg7). The following magic will make that association:
3229  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3230  * where <n> is a host number. If there are multiple targets in a host then
3231  * the above will associate a W-LUN to each target. To only get a W-LUN
3232  * for target 2, then use "echo '- 2 49409' > scan" .
3233  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 *arr;
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int i, res;

	/* responding to REPORT LUNS clears the "luns changed" UA state */
	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC requires at least a 4-byte allocation length */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* decide how many normal LUNs and W-LUNs to report */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* when no_lun_0 is set, LUN 0 is not reported */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;

	/* 8-byte header followed by one 8-byte entry per LUN */
	rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
	arr = vmalloc(rlen);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	memset(arr, 0, rlen);
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* luns start at byte 8 in response following the header */
	lun_p = (struct scsi_lun *)&arr[8];

	/* LUNs use single level peripheral device addressing method */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (i = 0; i < lun_cnt; i++)
		int_to_scsilun(lun++, lun_p++);

	if (wlun_cnt)
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);

	/* LUN LIST LENGTH field in the header */
	put_unaligned_be32(rlen - 8, &arr[0]);

	res = fill_from_dev_buffer(scp, arr, rlen);
	vfree(arr);
	return res;
}
3315 
/*
 * XOR pass of XDWRITEREAD(10): combine the command's data-out payload
 * with the contents of the data-in buffer (which the read leg has
 * already filled), leaving the XOR result in the data-in buffer.
 * @lba and @num are accepted for interface symmetry but not used here;
 * @devip is likewise unused.
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* snapshot the data-out payload into buf */
	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* walk the data-in sg list, XOR-ing buf into it in place */
	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3351 
3352 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3353 			       struct sdebug_dev_info *devip)
3354 {
3355 	u8 *cmd = scp->cmnd;
3356 	u64 lba;
3357 	u32 num;
3358 	int errsts;
3359 
3360 	if (!scsi_bidi_cmnd(scp)) {
3361 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3362 				INSUFF_RES_ASCQ);
3363 		return check_condition_result;
3364 	}
3365 	errsts = resp_read_dt0(scp, devip);
3366 	if (errsts)
3367 		return errsts;
3368 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3369 		errsts = resp_write_dt0(scp, devip);
3370 		if (errsts)
3371 			return errsts;
3372 	}
3373 	lba = get_unaligned_be32(cmd + 2);
3374 	num = get_unaligned_be16(cmd + 7);
3375 	return resp_xdwriteread(scp, lba, num, devip);
3376 }
3377 
3378 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3379 {
3380 	struct sdebug_queue *sqp = sdebug_q_arr;
3381 
3382 	if (sdebug_mq_active) {
3383 		u32 tag = blk_mq_unique_tag(cmnd->request);
3384 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3385 
3386 		if (unlikely(hwq >= submit_queues)) {
3387 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3388 			hwq %= submit_queues;
3389 		}
3390 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3391 		return sqp + hwq;
3392 	} else
3393 		return sqp;
3394 }
3395 
3396 /* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* locate the queue slot this deferred completion belongs to */
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions running on a different CPU than the
		 * one that submitted the command */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* slot was cleared (e.g. aborted) before we got here */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* retired_max_queue > 0 means the user shrank max_queue while
	 * slots above the new limit were still in flight */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no in-use slot above the new limit remains, the
		 * retirement is finished */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3460 
3461 /* When high resolution timer goes off this function is called. */
3462 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3463 {
3464 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3465 						  hrt);
3466 	sdebug_q_cmd_complete(sd_dp);
3467 	return HRTIMER_NORESTART;
3468 }
3469 
3470 /* When work queue schedules work, it calls this function. */
3471 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3472 {
3473 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3474 						  ew.work);
3475 	sdebug_q_cmd_complete(sd_dp);
3476 }
3477 
3478 static struct sdebug_dev_info *sdebug_device_create(
3479 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3480 {
3481 	struct sdebug_dev_info *devip;
3482 
3483 	devip = kzalloc(sizeof(*devip), flags);
3484 	if (devip) {
3485 		devip->sdbg_host = sdbg_host;
3486 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3487 	}
3488 	return devip;
3489 }
3490 
3491 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3492 {
3493 	struct sdebug_host_info *sdbg_host;
3494 	struct sdebug_dev_info *open_devip = NULL;
3495 	struct sdebug_dev_info *devip;
3496 
3497 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3498 	if (!sdbg_host) {
3499 		pr_err("Host info NULL\n");
3500 		return NULL;
3501         }
3502 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3503 		if ((devip->used) && (devip->channel == sdev->channel) &&
3504                     (devip->target == sdev->id) &&
3505                     (devip->lun == sdev->lun))
3506                         return devip;
3507 		else {
3508 			if ((!devip->used) && (!open_devip))
3509 				open_devip = devip;
3510 		}
3511 	}
3512 	if (!open_devip) { /* try and make a new one */
3513 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3514 		if (!open_devip) {
3515 			pr_err("out of memory at line %d\n", __LINE__);
3516 			return NULL;
3517 		}
3518 	}
3519 
3520 	open_devip->channel = sdev->channel;
3521 	open_devip->target = sdev->id;
3522 	open_devip->lun = sdev->lun;
3523 	open_devip->sdbg_host = sdbg_host;
3524 	atomic_set(&open_devip->num_in_q, 0);
3525 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3526 	open_devip->used = true;
3527 	return open_devip;
3528 }
3529 
3530 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3531 {
3532 	if (sdebug_verbose)
3533 		pr_info("slave_alloc <%u %u %u %llu>\n",
3534 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3535 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3536 	return 0;
3537 }
3538 
3539 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3540 {
3541 	struct sdebug_dev_info *devip =
3542 			(struct sdebug_dev_info *)sdp->hostdata;
3543 
3544 	if (sdebug_verbose)
3545 		pr_info("slave_configure <%u %u %u %llu>\n",
3546 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3547 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3548 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3549 	if (devip == NULL) {
3550 		devip = find_build_dev_info(sdp);
3551 		if (devip == NULL)
3552 			return 1;  /* no resources, will be marked offline */
3553 	}
3554 	sdp->hostdata = devip;
3555 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3556 	if (sdebug_no_uld)
3557 		sdp->no_uld_attach = 1;
3558 	return 0;
3559 }
3560 
3561 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3562 {
3563 	struct sdebug_dev_info *devip =
3564 		(struct sdebug_dev_info *)sdp->hostdata;
3565 
3566 	if (sdebug_verbose)
3567 		pr_info("slave_destroy <%u %u %u %llu>\n",
3568 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3569 	if (devip) {
3570 		/* make this slot available for re-use */
3571 		devip->used = false;
3572 		sdp->hostdata = NULL;
3573 	}
3574 }
3575 
3576 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3577 {
3578 	if (!sd_dp)
3579 		return;
3580 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3581 		hrtimer_cancel(&sd_dp->hrt);
3582 	else if (sdebug_jdelay < 0)
3583 		cancel_work_sync(&sd_dp->ew.work);
3584 }
3585 
3586 /* If @cmnd found deletes its timer or work queue and returns true; else
3587    returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* also scan slots above max_queue if it was recently
		 * reduced and those commands are still outstanding */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock before stop_qc_helper(),
				 * which can wait on the deferred handler */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3625 
3626 /* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock around stop_qc_helper(),
				 * which can wait on the deferred handler,
				 * then reacquire to continue the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3658 
3659 /* Free queued command memory on heap */
3660 static void free_all_queued(void)
3661 {
3662 	int j, k;
3663 	struct sdebug_queue *sqp;
3664 	struct sdebug_queued_cmd *sqcp;
3665 
3666 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3667 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3668 			sqcp = &sqp->qc_arr[k];
3669 			kfree(sqcp->sd_dp);
3670 			sqcp->sd_dp = NULL;
3671 		}
3672 	}
3673 }
3674 
3675 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3676 {
3677 	bool ok;
3678 
3679 	++num_aborts;
3680 	if (SCpnt) {
3681 		ok = stop_queued_cmnd(SCpnt);
3682 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3683 			sdev_printk(KERN_INFO, SCpnt->device,
3684 				    "%s: command%s found\n", __func__,
3685 				    ok ? "" : " not");
3686 	}
3687 	return SUCCESS;
3688 }
3689 
3690 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3691 {
3692 	++num_dev_resets;
3693 	if (SCpnt && SCpnt->device) {
3694 		struct scsi_device *sdp = SCpnt->device;
3695 		struct sdebug_dev_info *devip =
3696 				(struct sdebug_dev_info *)sdp->hostdata;
3697 
3698 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3699 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3700 		if (devip)
3701 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3702 	}
3703 	return SUCCESS;
3704 }
3705 
3706 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3707 {
3708 	struct sdebug_host_info *sdbg_host;
3709 	struct sdebug_dev_info *devip;
3710 	struct scsi_device *sdp;
3711 	struct Scsi_Host *hp;
3712 	int k = 0;
3713 
3714 	++num_target_resets;
3715 	if (!SCpnt)
3716 		goto lie;
3717 	sdp = SCpnt->device;
3718 	if (!sdp)
3719 		goto lie;
3720 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3721 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3722 	hp = sdp->host;
3723 	if (!hp)
3724 		goto lie;
3725 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3726 	if (sdbg_host) {
3727 		list_for_each_entry(devip,
3728 				    &sdbg_host->dev_info_list,
3729 				    dev_list)
3730 			if (devip->target == sdp->id) {
3731 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3732 				++k;
3733 			}
3734 	}
3735 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3736 		sdev_printk(KERN_INFO, sdp,
3737 			    "%s: %d device(s) found in target\n", __func__, k);
3738 lie:
3739 	return SUCCESS;
3740 }
3741 
3742 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3743 {
3744 	struct sdebug_host_info *sdbg_host;
3745 	struct sdebug_dev_info *devip;
3746         struct scsi_device * sdp;
3747         struct Scsi_Host * hp;
3748 	int k = 0;
3749 
3750 	++num_bus_resets;
3751 	if (!(SCpnt && SCpnt->device))
3752 		goto lie;
3753 	sdp = SCpnt->device;
3754 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3755 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3756 	hp = sdp->host;
3757 	if (hp) {
3758 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3759 		if (sdbg_host) {
3760 			list_for_each_entry(devip,
3761                                             &sdbg_host->dev_info_list,
3762 					    dev_list) {
3763 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3764 				++k;
3765 			}
3766 		}
3767 	}
3768 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3769 		sdev_printk(KERN_INFO, sdp,
3770 			    "%s: %d device(s) found in host\n", __func__, k);
3771 lie:
3772 	return SUCCESS;
3773 }
3774 
3775 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3776 {
3777 	struct sdebug_host_info * sdbg_host;
3778 	struct sdebug_dev_info *devip;
3779 	int k = 0;
3780 
3781 	++num_host_resets;
3782 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3783 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3784         spin_lock(&sdebug_host_list_lock);
3785         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3786 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3787 				    dev_list) {
3788 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3789 			++k;
3790 		}
3791         }
3792         spin_unlock(&sdebug_host_list_lock);
3793 	stop_all_queued();
3794 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3795 		sdev_printk(KERN_INFO, SCpnt->device,
3796 			    "%s: %d device(s) found\n", __func__, k);
3797 	return SUCCESS;
3798 }
3799 
/* Write a classic MBR partition table (magic 0x55AA at offset 510, up to
 * SDEBUG_MAX_PARTS entries at offset 0x1be) into the start of the ram
 * store so the simulated disk appears pre-partitioned. Each partition is
 * a plain Linux (type 0x83) partition; boundaries are rounded to whole
 * cylinders (heads * sectors-per-track). No-op unless num_parts >= 1 and
 * the store is at least 1 MiB. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
        starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary; the table below is
	 * terminated by a 0 sentinel after the end-of-disk entry */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS address of the first sector (sector is 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start/length are little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3849 
3850 static void block_unblock_all_queues(bool block)
3851 {
3852 	int j;
3853 	struct sdebug_queue *sqp;
3854 
3855 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3856 		atomic_set(&sqp->blocked, (int)block);
3857 }
3858 
3859 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3860  * commands will be processed normally before triggers occur.
3861  */
3862 static void tweak_cmnd_count(void)
3863 {
3864 	int count, modulo;
3865 
3866 	modulo = abs(sdebug_every_nth);
3867 	if (modulo < 2)
3868 		return;
3869 	block_unblock_all_queues(true);
3870 	count = atomic_read(&sdebug_cmnd_count);
3871 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3872 	block_unblock_all_queues(false);
3873 }
3874 
3875 static void clear_queue_stats(void)
3876 {
3877 	atomic_set(&sdebug_cmnd_count, 0);
3878 	atomic_set(&sdebug_completions, 0);
3879 	atomic_set(&sdebug_miss_cpus, 0);
3880 	atomic_set(&sdebug_a_tsf, 0);
3881 }
3882 
3883 static void setup_inject(struct sdebug_queue *sqp,
3884 			 struct sdebug_queued_cmd *sqcp)
3885 {
3886 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3887 		return;
3888 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3889 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3890 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3891 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3892 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3893 }
3894 
3895 /* Complete the processing of the thread that queued a SCSI command to this
3896  * driver. It either completes the command by calling cmnd_done() or
3897  * schedules a hr timer or work queue then returns 0. Returns
3898  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3899  */
3900 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3901 			 int scsi_result, int delta_jiff)
3902 {
3903 	unsigned long iflags;
3904 	int k, num_in_q, qdepth, inject;
3905 	struct sdebug_queue *sqp;
3906 	struct sdebug_queued_cmd *sqcp;
3907 	struct scsi_device *sdp;
3908 	struct sdebug_defer *sd_dp;
3909 
3910 	if (unlikely(devip == NULL)) {
3911 		if (scsi_result == 0)
3912 			scsi_result = DID_NO_CONNECT << 16;
3913 		goto respond_in_thread;
3914 	}
3915 	sdp = cmnd->device;
3916 
3917 	if (unlikely(sdebug_verbose && scsi_result))
3918 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3919 			    __func__, scsi_result);
3920 	if (delta_jiff == 0)
3921 		goto respond_in_thread;
3922 
3923 	/* schedule the response at a later time if resources permit */
3924 	sqp = get_queue(cmnd);
3925 	spin_lock_irqsave(&sqp->qc_lock, iflags);
3926 	if (unlikely(atomic_read(&sqp->blocked))) {
3927 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3928 		return SCSI_MLQUEUE_HOST_BUSY;
3929 	}
3930 	num_in_q = atomic_read(&devip->num_in_q);
3931 	qdepth = cmnd->device->queue_depth;
3932 	inject = 0;
3933 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
3934 		if (scsi_result) {
3935 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3936 			goto respond_in_thread;
3937 		} else
3938 			scsi_result = device_qfull_result;
3939 	} else if (unlikely(sdebug_every_nth &&
3940 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
3941 			    (scsi_result == 0))) {
3942 		if ((num_in_q == (qdepth - 1)) &&
3943 		    (atomic_inc_return(&sdebug_a_tsf) >=
3944 		     abs(sdebug_every_nth))) {
3945 			atomic_set(&sdebug_a_tsf, 0);
3946 			inject = 1;
3947 			scsi_result = device_qfull_result;
3948 		}
3949 	}
3950 
3951 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
3952 	if (unlikely(k >= sdebug_max_queue)) {
3953 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3954 		if (scsi_result)
3955 			goto respond_in_thread;
3956 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
3957 			scsi_result = device_qfull_result;
3958 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
3959 			sdev_printk(KERN_INFO, sdp,
3960 				    "%s: max_queue=%d exceeded, %s\n",
3961 				    __func__, sdebug_max_queue,
3962 				    (scsi_result ?  "status: TASK SET FULL" :
3963 						    "report: host busy"));
3964 		if (scsi_result)
3965 			goto respond_in_thread;
3966 		else
3967 			return SCSI_MLQUEUE_HOST_BUSY;
3968 	}
3969 	__set_bit(k, sqp->in_use_bm);
3970 	atomic_inc(&devip->num_in_q);
3971 	sqcp = &sqp->qc_arr[k];
3972 	sqcp->a_cmnd = cmnd;
3973 	cmnd->host_scribble = (unsigned char *)sqcp;
3974 	cmnd->result = scsi_result;
3975 	sd_dp = sqcp->sd_dp;
3976 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3977 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
3978 		setup_inject(sqp, sqcp);
3979 	if (delta_jiff > 0 || sdebug_ndelay > 0) {
3980 		ktime_t kt;
3981 
3982 		if (delta_jiff > 0) {
3983 			struct timespec ts;
3984 
3985 			jiffies_to_timespec(delta_jiff, &ts);
3986 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
3987 		} else
3988 			kt = ktime_set(0, sdebug_ndelay);
3989 		if (NULL == sd_dp) {
3990 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
3991 			if (NULL == sd_dp)
3992 				return SCSI_MLQUEUE_HOST_BUSY;
3993 			sqcp->sd_dp = sd_dp;
3994 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
3995 				     HRTIMER_MODE_REL_PINNED);
3996 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
3997 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
3998 			sd_dp->qc_idx = k;
3999 		}
4000 		if (sdebug_statistics)
4001 			sd_dp->issuing_cpu = raw_smp_processor_id();
4002 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4003 	} else {	/* jdelay < 0, use work queue */
4004 		if (NULL == sd_dp) {
4005 			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4006 			if (NULL == sd_dp)
4007 				return SCSI_MLQUEUE_HOST_BUSY;
4008 			sqcp->sd_dp = sd_dp;
4009 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4010 			sd_dp->qc_idx = k;
4011 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4012 		}
4013 		if (sdebug_statistics)
4014 			sd_dp->issuing_cpu = raw_smp_processor_id();
4015 		schedule_work(&sd_dp->ew.work);
4016 	}
4017 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4018 		     (scsi_result == device_qfull_result)))
4019 		sdev_printk(KERN_INFO, sdp,
4020 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4021 			    num_in_q, (inject ? "<inject> " : ""),
4022 			    "status: TASK SET FULL");
4023 	return 0;
4024 
4025 respond_in_thread:	/* call back to mid-layer using invocation thread */
4026 	cmnd->result = scsi_result;
4027 	cmnd->scsi_done(cmnd);
4028 	return 0;
4029 }
4030 
4031 /* Note: The following macros create attribute files in the
4032    /sys/module/scsi_debug/parameters directory. Unfortunately this
4033    driver is unaware of a change and cannot trigger auxiliary actions
4034    as it can when the corresponding attribute in the
4035    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4036  */
/* Module parameters, in alphabetical order. All are world-readable via
 * sysfs; those also marked S_IWUSR can be rewritten there at runtime. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
4080 
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line usage strings for each parameter, shown by modinfo(8). */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4127 
4128 static char sdebug_info[256];
4129 
4130 static const char * scsi_debug_info(struct Scsi_Host * shp)
4131 {
4132 	int k;
4133 
4134 	k = scnprintf(sdebug_info, sizeof(sdebug_info),
4135 		      "%s: version %s [%s], dev_size_mb=%d, opts=0x%x\n",
4136 		      my_name, SDEBUG_VERSION, sdebug_version_date,
4137 		      sdebug_dev_size_mb, sdebug_opts);
4138 	if (k >= (sizeof(sdebug_info) - 1))
4139 		return sdebug_info;
4140 	scnprintf(sdebug_info + k, sizeof(sdebug_info) - k,
4141 		  "%s: submit_queues=%d, statistics=%d\n", my_name,
4142 		  submit_queues, (int)sdebug_statistics);
4143 	return sdebug_info;
4144 }
4145 
4146 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4147 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4148 				 int length)
4149 {
4150 	char arr[16];
4151 	int opts;
4152 	int minLen = length > 15 ? 15 : length;
4153 
4154 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4155 		return -EACCES;
4156 	memcpy(arr, buffer, minLen);
4157 	arr[minLen] = '\0';
4158 	if (1 != sscanf(arr, "%d", &opts))
4159 		return -EINVAL;
4160 	sdebug_opts = opts;
4161 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4162 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4163 	if (sdebug_every_nth != 0)
4164 		tweak_cmnd_count();
4165 	return length;
4166 }
4167 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-queue occupancy: first/last set bits of each in-use bitmap */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		/* find_first_bit() == sdebug_max_queue means queue is idle */
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4213 
4214 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4215 {
4216 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4217 }
4218 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4219  * of delay is jiffies.
4220  */
4221 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4222 			   size_t count)
4223 {
4224 	int jdelay, res;
4225 
4226 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4227 		res = count;
4228 		if (sdebug_jdelay != jdelay) {
4229 			int j, k;
4230 			struct sdebug_queue *sqp;
4231 
4232 			block_unblock_all_queues(true);
4233 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4234 			     ++j, ++sqp) {
4235 				k = find_first_bit(sqp->in_use_bm,
4236 						   sdebug_max_queue);
4237 				if (k != sdebug_max_queue) {
4238 					res = -EBUSY;   /* queued commands */
4239 					break;
4240 				}
4241 			}
4242 			if (res > 0) {
4243 				/* make sure sdebug_defer instances get
4244 				 * re-allocated for new delay variant */
4245 				free_all_queued();
4246 				sdebug_jdelay = jdelay;
4247 				sdebug_ndelay = 0;
4248 			}
4249 			block_unblock_all_queues(false);
4250 		}
4251 		return res;
4252 	}
4253 	return -EINVAL;
4254 }
4255 static DRIVER_ATTR_RW(delay);
4256 
4257 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4258 {
4259 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4260 }
4261 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4262 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4263 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4264 			    size_t count)
4265 {
4266 	int ndelay, res;
4267 
4268 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4269 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4270 		res = count;
4271 		if (sdebug_ndelay != ndelay) {
4272 			int j, k;
4273 			struct sdebug_queue *sqp;
4274 
4275 			block_unblock_all_queues(true);
4276 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4277 			     ++j, ++sqp) {
4278 				k = find_first_bit(sqp->in_use_bm,
4279 						   sdebug_max_queue);
4280 				if (k != sdebug_max_queue) {
4281 					res = -EBUSY;   /* queued commands */
4282 					break;
4283 				}
4284 			}
4285 			if (res > 0) {
4286 				/* make sure sdebug_defer instances get
4287 				 * re-allocated for new delay variant */
4288 				free_all_queued();
4289 				sdebug_ndelay = ndelay;
4290 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4291 							: DEF_JDELAY;
4292 			}
4293 			block_unblock_all_queues(false);
4294 		}
4295 		return res;
4296 	}
4297 	return -EINVAL;
4298 }
4299 static DRIVER_ATTR_RW(ndelay);
4300 
4301 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4302 {
4303 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4304 }
4305 
4306 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4307 			  size_t count)
4308 {
4309         int opts;
4310 	char work[20];
4311 
4312         if (1 == sscanf(buf, "%10s", work)) {
4313 		if (0 == strncasecmp(work,"0x", 2)) {
4314 			if (1 == sscanf(&work[2], "%x", &opts))
4315 				goto opts_done;
4316 		} else {
4317 			if (1 == sscanf(work, "%d", &opts))
4318 				goto opts_done;
4319 		}
4320 	}
4321 	return -EINVAL;
4322 opts_done:
4323 	sdebug_opts = opts;
4324 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4325 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4326 	tweak_cmnd_count();
4327 	return count;
4328 }
4329 static DRIVER_ATTR_RW(opts);
4330 
4331 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4332 {
4333 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4334 }
4335 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4336 			   size_t count)
4337 {
4338         int n;
4339 
4340 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4341 		sdebug_ptype = n;
4342 		return count;
4343 	}
4344 	return -EINVAL;
4345 }
4346 static DRIVER_ATTR_RW(ptype);
4347 
4348 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4349 {
4350 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4351 }
4352 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4353 			    size_t count)
4354 {
4355         int n;
4356 
4357 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4358 		sdebug_dsense = n;
4359 		return count;
4360 	}
4361 	return -EINVAL;
4362 }
4363 static DRIVER_ATTR_RW(dsense);
4364 
4365 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4366 {
4367 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4368 }
4369 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4370 			     size_t count)
4371 {
4372         int n;
4373 
4374 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4375 		n = (n > 0);
4376 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4377 		if (sdebug_fake_rw != n) {
4378 			if ((0 == n) && (NULL == fake_storep)) {
4379 				unsigned long sz =
4380 					(unsigned long)sdebug_dev_size_mb *
4381 					1048576;
4382 
4383 				fake_storep = vmalloc(sz);
4384 				if (NULL == fake_storep) {
4385 					pr_err("out of memory, 9\n");
4386 					return -ENOMEM;
4387 				}
4388 				memset(fake_storep, 0, sz);
4389 			}
4390 			sdebug_fake_rw = n;
4391 		}
4392 		return count;
4393 	}
4394 	return -EINVAL;
4395 }
4396 static DRIVER_ATTR_RW(fake_rw);
4397 
4398 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4399 {
4400 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4401 }
4402 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4403 			      size_t count)
4404 {
4405         int n;
4406 
4407 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4408 		sdebug_no_lun_0 = n;
4409 		return count;
4410 	}
4411 	return -EINVAL;
4412 }
4413 static DRIVER_ATTR_RW(no_lun_0);
4414 
4415 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4416 {
4417 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4418 }
4419 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4420 			      size_t count)
4421 {
4422         int n;
4423 
4424 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4425 		sdebug_num_tgts = n;
4426 		sdebug_max_tgts_luns();
4427 		return count;
4428 	}
4429 	return -EINVAL;
4430 }
4431 static DRIVER_ATTR_RW(num_tgts);
4432 
/* Size in MiB of the shared ram store (read-only attribute). */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
4438 
/* Number of partitions built at init time (read-only attribute). */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4444 
4445 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4446 {
4447 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4448 }
4449 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4450 			       size_t count)
4451 {
4452         int nth;
4453 
4454 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4455 		sdebug_every_nth = nth;
4456 		if (nth && !sdebug_statistics) {
4457 			pr_info("every_nth needs statistics=1, set it\n");
4458 			sdebug_statistics = true;
4459 		}
4460 		tweak_cmnd_count();
4461 		return count;
4462 	}
4463 	return -EINVAL;
4464 }
4465 static DRIVER_ATTR_RW(every_nth);
4466 
4467 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4468 {
4469 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4470 }
4471 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4472 			      size_t count)
4473 {
4474         int n;
4475 	bool changed;
4476 
4477 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4478 		if (n > 256) {
4479 			pr_warn("max_luns can be no more than 256\n");
4480 			return -EINVAL;
4481 		}
4482 		changed = (sdebug_max_luns != n);
4483 		sdebug_max_luns = n;
4484 		sdebug_max_tgts_luns();
4485 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4486 			struct sdebug_host_info *sdhp;
4487 			struct sdebug_dev_info *dp;
4488 
4489 			spin_lock(&sdebug_host_list_lock);
4490 			list_for_each_entry(sdhp, &sdebug_host_list,
4491 					    host_list) {
4492 				list_for_each_entry(dp, &sdhp->dev_info_list,
4493 						    dev_list) {
4494 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4495 						dp->uas_bm);
4496 				}
4497 			}
4498 			spin_unlock(&sdebug_host_list_lock);
4499 		}
4500 		return count;
4501 	}
4502 	return -EINVAL;
4503 }
4504 static DRIVER_ATTR_RW(max_luns);
4505 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k := highest in-use slot index across all queues;
		 * find_last_bit() returns SDEBUG_CANQUEUE for an empty map */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* retired_max_queue > 0 only when in-flight commands sit in
		 * slots above the new limit and must still complete */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4541 
/* Whether upper-level driver (e.g. sd) attachment is suppressed (RO). */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
4547 
4548 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4549 {
4550 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4551 }
4552 static DRIVER_ATTR_RO(scsi_level);
4553 
4554 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4555 {
4556 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4557 }
4558 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4559 				size_t count)
4560 {
4561         int n;
4562 	bool changed;
4563 
4564 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4565 		changed = (sdebug_virtual_gb != n);
4566 		sdebug_virtual_gb = n;
4567 		sdebug_capacity = get_sdebug_capacity();
4568 		if (changed) {
4569 			struct sdebug_host_info *sdhp;
4570 			struct sdebug_dev_info *dp;
4571 
4572 			spin_lock(&sdebug_host_list_lock);
4573 			list_for_each_entry(sdhp, &sdebug_host_list,
4574 					    host_list) {
4575 				list_for_each_entry(dp, &sdhp->dev_info_list,
4576 						    dev_list) {
4577 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4578 						dp->uas_bm);
4579 				}
4580 			}
4581 			spin_unlock(&sdebug_host_list_lock);
4582 		}
4583 		return count;
4584 	}
4585 	return -EINVAL;
4586 }
4587 static DRIVER_ATTR_RW(virtual_gb);
4588 
4589 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4590 {
4591 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4592 }
4593 
4594 static int sdebug_add_adapter(void);
4595 static void sdebug_remove_adapter(void);
4596 
4597 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4598 			      size_t count)
4599 {
4600 	int delta_hosts;
4601 
4602 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4603 		return -EINVAL;
4604 	if (delta_hosts > 0) {
4605 		do {
4606 			sdebug_add_adapter();
4607 		} while (--delta_hosts);
4608 	} else if (delta_hosts < 0) {
4609 		do {
4610 			sdebug_remove_adapter();
4611 		} while (++delta_hosts);
4612 	}
4613 	return count;
4614 }
4615 static DRIVER_ATTR_RW(add_host);
4616 
4617 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4618 {
4619 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4620 }
4621 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4622 				    size_t count)
4623 {
4624 	int n;
4625 
4626 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4627 		sdebug_vpd_use_hostno = n;
4628 		return count;
4629 	}
4630 	return -EINVAL;
4631 }
4632 static DRIVER_ATTR_RW(vpd_use_hostno);
4633 
4634 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4635 {
4636 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4637 }
4638 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4639 				size_t count)
4640 {
4641 	int n;
4642 
4643 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4644 		if (n > 0)
4645 			sdebug_statistics = true;
4646 		else {
4647 			clear_queue_stats();
4648 			sdebug_statistics = false;
4649 		}
4650 		return count;
4651 	}
4652 	return -EINVAL;
4653 }
4654 static DRIVER_ATTR_RW(statistics);
4655 
4656 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4657 {
4658 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4659 }
4660 static DRIVER_ATTR_RO(sector_size);
4661 
4662 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4663 {
4664 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4665 }
4666 static DRIVER_ATTR_RO(submit_queues);
4667 
4668 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4669 {
4670 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4671 }
4672 static DRIVER_ATTR_RO(dix);
4673 
4674 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4675 {
4676 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4677 }
4678 static DRIVER_ATTR_RO(dif);
4679 
4680 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4681 {
4682 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4683 }
4684 static DRIVER_ATTR_RO(guard);
4685 
4686 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4687 {
4688 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4689 }
4690 static DRIVER_ATTR_RO(ato);
4691 
/*
 * Show the provisioning map. Without logical block provisioning every
 * sector is backed, so just print the full LBA range; otherwise render
 * the map bitmap as a ranged bit list ("%*pbl") and append a newline.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* PAGE_SIZE - 1 leaves room for the '\n' and '\0' appended below */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4708 
4709 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4710 {
4711 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4712 }
4713 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4714 			       size_t count)
4715 {
4716 	int n;
4717 
4718 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4719 		sdebug_removable = (n > 0);
4720 		return count;
4721 	}
4722 	return -EINVAL;
4723 }
4724 static DRIVER_ATTR_RW(removable);
4725 
4726 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4727 {
4728 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4729 }
4730 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4731 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4732 			       size_t count)
4733 {
4734 	int n;
4735 
4736 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4737 		sdebug_host_lock = (n > 0);
4738 		return count;
4739 	}
4740 	return -EINVAL;
4741 }
4742 static DRIVER_ATTR_RW(host_lock);
4743 
4744 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4745 {
4746 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4747 }
4748 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4749 			    size_t count)
4750 {
4751 	int n;
4752 
4753 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4754 		sdebug_strict = (n > 0);
4755 		return count;
4756 	}
4757 	return -EINVAL;
4758 }
4759 static DRIVER_ATTR_RW(strict);
4760 
4761 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
4768 
/* NULL-terminated table of driver attributes; exported via
 * ATTRIBUTE_GROUPS() and hooked up through pseudo_lld_bus.drv_groups. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
4802 
4803 static struct device *pseudo_primary;
4804 
/*
 * Module init: validate module parameters, allocate the per-queue array,
 * the optional ramdisk/protection-info/provisioning-map storage, register
 * the pseudo root device, bus and driver, then create the initial
 * adapter(s). On failure everything acquired so far is unwound via the
 * goto chain at the bottom (in reverse acquisition order).
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ---- parameter validation / normalization ---- */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {

	case SD_DIF_TYPE0_PROTECTION:
		break;
	case SD_DIF_TYPE1_PROTECTION:
	case SD_DIF_TYPE2_PROTECTION:
	case SD_DIF_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	/* ---- per-submit-queue state ---- */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* ---- backing ramdisk (skipped when fake_rw is set) ---- */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	/* ---- per-sector protection information for DIX ---- */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	/* ---- driver-core registration: root device, bus, driver ---- */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host per success */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

        for (k = 0; k < host_to_add; k++) {
                if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
                        break;
                }
        }

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

	/* ---- error unwind, reverse acquisition order ---- */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5014 
5015 static void __exit scsi_debug_exit(void)
5016 {
5017 	int k = sdebug_add_host;
5018 
5019 	stop_all_queued();
5020 	free_all_queued();
5021 	for (; k; k--)
5022 		sdebug_remove_adapter();
5023 	driver_unregister(&sdebug_driverfs_driver);
5024 	bus_unregister(&pseudo_lld_bus);
5025 	root_device_unregister(pseudo_primary);
5026 
5027 	vfree(dif_storep);
5028 	vfree(fake_storep);
5029 	kfree(sdebug_q_arr);
5030 }
5031 
5032 device_initcall(scsi_debug_init);
5033 module_exit(scsi_debug_exit);
5034 
/* Device-core release callback: frees the containing host info struct. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);

	kfree(sdbg_host);
}
5042 
5043 static int sdebug_add_adapter(void)
5044 {
5045 	int k, devs_per_host;
5046         int error = 0;
5047         struct sdebug_host_info *sdbg_host;
5048 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5049 
5050         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5051         if (NULL == sdbg_host) {
5052 		pr_err("out of memory at line %d\n", __LINE__);
5053                 return -ENOMEM;
5054         }
5055 
5056         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5057 
5058 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5059         for (k = 0; k < devs_per_host; k++) {
5060 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5061 		if (!sdbg_devinfo) {
5062 			pr_err("out of memory at line %d\n", __LINE__);
5063                         error = -ENOMEM;
5064 			goto clean;
5065                 }
5066         }
5067 
5068         spin_lock(&sdebug_host_list_lock);
5069         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5070         spin_unlock(&sdebug_host_list_lock);
5071 
5072         sdbg_host->dev.bus = &pseudo_lld_bus;
5073         sdbg_host->dev.parent = pseudo_primary;
5074         sdbg_host->dev.release = &sdebug_release_adapter;
5075 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5076 
5077         error = device_register(&sdbg_host->dev);
5078 
5079         if (error)
5080 		goto clean;
5081 
5082 	++sdebug_add_host;
5083         return error;
5084 
5085 clean:
5086 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5087 				 dev_list) {
5088 		list_del(&sdbg_devinfo->dev_list);
5089 		kfree(sdbg_devinfo);
5090 	}
5091 
5092 	kfree(sdbg_host);
5093         return error;
5094 }
5095 
5096 static void sdebug_remove_adapter(void)
5097 {
5098         struct sdebug_host_info * sdbg_host = NULL;
5099 
5100         spin_lock(&sdebug_host_list_lock);
5101         if (!list_empty(&sdebug_host_list)) {
5102                 sdbg_host = list_entry(sdebug_host_list.prev,
5103                                        struct sdebug_host_info, host_list);
5104 		list_del(&sdbg_host->host_list);
5105 	}
5106         spin_unlock(&sdebug_host_list_lock);
5107 
5108 	if (!sdbg_host)
5109 		return;
5110 
5111 	device_unregister(&sdbg_host->dev);
5112 	--sdebug_add_host;
5113 }
5114 
/*
 * change_queue_depth host template callback: clamp the requested depth
 * to [1, SDEBUG_CANQUEUE + 10] and apply it via scsi_change_queue_depth().
 * All queues are blocked for the duration of the change. Returns the
 * device's resulting queue depth, or -ENODEV when no device info is
 * attached to the scsi_device.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5142 
5143 static bool fake_timeout(struct scsi_cmnd *scp)
5144 {
5145 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5146 		if (sdebug_every_nth < -1)
5147 			sdebug_every_nth = -1;
5148 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5149 			return true; /* ignore command causing timeout */
5150 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5151 			 scsi_medium_access_command(scp))
5152 			return true; /* time out reads and writes */
5153 	}
5154 	return false;
5155 }
5156 
/*
 * SCSI mid-layer queuecommand entry point. Looks the CDB opcode up in
 * opcode_info_arr (via opcode_ind_arr), resolves service-action variants,
 * performs optional strict CDB-mask checking and unit attention handling,
 * then dispatches to the matching resp_* handler and queues the response
 * through schedule_resp().
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the CDB bytes (at most 32) of each command */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	/* only the REPORT LUNS well-known LUN may exceed max_luns */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action in CDB byte 1 (low) or bytes 8-9 */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* pending unit attention (unless this opcode skips UA checks) */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5303 
/* Host template for emulated adapters. Note: can_queue and use_clustering
 * are patched at probe time in sdebug_driver_probe(). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5330 
/*
 * Pseudo bus probe callback: allocate a Scsi_Host for this adapter,
 * configure blk-mq queue count, target/LUN limits and DIF/DIX protection
 * capabilities, then add and scan the host. Returns 0 on success or a
 * negative errno.
 */
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
	if (sdebug_mq_active)
		hpnt->nr_hw_queues = submit_queues;

        sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the dif module parameter onto host protection capabilities */
	switch (sdebug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
        error = scsi_add_host(hpnt, &sdbg_host->dev);
        if (error) {
		pr_err("scsi_add_host failed\n");
                error = -ENODEV;
		scsi_host_put(hpnt);
        } else
		scsi_scan_host(hpnt);

	return error;
}
5428 
5429 static int sdebug_driver_remove(struct device * dev)
5430 {
5431         struct sdebug_host_info *sdbg_host;
5432 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5433 
5434 	sdbg_host = to_sdebug_host(dev);
5435 
5436 	if (!sdbg_host) {
5437 		pr_err("Unable to locate host info\n");
5438 		return -ENODEV;
5439 	}
5440 
5441         scsi_remove_host(sdbg_host->shost);
5442 
5443 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5444 				 dev_list) {
5445                 list_del(&sdbg_devinfo->dev_list);
5446                 kfree(sdbg_devinfo);
5447         }
5448 
5449         scsi_host_put(sdbg_host->shost);
5450         return 0;
5451 }
5452 
/* Every device on the pseudo bus matches the single sdebug driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5458 
/* Pseudo bus the sdebug adapters hang off; probe/remove build and tear
 * down the emulated SCSI hosts, drv_groups exposes the sysfs attributes. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5466