xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision d36da305)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0189"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200421";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) values, per the T10 SPC ASC/ASCQ assignments.
 * Macros whose name ends in _ASCQ are qualifiers paired with the ASC named
 * in their identifier or trailing comment. */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
100 
/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB   8
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_UUID_CTL 0
/* out-of-band sentinel: marks the jdelay value as having been overridden
 * (presumably when ndelay is given) — NOTE(review): confirm against the
 * module parameter handling code */
#define JDELAY_OVERRIDDEN -9999

#define SDEBUG_LUN_0_VAL 0
151 
/* bit mask values for sdebug_opts; mostly error-injection and logging
 * controls that can be OR-ed together via the "opts" module parameter */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
/* all logging-related option bits */
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
/* all error-injection option bits */
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands:
 *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 *   - a RECOVERED_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
 *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
 *     CMD_ABORT
 *
 * When "every_nth" < 0 then after "- every_nth" commands the selected
 * error will be injected. The error will be injected on every subsequent
 * command until some other action occurs; for example, the user writing
 * a new value (other than -1 or 1) to every_nth:
 *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */
193 
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7	/* must stay 1 + highest SDEBUG_UA_* value */

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  255

/* F_* flags OR-ed into opcode_info_t.flags to describe each supported
 * command: data direction, service action usage, timing behavior etc. */
#define F_D_IN			1
#define F_D_OUT			2
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10
#define F_SKIP_UA		0x20
#define F_DELAY_OVERR		0x40
#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
#define F_INV_OP		0x200
#define F_FAKE_RW		0x400
#define F_M_ACCESS		0x800	/* media access */
#define F_SSU_DELAY		0x1000
#define F_SYNC_DELAY		0x2000

/* common flag combinations */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

/* xarray mark used on per-store entries not currently bound to a host */
#define SDEB_XA_NOT_IN_USE XA_MARK_1
253 
/* State for one simulated logical unit (LU). One instance exists per
 * channel/target/lun combination on each simulated host. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry on owning host's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* RFC 4122 UUID naming this LU */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
	unsigned long uas_bm[1];	/* pending unit attentions, bit per SDEBUG_UA_* */
	atomic_t num_in_q;		/* commands currently queued on this LU */
	atomic_t stopped;
	bool used;
};
266 
/* State for one simulated SCSI host (pseudo HBA). */
struct sdebug_host_info {
	struct list_head host_list;	/* entry on global sdebug_host_list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* mid-level host this simulates */
	struct device dev;		/* pseudo device on pseudo_lld_bus */
	struct list_head dev_info_list;	/* sdebug_dev_info instances (LUs) */
};
274 
275 /* There is an xarray of pointers to this struct's objects, one per host */
276 struct sdeb_store_info {
277 	rwlock_t macc_lck;	/* for atomic media access on this store */
278 	u8 *storep;		/* user data storage (ram) */
279 	struct t10_pi_tuple *dif_storep; /* protection info */
280 	void *map_storep;	/* provisioning map */
281 };
282 
283 #define to_sdebug_host(d)	\
284 	container_of(d, struct sdebug_host_info, dev)
285 
/* How a command's completion has been deferred: not at all, via an
 * hrtimer (ndelay/jdelay), or via a workqueue item. */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

/* Per-queued-command deferral state; owned by a sdebug_queued_cmd. */
struct sdebug_defer {
	struct hrtimer hrt;		/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;		/* used when defer_t == SDEB_DEFER_WQ */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;		/* hrt has been initialized */
	bool init_wq;		/* ew has been initialized */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
300 
/* One slot in a submit queue's qc_arr[]; holds a command awaiting its
 * (possibly deferred) response plus latched error-injection flags. */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferral state, NULL if none */
	struct scsi_cmnd *a_cmnd;	/* the mid-level command being serviced */
	/* per-command injection flags, mirroring SDEBUG_OPT_* choices */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
	unsigned int inj_cmd_abort:1;
};
315 
/* One submit queue; the driver allocates submit_queues of these. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* occupancy of qc_arr */
	spinlock_t qc_lock;	/* protects qc_arr and in_use_bm */
	atomic_t blocked;	/* to temporarily stop more being queued */
};

/* global statistics, reported when sdebug_statistics is true */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
327 
/* Describes one supported SCSI command (or a group sharing an opcode)
 * for dispatch via opcode_info_arr[] and its overflow (*_iarr) tables. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags (see defines above) */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
339 
340 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes;
 * each value indexes opcode_info_arr[] below. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_LAST_ELEM_P1 = 30,	/* keep this last (previous + 1) */
};
374 
375 
/* Maps each possible cdb[0] opcode byte (array index) to a SDEB_I_* value;
 * 0 (SDEB_I_INVALID_OPCODE) marks unsupported opcodes. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
419 
420 /*
421  * The following "response" functions return the SCSI mid-level's 4 byte
422  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
423  * command completion, they can mask their return value with
424  * SDEG_RES_IMMED_MASK .
425  */
426 #define SDEG_RES_IMMED_MASK 0x40000000
427 
428 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
435 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
436 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
437 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
438 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
439 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
440 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
441 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
442 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
443 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
444 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
445 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
446 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
447 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
448 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
449 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
450 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
451 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
452 
453 static int sdebug_do_add_host(bool mk_new_store);
454 static int sdebug_add_host_helper(int per_host_idx);
455 static void sdebug_do_remove_host(bool the_end);
456 static int sdebug_add_store(void);
457 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
458 static void sdebug_erase_all_stores(bool apart_from_first);
459 
460 /*
461  * The following are overflow arrays for cdbs that "hit" the same index in
462  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
463  * should be placed in opcode_info_arr[], the others should be placed here.
464  */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | F_M_ACCESS, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
555 
556 
557 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
558  * plus the terminating elements for logic that scans this table such as
559  * REPORT SUPPORTED OPERATION CODES. */
560 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
561 /* 0 */
562 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
563 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
564 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
565 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
566 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
567 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
568 	     0, 0} },					/* REPORT LUNS */
569 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
570 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
571 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
572 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
573 /* 5 */
574 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
575 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
576 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
577 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
578 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
579 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
580 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
581 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
582 	     0, 0, 0} },
583 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
584 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
585 	     0, 0} },
586 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
587 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
588 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
589 /* 10 */
590 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
591 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
592 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
593 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
594 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
595 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
597 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
598 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
600 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
601 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
602 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
603 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
604 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
605 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
606 				0xff, 0, 0xc7, 0, 0, 0, 0} },
607 /* 15 */
608 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
609 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
610 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
611 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
612 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
613 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
614 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
615 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
616 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
617 	     0xff, 0xff} },
618 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
619 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
620 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
621 	     0} },
622 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
623 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
624 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
625 	     0} },
626 /* 20 */
627 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
628 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
630 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
631 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
632 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
634 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
636 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
637 /* 25 */
638 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
639 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
640 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
641 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
642 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
643 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
644 		 0, 0, 0, 0, 0} },
645 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
646 	    resp_sync_cache, sync_cache_iarr,
647 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
648 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
649 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
650 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
651 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
652 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | F_M_ACCESS,
653 	    resp_pre_fetch, pre_fetch_iarr,
654 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
655 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
656 
657 /* 30 */
658 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
659 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
660 };
661 
/* Current values of the module parameters, initialized from the DEF_*
 * defaults above; most are adjustable at load time and/or via sysfs. */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;	/* any SDEBUG_OPT_ALL_INJECTING bit set */
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;			/* write protect */

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts, protected by sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* xarray of sdeb_store_info pointers (see struct comment above) */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;

static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* statistics exposed to the user (e.g. via sysfs/procfs) */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* pre-built SCSI mid-level result words (driver/host/status byte tuples) */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
773 
774 
775 /* Only do the extra work involved in logical block provisioning if one or
776  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
777  * real reads and writes (i.e. not skipping them for speed).
778  */
779 static inline bool scsi_debug_lbp(void)
780 {
781 	return 0 == sdebug_fake_rw &&
782 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
783 }
784 
785 static void *lba2fake_store(struct sdeb_store_info *sip,
786 			    unsigned long long lba)
787 {
788 	struct sdeb_store_info *lsip = sip;
789 
790 	lba = do_div(lba, sdebug_store_sectors);
791 	if (!sip || !sip->storep) {
792 		WARN_ON_ONCE(true);
793 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
794 	}
795 	return lsip->storep + lba * sdebug_sector_size;
796 }
797 
798 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
799 				      sector_t sector)
800 {
801 	sector = sector_div(sector, sdebug_store_sectors);
802 
803 	return sip->dif_storep + sector;
804 }
805 
806 static void sdebug_max_tgts_luns(void)
807 {
808 	struct sdebug_host_info *sdbg_host;
809 	struct Scsi_Host *hpnt;
810 
811 	spin_lock(&sdebug_host_list_lock);
812 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
813 		hpnt = sdbg_host->shost;
814 		if ((hpnt->this_id >= 0) &&
815 		    (sdebug_num_tgts > hpnt->this_id))
816 			hpnt->max_id = sdebug_num_tgts + 1;
817 		else
818 			hpnt->max_id = sdebug_num_tgts;
819 		/* sdebug_max_luns; */
820 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
821 	}
822 	spin_unlock(&sdebug_host_list_lock);
823 }
824 
/* Whether an invalid field was in the data-out buffer or in the CDB. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/*
 * Build ILLEGAL REQUEST sense data with a SENSE-KEY SPECIFIC field pointer
 * identifying the offending byte (and optionally bit) in either the CDB or
 * the parameter (data-out) list, per the c_d argument.
 * Set in_bit to -1 to indicate no bit position of invalid field.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format offset */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
867 
868 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
869 {
870 	unsigned char *sbuff;
871 
872 	sbuff = scp->sense_buffer;
873 	if (!sbuff) {
874 		sdev_printk(KERN_ERR, scp->device,
875 			    "%s: sense_buffer is NULL\n", __func__);
876 		return;
877 	}
878 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
879 
880 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
881 
882 	if (sdebug_verbose)
883 		sdev_printk(KERN_INFO, scp->device,
884 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
885 			    my_name, key, asc, asq);
886 }
887 
/* Build ILLEGAL REQUEST + INVALID COMMAND OPERATION CODE sense data. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
892 
893 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
894 			    void __user *arg)
895 {
896 	if (sdebug_verbose) {
897 		if (0x1261 == cmd)
898 			sdev_printk(KERN_INFO, dev,
899 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
900 		else if (0x5331 == cmd)
901 			sdev_printk(KERN_INFO, dev,
902 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
903 				    __func__);
904 		else
905 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
906 				    __func__, cmd);
907 	}
908 	return -EINVAL;
909 	/* return -ENOTTY; // correct return but upsets fdisk */
910 }
911 
912 static void config_cdb_len(struct scsi_device *sdev)
913 {
914 	switch (sdebug_cdb_len) {
915 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
916 		sdev->use_10_for_rw = false;
917 		sdev->use_16_for_rw = false;
918 		sdev->use_10_for_ms = false;
919 		break;
920 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
921 		sdev->use_10_for_rw = true;
922 		sdev->use_16_for_rw = false;
923 		sdev->use_10_for_ms = false;
924 		break;
925 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
926 		sdev->use_10_for_rw = true;
927 		sdev->use_16_for_rw = false;
928 		sdev->use_10_for_ms = true;
929 		break;
930 	case 16:
931 		sdev->use_10_for_rw = false;
932 		sdev->use_16_for_rw = true;
933 		sdev->use_10_for_ms = true;
934 		break;
935 	case 32: /* No knobs to suggest this so same as 16 for now */
936 		sdev->use_10_for_rw = false;
937 		sdev->use_16_for_rw = true;
938 		sdev->use_10_for_ms = true;
939 		break;
940 	default:
941 		pr_warn("unexpected cdb_len=%d, force to 10\n",
942 			sdebug_cdb_len);
943 		sdev->use_10_for_rw = true;
944 		sdev->use_16_for_rw = false;
945 		sdev->use_10_for_ms = false;
946 		sdebug_cdb_len = 10;
947 		break;
948 	}
949 }
950 
951 static void all_config_cdb_len(void)
952 {
953 	struct sdebug_host_info *sdbg_host;
954 	struct Scsi_Host *shost;
955 	struct scsi_device *sdev;
956 
957 	spin_lock(&sdebug_host_list_lock);
958 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
959 		shost = sdbg_host->shost;
960 		shost_for_each_device(sdev, shost) {
961 			config_cdb_len(sdev);
962 		}
963 	}
964 	spin_unlock(&sdebug_host_list_lock);
965 }
966 
967 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
968 {
969 	struct sdebug_host_info *sdhp;
970 	struct sdebug_dev_info *dp;
971 
972 	spin_lock(&sdebug_host_list_lock);
973 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
974 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
975 			if ((devip->sdbg_host == dp->sdbg_host) &&
976 			    (devip->target == dp->target))
977 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
978 		}
979 	}
980 	spin_unlock(&sdebug_host_list_lock);
981 }
982 
/*
 * If a unit attention (UA) is pending on this device, build the matching
 * UNIT ATTENTION sense data for the lowest-numbered pending UA, clear its
 * bit and return check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only set when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);	/* this UA is now consumed */
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1062 
1063 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1064 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1065 				int arr_len)
1066 {
1067 	int act_len;
1068 	struct scsi_data_buffer *sdb = &scp->sdb;
1069 
1070 	if (!sdb->length)
1071 		return 0;
1072 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1073 		return DID_ERROR << 16;
1074 
1075 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1076 				      arr, arr_len);
1077 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1078 
1079 	return 0;
1080 }
1081 
1082 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1083  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1084  * calls, not required to write in ascending offset order. Assumes resid
1085  * set to scsi_bufflen() prior to any calls.
1086  */
1087 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1088 				  int arr_len, unsigned int off_dst)
1089 {
1090 	unsigned int act_len, n;
1091 	struct scsi_data_buffer *sdb = &scp->sdb;
1092 	off_t skip = off_dst;
1093 
1094 	if (sdb->length <= off_dst)
1095 		return 0;
1096 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1097 		return DID_ERROR << 16;
1098 
1099 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1100 				       arr, arr_len, skip);
1101 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1102 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1103 		 scsi_get_resid(scp));
1104 	n = scsi_bufflen(scp) - (off_dst + act_len);
1105 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1106 	return 0;
1107 }
1108 
1109 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1110  * 'arr' or -1 if error.
1111  */
1112 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1113 			       int arr_len)
1114 {
1115 	if (!scsi_bufflen(scp))
1116 		return 0;
1117 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1118 		return -1;
1119 
1120 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1121 }
1122 
1123 
/* INQUIRY response strings: 8/16/4 significant bytes, space padded. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1131 
/*
 * Device identification VPD page. Returns number of bytes placed in arr.
 * Builds a series of identification descriptors: T10 vendor id, logical
 * unit id (UUID or NAA-3), relative target port, target port and target
 * port group ids, target device id and a SCSI name string.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* length of first descriptor's designator */
	num += 4;
	/* dev_id_num < 0 means a well-known LU: skip the lu designators */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* zero pad the name string */
	num += 4;
	return num;
}
1219 
/* Canned payload for VPD page 0x84: three 6 byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1225 
1226 /*  Software interface identification VPD page */
1227 static int inquiry_vpd_84(unsigned char *arr)
1228 {
1229 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1230 	return sizeof(vpd84_data);
1231 }
1232 
/*
 * Append one network-address descriptor: a 4 byte header (association and
 * service type, two reserved bytes, length) followed by the NULL terminated
 * address string, zero padded to a multiple of 4 bytes.
 * Returns the number of bytes written to arr.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, unsigned char assoc_svc,
				const char *addr)
{
	int num = 0;
	int olen = strlen(addr);
	int plen = olen + 1;	/* room for the NULL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4 byte multiple */
	arr[num++] = assoc_svc;	/* association + service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, addr, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page. Returns bytes placed in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: association=lu, service type=storage configuration */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: association=lu, service type=logging */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1267 
/*
 * SCSI ports VPD page: two relative ports (A and B), each with an NAA-3
 * target port identifier. Returns number of bytes placed in arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);	/* initiator port tp id len = 0 */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);	/* initiator port tp id len = 0 */
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1309 
1310 
/* Canned payload for the ATA Information VPD page (0x89): SAT vendor and
 * product strings followed by a faked ATA IDENTIFY DEVICE data block. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1354 
1355 /* ATA Information VPD page */
1356 static int inquiry_vpd_89(unsigned char *arr)
1357 {
1358 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1359 	return sizeof(vpd89_data);
1360 }
1361 
1362 
/* Default payload for the Block limits VPD page; inquiry_vpd_b0()
 * overwrites most fields from the sdebug_* settings. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1369 
1370 /* Block limits VPD page (SBC-3) */
1371 static int inquiry_vpd_b0(unsigned char *arr)
1372 {
1373 	unsigned int gran;
1374 
1375 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1376 
1377 	/* Optimal transfer length granularity */
1378 	if (sdebug_opt_xferlen_exp != 0 &&
1379 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1380 		gran = 1 << sdebug_opt_xferlen_exp;
1381 	else
1382 		gran = 1 << sdebug_physblk_exp;
1383 	put_unaligned_be16(gran, arr + 2);
1384 
1385 	/* Maximum Transfer Length */
1386 	if (sdebug_store_sectors > 0x400)
1387 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1388 
1389 	/* Optimal Transfer Length */
1390 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1391 
1392 	if (sdebug_lbpu) {
1393 		/* Maximum Unmap LBA Count */
1394 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1395 
1396 		/* Maximum Unmap Block Descriptor Count */
1397 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1398 	}
1399 
1400 	/* Unmap Granularity Alignment */
1401 	if (sdebug_unmap_alignment) {
1402 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1403 		arr[28] |= 0x80; /* UGAVALID */
1404 	}
1405 
1406 	/* Optimal Unmap Granularity */
1407 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1408 
1409 	/* Maximum WRITE SAME Length */
1410 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1411 
1412 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1413 
1414 	return sizeof(vpdb0_data);
1415 }
1416 
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;	/* page length */
}
1428 
1429 /* Logical block provisioning VPD page (SBC-4) */
1430 static int inquiry_vpd_b2(unsigned char *arr)
1431 {
1432 	memset(arr, 0, 0x4);
1433 	arr[0] = 0;			/* threshold exponent */
1434 	if (sdebug_lbpu)
1435 		arr[1] = 1 << 7;
1436 	if (sdebug_lbpws)
1437 		arr[1] |= 1 << 6;
1438 	if (sdebug_lbpws10)
1439 		arr[1] |= 1 << 5;
1440 	if (sdebug_lbprz && scsi_debug_lbp())
1441 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1442 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1443 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1444 	/* threshold_percentage=0 */
1445 	return 0x4;
1446 }
1447 
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(unsigned char *arr)
{
	int off;

	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones,
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones and Maximum number of open sequential write
	 * required zones fields (offsets 4, 8 and 12) to 'not reported'
	 * (0xffffffff). Leave other fields set to zero.
	 */
	for (off = 4; off <= 12; off += 4)
		put_unaligned_be32(0xffffffff, &arr[off]);
	return 0x3c;
}
1465 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to the INQUIRY command: one of the supported VPD pages when the
 * EVPD bit is set, otherwise the standard 96 byte INQUIRY response.
 * Returns 0, check_condition_result, or DID_REQUEUE << 16 on allocation
 * failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (sdebug_ptype == TYPE_ZBC);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* Byte 0: peripheral qualifier and device type */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		/* cmd[2] selects the VPD page */
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				else if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b6(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to both the caller's allocation and our array */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1629 }
1630 
/* Informational Exceptions Control mode page [0x1c], current values.
 * Byte 2 bit 2 is the TEST bit and the low nibble of byte 3 is MRIE;
 * resp_requests() checks for the TEST-set, MRIE==6 combination and
 * resp_mode_select() may rewrite bytes 2 onward. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1633 
1634 static int resp_requests(struct scsi_cmnd *scp,
1635 			 struct sdebug_dev_info *devip)
1636 {
1637 	unsigned char *sbuff;
1638 	unsigned char *cmd = scp->cmnd;
1639 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1640 	bool dsense;
1641 	int len = 18;
1642 
1643 	memset(arr, 0, sizeof(arr));
1644 	dsense = !!(cmd[1] & 1);
1645 	sbuff = scp->sense_buffer;
1646 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1647 		if (dsense) {
1648 			arr[0] = 0x72;
1649 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1650 			arr[2] = THRESHOLD_EXCEEDED;
1651 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1652 			len = 8;
1653 		} else {
1654 			arr[0] = 0x70;
1655 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1656 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1657 			arr[12] = THRESHOLD_EXCEEDED;
1658 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1659 		}
1660 	} else {
1661 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1662 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1663 			;	/* have sense and formats match */
1664 		else if (arr[0] <= 0x70) {
1665 			if (dsense) {
1666 				memset(arr, 0, 8);
1667 				arr[0] = 0x72;
1668 				len = 8;
1669 			} else {
1670 				memset(arr, 0, 18);
1671 				arr[0] = 0x70;
1672 				arr[7] = 0xa;
1673 			}
1674 		} else if (dsense) {
1675 			memset(arr, 0, 8);
1676 			arr[0] = 0x72;
1677 			arr[1] = sbuff[2];     /* sense key */
1678 			arr[2] = sbuff[12];    /* asc */
1679 			arr[3] = sbuff[13];    /* ascq */
1680 			len = 8;
1681 		} else {
1682 			memset(arr, 0, 18);
1683 			arr[0] = 0x70;
1684 			arr[2] = sbuff[1];
1685 			arr[7] = 0xa;
1686 			arr[12] = sbuff[1];
1687 			arr[13] = sbuff[3];
1688 		}
1689 
1690 	}
1691 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1692 	return fill_from_dev_buffer(scp, arr, len);
1693 }
1694 
1695 static int resp_start_stop(struct scsi_cmnd *scp,
1696 			   struct sdebug_dev_info *devip)
1697 {
1698 	unsigned char *cmd = scp->cmnd;
1699 	int power_cond, stop;
1700 	bool changing;
1701 
1702 	power_cond = (cmd[4] & 0xf0) >> 4;
1703 	if (power_cond) {
1704 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1705 		return check_condition_result;
1706 	}
1707 	stop = !(cmd[4] & 1);
1708 	changing = atomic_read(&devip->stopped) == !stop;
1709 	atomic_xchg(&devip->stopped, stop);
1710 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1711 		return SDEG_RES_IMMED_MASK;
1712 	else
1713 		return 0;
1714 }
1715 
1716 static sector_t get_sdebug_capacity(void)
1717 {
1718 	static const unsigned int gibibyte = 1073741824;
1719 
1720 	if (sdebug_virtual_gb > 0)
1721 		return (sector_t)sdebug_virtual_gb *
1722 			(gibibyte / sdebug_sector_size);
1723 	else
1724 		return sdebug_store_sectors;
1725 }
1726 
1727 #define SDEBUG_READCAP_ARR_SZ 8
1728 static int resp_readcap(struct scsi_cmnd *scp,
1729 			struct sdebug_dev_info *devip)
1730 {
1731 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1732 	unsigned int capac;
1733 
1734 	/* following just in case virtual_gb changed */
1735 	sdebug_capacity = get_sdebug_capacity();
1736 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1737 	if (sdebug_capacity < 0xffffffff) {
1738 		capac = (unsigned int)sdebug_capacity - 1;
1739 		put_unaligned_be32(capac, arr + 0);
1740 	} else
1741 		put_unaligned_be32(0xffffffff, arr + 0);
1742 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1743 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1744 }
1745 
1746 #define SDEBUG_READCAP16_ARR_SZ 32
1747 static int resp_readcap16(struct scsi_cmnd *scp,
1748 			  struct sdebug_dev_info *devip)
1749 {
1750 	unsigned char *cmd = scp->cmnd;
1751 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1752 	int alloc_len;
1753 
1754 	alloc_len = get_unaligned_be32(cmd + 10);
1755 	/* following just in case virtual_gb changed */
1756 	sdebug_capacity = get_sdebug_capacity();
1757 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1758 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1759 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1760 	arr[13] = sdebug_physblk_exp & 0xf;
1761 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1762 
1763 	if (scsi_debug_lbp()) {
1764 		arr[14] |= 0x80; /* LBPME */
1765 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1766 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1767 		 * in the wider field maps to 0 in this field.
1768 		 */
1769 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1770 			arr[14] |= 0x40;
1771 	}
1772 
1773 	arr[15] = sdebug_lowest_aligned & 0xff;
1774 
1775 	if (have_dif_prot) {
1776 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1777 		arr[12] |= 1; /* PROT_EN */
1778 	}
1779 
1780 	return fill_from_dev_buffer(scp, arr,
1781 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1782 }
1783 
1784 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1785 
1786 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1787 			      struct sdebug_dev_info *devip)
1788 {
1789 	unsigned char *cmd = scp->cmnd;
1790 	unsigned char *arr;
1791 	int host_no = devip->sdbg_host->shost->host_no;
1792 	int n, ret, alen, rlen;
1793 	int port_group_a, port_group_b, port_a, port_b;
1794 
1795 	alen = get_unaligned_be32(cmd + 6);
1796 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1797 	if (! arr)
1798 		return DID_REQUEUE << 16;
1799 	/*
1800 	 * EVPD page 0x88 states we have two ports, one
1801 	 * real and a fake port with no device connected.
1802 	 * So we create two port groups with one port each
1803 	 * and set the group with port B to unavailable.
1804 	 */
1805 	port_a = 0x1; /* relative port A */
1806 	port_b = 0x2; /* relative port B */
1807 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1808 			(devip->channel & 0x7f);
1809 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1810 			(devip->channel & 0x7f) + 0x80;
1811 
1812 	/*
1813 	 * The asymmetric access state is cycled according to the host_id.
1814 	 */
1815 	n = 4;
1816 	if (sdebug_vpd_use_hostno == 0) {
1817 		arr[n++] = host_no % 3; /* Asymm access state */
1818 		arr[n++] = 0x0F; /* claim: all states are supported */
1819 	} else {
1820 		arr[n++] = 0x0; /* Active/Optimized path */
1821 		arr[n++] = 0x01; /* only support active/optimized paths */
1822 	}
1823 	put_unaligned_be16(port_group_a, arr + n);
1824 	n += 2;
1825 	arr[n++] = 0;    /* Reserved */
1826 	arr[n++] = 0;    /* Status code */
1827 	arr[n++] = 0;    /* Vendor unique */
1828 	arr[n++] = 0x1;  /* One port per group */
1829 	arr[n++] = 0;    /* Reserved */
1830 	arr[n++] = 0;    /* Reserved */
1831 	put_unaligned_be16(port_a, arr + n);
1832 	n += 2;
1833 	arr[n++] = 3;    /* Port unavailable */
1834 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1835 	put_unaligned_be16(port_group_b, arr + n);
1836 	n += 2;
1837 	arr[n++] = 0;    /* Reserved */
1838 	arr[n++] = 0;    /* Status code */
1839 	arr[n++] = 0;    /* Vendor unique */
1840 	arr[n++] = 0x1;  /* One port per group */
1841 	arr[n++] = 0;    /* Reserved */
1842 	arr[n++] = 0;    /* Reserved */
1843 	put_unaligned_be16(port_b, arr + n);
1844 	n += 2;
1845 
1846 	rlen = n - 4;
1847 	put_unaligned_be32(rlen, arr + 0);
1848 
1849 	/*
1850 	 * Return the smallest value of either
1851 	 * - The allocated length
1852 	 * - The constructed command length
1853 	 * - The maximum array size
1854 	 */
1855 	rlen = min_t(int, alen, n);
1856 	ret = fill_from_dev_buffer(scp, arr,
1857 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1858 	kfree(arr);
1859 	return ret;
1860 }
1861 
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, service action 0xc).
 * Builds the response by walking opcode_info_arr (and each entry's
 * attached service-action list).  reporting_opts selects either "all
 * commands" (0) or a single-command lookup (1-3); rctd asks for a
 * command timeout descriptor to be appended to each entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap the work buffer at 8 KiB regardless of the allocation length */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each entry is 8 bytes, or 20 with a timeout descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* now each service action attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires an sa: mode 1 invalid */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported per standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the cdb usage mask for the command */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to both the work buffer and the cdb allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2012 
2013 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2014 			  struct sdebug_dev_info *devip)
2015 {
2016 	bool repd;
2017 	u32 alloc_len, len;
2018 	u8 arr[16];
2019 	u8 *cmd = scp->cmnd;
2020 
2021 	memset(arr, 0, sizeof(arr));
2022 	repd = !!(cmd[2] & 0x80);
2023 	alloc_len = get_unaligned_be32(cmd + 6);
2024 	if (alloc_len < 4) {
2025 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2026 		return check_condition_result;
2027 	}
2028 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2029 	arr[1] = 0x1;		/* ITNRS */
2030 	if (repd) {
2031 		arr[3] = 0xc;
2032 		len = 16;
2033 	} else
2034 		len = 4;
2035 
2036 	len = (len < alloc_len) ? len : alloc_len;
2037 	return fill_from_dev_buffer(scp, arr, len);
2038 }
2039 
2040 /* <<Following mode page info copied from ST318451LW>> */
2041 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff,
						     0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none changeable */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2052 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0,
						      10, 0, 0, 0, 0, 0, 0,
						      0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none changeable */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2063 
2064 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2065 {       /* Format device page for mode_sense */
2066 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2067 				     0, 0, 0, 0, 0, 0, 0, 0,
2068 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2069 
2070 	memcpy(p, format_pg, sizeof(format_pg));
2071 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2072 	put_unaligned_be16(sdebug_sector_size, p + 12);
2073 	if (sdebug_removable)
2074 		p[20] |= 0x20; /* should agree with INQUIRY */
2075 	if (1 == pcontrol)
2076 		memset(p + 2, 0, sizeof(format_pg) - 2);
2077 	return sizeof(format_pg);
2078 }
2079 
/* Caching mode page [0x8], current values.  Byte 2 bit 2 is WCE (on by
 * default); resp_caching_pg() clears it when SDEBUG_OPT_N_WCE is set and
 * resp_mode_select() may rewrite bytes 2 onward. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2083 
2084 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2085 { 	/* Caching page for mode_sense */
2086 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2087 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2088 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2089 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2090 
2091 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2092 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2093 	memcpy(p, caching_pg, sizeof(caching_pg));
2094 	if (1 == pcontrol)
2095 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2096 	else if (2 == pcontrol)
2097 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2098 	return sizeof(caching_pg);
2099 }
2100 
/* Control mode page [0xa], current values.  The D_SENSE (byte 2 bit 2)
 * and ATO (byte 5 bit 7) bits are refreshed from module state by
 * resp_ctrl_m_pg(); resp_mode_select() may rewrite bytes 2 onward. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2103 
2104 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2105 { 	/* Control mode page for mode_sense */
2106 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2107 					0, 0, 0, 0};
2108 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2109 				     0, 0, 0x2, 0x4b};
2110 
2111 	if (sdebug_dsense)
2112 		ctrl_m_pg[2] |= 0x4;
2113 	else
2114 		ctrl_m_pg[2] &= ~0x4;
2115 
2116 	if (sdebug_ato)
2117 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2118 
2119 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2120 	if (1 == pcontrol)
2121 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2122 	else if (2 == pcontrol)
2123 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2124 	return sizeof(ctrl_m_pg);
2125 }
2126 
2127 
2128 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2129 {	/* Informational Exceptions control mode page for mode_sense */
2130 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2131 				       0, 0, 0x0, 0x0};
2132 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2133 				      0, 0, 0x0, 0x0};
2134 
2135 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2136 	if (1 == pcontrol)
2137 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2138 	else if (2 == pcontrol)
2139 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2140 	return sizeof(iec_m_pg);
2141 }
2142 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none changeable */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2153 
2154 
/*
 * SAS phy control and discover mode subpage for MODE SENSE.  Reports two
 * phys; the slots marked "insert SAS addr" below are patched at run time
 * from naa3_comp_a/naa3_comp_c, and per-phy identifiers derived from
 * target_dev_id are written at offsets 20 and 68.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in the four SAS address slots marked above */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* write the two per-phy port identifiers (phy 0 at +20, phy 1 at +68) */
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2187 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0,
						     0x6, 0x10, 0, 0, 0, 0, 0,
						     0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: none changeable */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2199 
2200 #define SDEBUG_MAX_MSENSE_SZ 256
2201 
/*
 * MODE SENSE (6 and 10 byte cdbs).  Builds a mode parameter header, an
 * optional block descriptor (8 bytes, or 16 when LLBAA), then the mode
 * page(s) selected by pcode/subpcode into arr, and returns up to
 * alloc_len bytes of it.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (sdebug_ptype == TYPE_ZBC);
	/* only (zoned) disks carry a block descriptor */
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes (msense 6) or 8 bytes (msense 10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32 bit number of blocks */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long (LLBAA) block descriptor: 64 bit number of blocks */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* mode data length field excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2365 
2366 #define SDEBUG_MAX_MSELECT_SZ 512
2367 
/*
 * MODE SELECT (6 and 10 byte cdbs).  Accepts changes to the caching,
 * control and informational-exceptions mode pages only, copying the new
 * values into the module-global page images, mirroring SWP/D_SENSE into
 * module state, and raising a MODE PARAMETERS CHANGED unit attention.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* the mode data length field is reserved (zero) on a MODE SELECT */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the header and any block descriptors to reach the page */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero in data-out */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* mirror SWP and D_SENSE into module state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	/* unknown page code, or page length mismatch */
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2449 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page: current (38 C) and reference (65 C) */
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2459 
2460 static int resp_ie_l_pg(unsigned char *arr)
2461 {
2462 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2463 		};
2464 
2465 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2466 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2467 		arr[4] = THRESHOLD_EXCEEDED;
2468 		arr[5] = 0xff;
2469 	}
2470 	return sizeof(ie_l_pg);
2471 }
2472 
2473 #define SDEBUG_MAX_LSENSE_SZ 512
2474 
2475 static int resp_log_sense(struct scsi_cmnd *scp,
2476 			  struct sdebug_dev_info *devip)
2477 {
2478 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2479 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2480 	unsigned char *cmd = scp->cmnd;
2481 
2482 	memset(arr, 0, sizeof(arr));
2483 	ppc = cmd[1] & 0x2;
2484 	sp = cmd[1] & 0x1;
2485 	if (ppc || sp) {
2486 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2487 		return check_condition_result;
2488 	}
2489 	pcode = cmd[2] & 0x3f;
2490 	subpcode = cmd[3] & 0xff;
2491 	alloc_len = get_unaligned_be16(cmd + 7);
2492 	arr[0] = pcode;
2493 	if (0 == subpcode) {
2494 		switch (pcode) {
2495 		case 0x0:	/* Supported log pages log page */
2496 			n = 4;
2497 			arr[n++] = 0x0;		/* this page */
2498 			arr[n++] = 0xd;		/* Temperature */
2499 			arr[n++] = 0x2f;	/* Informational exceptions */
2500 			arr[3] = n - 4;
2501 			break;
2502 		case 0xd:	/* Temperature log page */
2503 			arr[3] = resp_temp_l_pg(arr + 4);
2504 			break;
2505 		case 0x2f:	/* Informational exceptions log page */
2506 			arr[3] = resp_ie_l_pg(arr + 4);
2507 			break;
2508 		default:
2509 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2510 			return check_condition_result;
2511 		}
2512 	} else if (0xff == subpcode) {
2513 		arr[0] |= 0x40;
2514 		arr[1] = subpcode;
2515 		switch (pcode) {
2516 		case 0x0:	/* Supported log pages and subpages log page */
2517 			n = 4;
2518 			arr[n++] = 0x0;
2519 			arr[n++] = 0x0;		/* 0,0 page */
2520 			arr[n++] = 0x0;
2521 			arr[n++] = 0xff;	/* this page */
2522 			arr[n++] = 0xd;
2523 			arr[n++] = 0x0;		/* Temperature */
2524 			arr[n++] = 0x2f;
2525 			arr[n++] = 0x0;	/* Informational exceptions */
2526 			arr[3] = n - 4;
2527 			break;
2528 		case 0xd:	/* Temperature subpages */
2529 			n = 4;
2530 			arr[n++] = 0xd;
2531 			arr[n++] = 0x0;		/* Temperature */
2532 			arr[3] = n - 4;
2533 			break;
2534 		case 0x2f:	/* Informational exceptions subpages */
2535 			n = 4;
2536 			arr[n++] = 0x2f;
2537 			arr[n++] = 0x0;		/* Informational exceptions */
2538 			arr[3] = n - 4;
2539 			break;
2540 		default:
2541 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2542 			return check_condition_result;
2543 		}
2544 	} else {
2545 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2546 		return check_condition_result;
2547 	}
2548 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2549 	return fill_from_dev_buffer(scp, arr,
2550 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2551 }
2552 
2553 static inline int check_device_access_params(struct scsi_cmnd *scp,
2554 	unsigned long long lba, unsigned int num, bool write)
2555 {
2556 	if (lba + num > sdebug_capacity) {
2557 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2558 		return check_condition_result;
2559 	}
2560 	/* transfer length excessive (tie in to block limits VPD page) */
2561 	if (num > sdebug_store_sectors) {
2562 		/* needs work to find which cdb byte 'num' comes from */
2563 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2564 		return check_condition_result;
2565 	}
2566 	if (write && unlikely(sdebug_wp)) {
2567 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2568 		return check_condition_result;
2569 	}
2570 	return 0;
2571 }
2572 
2573 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2574 {
2575 	return sdebug_fake_rw ?
2576 			NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2577 }
2578 
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: sectors that wrap past end of store */
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		/* flag used by SYNCHRONIZE CACHE handling elsewhere */
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* nothing to move: empty data buffer or no backing store (fake_rw) */
	if (!sdb->length || !sip)
		return 0;
	/* command's data direction must match the requested transfer */
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() divides lba in place and returns the remainder; the
	 * store is smaller than the advertised capacity so accesses are
	 * folded modulo sdebug_store_sectors */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	/* first (possibly only) segment: up to the end of the store */
	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped segment continues from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2621 
2622 /* Returns number of bytes copied or -1 if error. */
2623 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2624 {
2625 	struct scsi_data_buffer *sdb = &scp->sdb;
2626 
2627 	if (!sdb->length)
2628 		return 0;
2629 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2630 		return -1;
2631 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2632 			      num * sdebug_sector_size, 0, true);
2633 }
2634 
2635 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2636  * arr into sip->storep+lba and return true. If comparison fails then
2637  * return false. */
2638 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2639 			      const u8 *arr, bool compare_only)
2640 {
2641 	bool res;
2642 	u64 block, rest = 0;
2643 	u32 store_blks = sdebug_store_sectors;
2644 	u32 lb_size = sdebug_sector_size;
2645 	u8 *fsp = sip->storep;
2646 
2647 	block = do_div(lba, store_blks);
2648 	if (block + num > store_blks)
2649 		rest = block + num - store_blks;
2650 
2651 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2652 	if (!res)
2653 		return res;
2654 	if (rest)
2655 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2656 			     rest * lb_size);
2657 	if (!res)
2658 		return res;
2659 	if (compare_only)
2660 		return true;
2661 	arr += num * lb_size;
2662 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2663 	if (rest)
2664 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2665 	return res;
2666 }
2667 
2668 static __be16 dif_compute_csum(const void *buf, int len)
2669 {
2670 	__be16 csum;
2671 
2672 	if (sdebug_guard)
2673 		csum = (__force __be16)ip_compute_csum(buf, len);
2674 	else
2675 		csum = cpu_to_be16(crc_t10dif(buf, len));
2676 
2677 	return csum;
2678 }
2679 
2680 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2681 		      sector_t sector, u32 ei_lba)
2682 {
2683 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2684 
2685 	if (sdt->guard_tag != csum) {
2686 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2687 			(unsigned long)sector,
2688 			be16_to_cpu(sdt->guard_tag),
2689 			be16_to_cpu(csum));
2690 		return 0x01;
2691 	}
2692 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2693 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2694 		pr_err("REF check failed on sector %lu\n",
2695 			(unsigned long)sector);
2696 		return 0x03;
2697 	}
2698 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2699 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2700 		pr_err("REF check failed on sector %lu\n",
2701 			(unsigned long)sector);
2702 		return 0x03;
2703 	}
2704 	return 0;
2705 }
2706 
/* Copy protection information tuples between the command's protection
 * scatter-gather list and the internal dif_storep array; direction is
 * selected by @read (true: store -> sgl). Handles wrap-around of the
 * store. */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	/* NOTE(review): devip2sip() returns NULL when sdebug_fake_rw is
	 * set; sip->dif_storep below assumes DIF and fake_rw are never
	 * enabled together — confirm at module-parameter validation. */
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;	/* bytes that wrap past store end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		/* first segment: up to the end of dif_storep */
		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped segment restarts at dif_storep[0] */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2752 
2753 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
2754 			    unsigned int sectors, u32 ei_lba)
2755 {
2756 	unsigned int i;
2757 	sector_t sector;
2758 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
2759 						scp->device->hostdata);
2760 	struct t10_pi_tuple *sdt;
2761 
2762 	for (i = 0; i < sectors; i++, ei_lba++) {
2763 		int ret;
2764 
2765 		sector = start_sec + i;
2766 		sdt = dif_store(sip, sector);
2767 
2768 		if (sdt->app_tag == cpu_to_be16(0xffff))
2769 			continue;
2770 
2771 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
2772 				 ei_lba);
2773 		if (ret) {
2774 			dif_errors++;
2775 			return ret;
2776 		}
2777 	}
2778 
2779 	dif_copy_prot(scp, start_sec, sectors, true);
2780 	dix_reads++;
2781 
2782 	return 0;
2783 }
2784 
/* Handle READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes LBA/length from the cdb, performs protection-information and
 * medium-error checks, then copies data from the backing store into the
 * command's data buffer. Returns 0 or a SCSI mid-level result. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;	/* expected initial LBA, only set by READ(32) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fake_rw has no store, so fall back to a dedicated dummy lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;

	/* decode LBA, transfer length and whether RDPROTECT is checked */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 requires READ(32); reject RDPROTECT != 0 here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			/* injected short read: halve the transfer length */
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* optionally simulate an unrecoverable medium error in a window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report how many bytes were actually transferred */
	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* optional error injection after a successful transfer */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2917 
/* Dump @len bytes of a sector to the kernel log, 16 bytes per line;
 * printable ASCII is shown as " c ", everything else as a hex pair. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/* bound j by len as well: the old loop read buf[i+j] past
		 * the end of the buffer when len was not a multiple of 16 */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i+j];

			/* printable ASCII is 0x20..0x7e inclusive; the old
			 * test (c < 0x7e) wrongly hex-dumped '~' */
			if (c >= 0x20 && c < 0x7f)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2939 
/* Verify protection information accompanying a write: walk the
 * protection sgl and the data sgl in lock-step, checking each tuple
 * with dif_verify(); on success copy the tuples into dif_storep.
 * Returns 0 on success or a non-zero dif_verify() code (sense ASCQ). */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset into current protection page */
	int dpage_offset;	/* offset into current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* data sgl exhausted before protection sgl */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: commit them to the PI store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3011 
3012 static unsigned long lba_to_map_index(sector_t lba)
3013 {
3014 	if (sdebug_unmap_alignment)
3015 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3016 	sector_div(lba, sdebug_unmap_granularity);
3017 	return lba;
3018 }
3019 
3020 static sector_t map_index_to_lba(unsigned long index)
3021 {
3022 	sector_t lba = index * sdebug_unmap_granularity;
3023 
3024 	if (sdebug_unmap_alignment)
3025 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3026 	return lba;
3027 }
3028 
3029 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3030 			      unsigned int *num)
3031 {
3032 	sector_t end;
3033 	unsigned int mapped;
3034 	unsigned long index;
3035 	unsigned long next;
3036 
3037 	index = lba_to_map_index(lba);
3038 	mapped = test_bit(index, sip->map_storep);
3039 
3040 	if (mapped)
3041 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3042 	else
3043 		next = find_next_bit(sip->map_storep, map_size, index);
3044 
3045 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3046 	*num = end - lba;
3047 	return mapped;
3048 }
3049 
3050 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3051 		       unsigned int len)
3052 {
3053 	sector_t end = lba + len;
3054 
3055 	while (lba < end) {
3056 		unsigned long index = lba_to_map_index(lba);
3057 
3058 		if (index < map_size)
3059 			set_bit(index, sip->map_storep);
3060 
3061 		lba = map_index_to_lba(index + 1);
3062 	}
3063 }
3064 
3065 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3066 			 unsigned int len)
3067 {
3068 	sector_t end = lba + len;
3069 	u8 *fsp = sip->storep;
3070 
3071 	while (lba < end) {
3072 		unsigned long index = lba_to_map_index(lba);
3073 
3074 		if (lba == map_index_to_lba(index) &&
3075 		    lba + sdebug_unmap_granularity <= end &&
3076 		    index < map_size) {
3077 			clear_bit(index, sip->map_storep);
3078 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3079 				memset(fsp + lba * sdebug_sector_size,
3080 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3081 				       sdebug_sector_size *
3082 				       sdebug_unmap_granularity);
3083 			}
3084 			if (sip->dif_storep) {
3085 				memset(sip->dif_storep + lba, 0xff,
3086 				       sizeof(*sip->dif_storep) *
3087 				       sdebug_unmap_granularity);
3088 			}
3089 		}
3090 		lba = map_index_to_lba(index + 1);
3091 	}
3092 }
3093 
/* Handle WRITE(6/10/12/16/32) and the write half of XDWRITEREAD(10).
 * Decodes LBA/length from the cdb, performs protection-information
 * checks, then copies data from the command buffer into the backing
 * store. Returns 0 or a SCSI mid-level result. */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;	/* expected initial LBA, only set by WRITE(32) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fake_rw has no store, so fall back to a dedicated dummy lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	/* decode LBA, transfer length and whether WRPROTECT is checked */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 requires WRITE(32); reject WRPROTECT != 0 here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}
	ret = check_device_access_params(scp, lba, num, true);
	if (ret)
		return ret;
	write_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	/* thin provisioning: record the range as mapped */
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection after a successful transfer */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3205 
3206 /*
3207  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3208  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3209  */
/* Handle WRITE SCATTERED(16) and WRITE SCATTERED(32): fetch the whole
 * data-out buffer (LBA range descriptor header plus data), then apply
 * each descriptor's write under the store's write lock. Returns 0 or a
 * SCSI mid-level result. */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* buffer for header + descriptors + data */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fake_rw has no store, so fall back to a dedicated dummy lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* all descriptors (plus header) must fit before the data area */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;	/* data for descriptor 0 starts here */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* walk the LBA range descriptors; first one is after the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* WRITE SCATTERED(32) carries an expected initial LBA */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* thin provisioning: record the range as mapped */
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					/* NOTE(review): returns
					 * illegal_condition_result here while
					 * resp_write_dt0() returns
					 * check_condition_result for the same
					 * injection — looks like a copy/paste
					 * inconsistency; confirm intent */
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3373 
3374 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3375 			   u32 ei_lba, bool unmap, bool ndob)
3376 {
3377 	unsigned long long i;
3378 	u64 block, lbaa;
3379 	u32 lb_size = sdebug_sector_size;
3380 	int ret;
3381 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3382 						scp->device->hostdata);
3383 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3384 	u8 *fs1p;
3385 	u8 *fsp;
3386 
3387 	ret = check_device_access_params(scp, lba, num, true);
3388 	if (ret)
3389 		return ret;
3390 
3391 	write_lock(macc_lckp);
3392 
3393 	if (unmap && scsi_debug_lbp()) {
3394 		unmap_region(sip, lba, num);
3395 		goto out;
3396 	}
3397 	lbaa = lba;
3398 	block = do_div(lbaa, sdebug_store_sectors);
3399 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3400 	fsp = sip->storep;
3401 	fs1p = fsp + (block * lb_size);
3402 	if (ndob) {
3403 		memset(fs1p, 0, lb_size);
3404 		ret = 0;
3405 	} else
3406 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3407 
3408 	if (-1 == ret) {
3409 		write_unlock(&sip->macc_lck);
3410 		return DID_ERROR << 16;
3411 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3412 		sdev_printk(KERN_INFO, scp->device,
3413 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3414 			    my_name, "write same", lb_size, ret);
3415 
3416 	/* Copy first sector to remaining blocks */
3417 	for (i = 1 ; i < num ; i++) {
3418 		lbaa = lba + i;
3419 		block = do_div(lbaa, sdebug_store_sectors);
3420 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3421 	}
3422 	if (scsi_debug_lbp())
3423 		map_region(sip, lba, num);
3424 out:
3425 	write_unlock(macc_lckp);
3426 
3427 	return 0;
3428 }
3429 
3430 static int resp_write_same_10(struct scsi_cmnd *scp,
3431 			      struct sdebug_dev_info *devip)
3432 {
3433 	u8 *cmd = scp->cmnd;
3434 	u32 lba;
3435 	u16 num;
3436 	u32 ei_lba = 0;
3437 	bool unmap = false;
3438 
3439 	if (cmd[1] & 0x8) {
3440 		if (sdebug_lbpws10 == 0) {
3441 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3442 			return check_condition_result;
3443 		} else
3444 			unmap = true;
3445 	}
3446 	lba = get_unaligned_be32(cmd + 2);
3447 	num = get_unaligned_be16(cmd + 7);
3448 	if (num > sdebug_write_same_length) {
3449 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3450 		return check_condition_result;
3451 	}
3452 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3453 }
3454 
3455 static int resp_write_same_16(struct scsi_cmnd *scp,
3456 			      struct sdebug_dev_info *devip)
3457 {
3458 	u8 *cmd = scp->cmnd;
3459 	u64 lba;
3460 	u32 num;
3461 	u32 ei_lba = 0;
3462 	bool unmap = false;
3463 	bool ndob = false;
3464 
3465 	if (cmd[1] & 0x8) {	/* UNMAP */
3466 		if (sdebug_lbpws == 0) {
3467 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3468 			return check_condition_result;
3469 		} else
3470 			unmap = true;
3471 	}
3472 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3473 		ndob = true;
3474 	lba = get_unaligned_be64(cmd + 2);
3475 	num = get_unaligned_be32(cmd + 10);
3476 	if (num > sdebug_write_same_length) {
3477 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3478 		return check_condition_result;
3479 	}
3480 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3481 }
3482 
3483 /* Note the mode field is in the same position as the (lower) service action
3484  * field. For the Report supported operation codes command, SPC-4 suggests
3485  * each mode of this command should be reported separately; for future. */
3486 static int resp_write_buffer(struct scsi_cmnd *scp,
3487 			     struct sdebug_dev_info *devip)
3488 {
3489 	u8 *cmd = scp->cmnd;
3490 	struct scsi_device *sdp = scp->device;
3491 	struct sdebug_dev_info *dp;
3492 	u8 mode;
3493 
3494 	mode = cmd[1] & 0x1f;
3495 	switch (mode) {
3496 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3497 		/* set UAs on this device only */
3498 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3499 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3500 		break;
3501 	case 0x5:	/* download MC, save and ACT */
3502 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3503 		break;
3504 	case 0x6:	/* download MC with offsets and ACT */
3505 		/* set UAs on most devices (LUs) in this target */
3506 		list_for_each_entry(dp,
3507 				    &devip->sdbg_host->dev_info_list,
3508 				    dev_list)
3509 			if (dp->target == sdp->id) {
3510 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3511 				if (devip != dp)
3512 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3513 						dp->uas_bm);
3514 			}
3515 		break;
3516 	case 0x7:	/* download MC with offsets, save, and ACT */
3517 		/* set UA on all devices (LUs) in this target */
3518 		list_for_each_entry(dp,
3519 				    &devip->sdbg_host->dev_info_list,
3520 				    dev_list)
3521 			if (dp->target == sdp->id)
3522 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3523 					dp->uas_bm);
3524 		break;
3525 	default:
3526 		/* do nothing for this command for other mode values */
3527 		break;
3528 	}
3529 	return 0;
3530 }
3531 
/* Handle COMPARE AND WRITE: the data-out buffer carries 2*num blocks;
 * the lower half is compared with the store and, only on a full match,
 * the upper half is written. Returns 0 or a SCSI mid-level result. */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	/* NOTE(review): sip is NULL when sdebug_fake_rw is set, yet
	 * comp_write_worker() dereferences sip->storep — presumably
	 * COMPARE AND WRITE and fake_rw are never combined; confirm */
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u64 lba;
	u32 dnum;	/* blocks in data-out: compare half + write half */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	/* NOTE(review): write=false here even though a matching compare
	 * writes to the medium, so the sdebug_wp write-protect check is
	 * bypassed — confirm whether that is intended */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* thin provisioning: record the range as mapped */
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3593 
/*
 * One block descriptor from an UNMAP parameter list (SBC-3): starting
 * LBA plus count of blocks to deallocate; 16 bytes each on the wire.
 */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3599 
3600 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3601 {
3602 	unsigned char *buf;
3603 	struct unmap_block_desc *desc;
3604 	struct sdeb_store_info *sip = devip2sip(devip);
3605 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3606 	unsigned int i, payload_len, descriptors;
3607 	int ret;
3608 
3609 	if (!scsi_debug_lbp())
3610 		return 0;	/* fib and say its done */
3611 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3612 	BUG_ON(scsi_bufflen(scp) != payload_len);
3613 
3614 	descriptors = (payload_len - 8) / 16;
3615 	if (descriptors > sdebug_unmap_max_desc) {
3616 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3617 		return check_condition_result;
3618 	}
3619 
3620 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3621 	if (!buf) {
3622 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3623 				INSUFF_RES_ASCQ);
3624 		return check_condition_result;
3625 	}
3626 
3627 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3628 
3629 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3630 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3631 
3632 	desc = (void *)&buf[8];
3633 
3634 	write_lock(macc_lckp);
3635 
3636 	for (i = 0 ; i < descriptors ; i++) {
3637 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3638 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3639 
3640 		ret = check_device_access_params(scp, lba, num, true);
3641 		if (ret)
3642 			goto out;
3643 
3644 		unmap_region(sip, lba, num);
3645 	}
3646 
3647 	ret = 0;
3648 
3649 out:
3650 	write_unlock(macc_lckp);
3651 	kfree(buf);
3652 
3653 	return ret;
3654 }
3655 
3656 #define SDEBUG_GET_LBA_STATUS_LEN 32
3657 
3658 static int resp_get_lba_status(struct scsi_cmnd *scp,
3659 			       struct sdebug_dev_info *devip)
3660 {
3661 	u8 *cmd = scp->cmnd;
3662 	struct sdeb_store_info *sip = devip2sip(devip);
3663 	u64 lba;
3664 	u32 alloc_len, mapped, num;
3665 	int ret;
3666 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3667 
3668 	lba = get_unaligned_be64(cmd + 2);
3669 	alloc_len = get_unaligned_be32(cmd + 10);
3670 
3671 	if (alloc_len < 24)
3672 		return 0;
3673 
3674 	ret = check_device_access_params(scp, lba, 1, false);
3675 	if (ret)
3676 		return ret;
3677 
3678 	if (scsi_debug_lbp())
3679 		mapped = map_state(sip, lba, &num);
3680 	else {
3681 		mapped = 1;
3682 		/* following just in case virtual_gb changed */
3683 		sdebug_capacity = get_sdebug_capacity();
3684 		if (sdebug_capacity - lba <= 0xffffffff)
3685 			num = sdebug_capacity - lba;
3686 		else
3687 			num = 0xffffffff;
3688 	}
3689 
3690 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3691 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3692 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3693 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3694 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3695 
3696 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3697 }
3698 
3699 static int resp_sync_cache(struct scsi_cmnd *scp,
3700 			   struct sdebug_dev_info *devip)
3701 {
3702 	int res = 0;
3703 	u64 lba;
3704 	u32 num_blocks;
3705 	u8 *cmd = scp->cmnd;
3706 
3707 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3708 		lba = get_unaligned_be32(cmd + 2);
3709 		num_blocks = get_unaligned_be16(cmd + 7);
3710 	} else {				/* SYNCHRONIZE_CACHE(16) */
3711 		lba = get_unaligned_be64(cmd + 2);
3712 		num_blocks = get_unaligned_be32(cmd + 10);
3713 	}
3714 	if (lba + num_blocks > sdebug_capacity) {
3715 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3716 		return check_condition_result;
3717 	}
3718 	if (!write_since_sync || cmd[1] & 0x2)
3719 		res = SDEG_RES_IMMED_MASK;
3720 	else		/* delay if write_since_sync and IMMED clear */
3721 		write_since_sync = false;
3722 	return res;
3723 }
3724 
3725 /*
3726  * Assuming the LBA+num_blocks is not out-of-range, this function will return
3727  * CONDITION MET if the specified blocks will/have fitted in the cache, and
3728  * a GOOD status otherwise. Model a disk with a big cache and yield
3729  * CONDITION MET. Actually tries to bring range in main memory into the
3730  * cache associated with the CPU(s).
3731  */
3732 static int resp_pre_fetch(struct scsi_cmnd *scp,
3733 			  struct sdebug_dev_info *devip)
3734 {
3735 	int res = 0;
3736 	u64 lba;
3737 	u64 block, rest = 0;
3738 	u32 nblks;
3739 	u8 *cmd = scp->cmnd;
3740 	struct sdeb_store_info *sip = devip2sip(devip);
3741 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3742 	u8 *fsp = sip ? sip->storep : NULL;
3743 
3744 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
3745 		lba = get_unaligned_be32(cmd + 2);
3746 		nblks = get_unaligned_be16(cmd + 7);
3747 	} else {			/* PRE-FETCH(16) */
3748 		lba = get_unaligned_be64(cmd + 2);
3749 		nblks = get_unaligned_be32(cmd + 10);
3750 	}
3751 	if (lba + nblks > sdebug_capacity) {
3752 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3753 		return check_condition_result;
3754 	}
3755 	if (!fsp)
3756 		goto fini;
3757 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
3758 	block = do_div(lba, sdebug_store_sectors);
3759 	if (block + nblks > sdebug_store_sectors)
3760 		rest = block + nblks - sdebug_store_sectors;
3761 
3762 	/* Try to bring the PRE-FETCH range into CPU's cache */
3763 	read_lock(macc_lckp);
3764 	prefetch_range(fsp + (sdebug_sector_size * block),
3765 		       (nblks - rest) * sdebug_sector_size);
3766 	if (rest)
3767 		prefetch_range(fsp, rest * sdebug_sector_size);
3768 	read_unlock(macc_lckp);
3769 fini:
3770 	if (cmd[1] & 0x2)
3771 		res = SDEG_RES_IMMED_MASK;
3772 	return res | condition_met_result;
3773 }
3774 
3775 #define RL_BUCKET_ELEMS 8
3776 
3777 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3778  * (W-LUN), the normal Linux scanning logic does not associate it with a
3779  * device (e.g. /dev/sg7). The following magic will make that association:
3780  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3781  * where <n> is a host number. If there are multiple targets in a host then
3782  * the above will associate a W-LUN to each target. To only get a W-LUN
3783  * for target 2, then use "echo '- 2 49409' > scan" .
3784  */
/*
 * REPORT LUNS response. Builds the LUN list RL_BUCKET_ELEMS entries at
 * a time in a small stack buffer, streaming each full bucket out with
 * p_fill_from_dev_buffer() at increasing offsets.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* one bucket of the response, built (and reused) on the stack */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN 0 suppressed by module parameter */

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)	/* final, partial bucket */
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well known LU in the next slot */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)	/* flush the final (partial) bucket */
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3875 
/*
 * VERIFY(10)/(16) response for BYTCHK 1 and 3 (BYTCHK 0 is a no-op
 * here, BYTCHK 2 is rejected). With BYTCHK 3 one block is transferred
 * and compared against every block of the range.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* with no backing store, fall back to the shared dummy lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	a_num = is_bytchk3 ? 1 : vnum;	/* blocks actually transferred */
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* NOTE(review): vnum from VERIFY(16) is unbounded (up to 4G-1
	 * blocks) so this can request a huge allocation; kcalloc() will
	 * simply fail for oversize requests - consider capping vnum. */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single received block across the range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
3950 
3951 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3952 {
3953 	u32 tag = blk_mq_unique_tag(cmnd->request);
3954 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3955 
3956 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3957 	if (WARN_ON_ONCE(hwq >= submit_queues))
3958 		hwq = 0;
3959 	return sdebug_q_arr + hwq;
3960 }
3961 
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions on a different CPU than submission */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* defensive: a wild index means the completion is dropped */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* command raced away (e.g. aborted) before we got here */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* retirement finishes once the highest in-use slot falls
		 * under the new limit; otherwise shrink the watermark */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4035 
4036 /* When high resolution timer goes off this function is called. */
4037 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4038 {
4039 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4040 						  hrt);
4041 	sdebug_q_cmd_complete(sd_dp);
4042 	return HRTIMER_NORESTART;
4043 }
4044 
4045 /* When work queue schedules work, it calls this function. */
4046 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4047 {
4048 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4049 						  ew.work);
4050 	sdebug_q_cmd_complete(sd_dp);
4051 }
4052 
/* state for sdebug_uuid_ctl == 2: one UUID shared by every LU,
 * generated lazily on first device creation */
static bool got_shared_uuid;
static uuid_t shared_uuid;
4055 
4056 static struct sdebug_dev_info *sdebug_device_create(
4057 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4058 {
4059 	struct sdebug_dev_info *devip;
4060 
4061 	devip = kzalloc(sizeof(*devip), flags);
4062 	if (devip) {
4063 		if (sdebug_uuid_ctl == 1)
4064 			uuid_gen(&devip->lu_name);
4065 		else if (sdebug_uuid_ctl == 2) {
4066 			if (got_shared_uuid)
4067 				devip->lu_name = shared_uuid;
4068 			else {
4069 				uuid_gen(&shared_uuid);
4070 				got_shared_uuid = true;
4071 				devip->lu_name = shared_uuid;
4072 			}
4073 		}
4074 		devip->sdbg_host = sdbg_host;
4075 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4076 	}
4077 	return devip;
4078 }
4079 
4080 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4081 {
4082 	struct sdebug_host_info *sdbg_host;
4083 	struct sdebug_dev_info *open_devip = NULL;
4084 	struct sdebug_dev_info *devip;
4085 
4086 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4087 	if (!sdbg_host) {
4088 		pr_err("Host info NULL\n");
4089 		return NULL;
4090 	}
4091 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4092 		if ((devip->used) && (devip->channel == sdev->channel) &&
4093 		    (devip->target == sdev->id) &&
4094 		    (devip->lun == sdev->lun))
4095 			return devip;
4096 		else {
4097 			if ((!devip->used) && (!open_devip))
4098 				open_devip = devip;
4099 		}
4100 	}
4101 	if (!open_devip) { /* try and make a new one */
4102 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4103 		if (!open_devip) {
4104 			pr_err("out of memory at line %d\n", __LINE__);
4105 			return NULL;
4106 		}
4107 	}
4108 
4109 	open_devip->channel = sdev->channel;
4110 	open_devip->target = sdev->id;
4111 	open_devip->lun = sdev->lun;
4112 	open_devip->sdbg_host = sdbg_host;
4113 	atomic_set(&open_devip->num_in_q, 0);
4114 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4115 	open_devip->used = true;
4116 	return open_devip;
4117 }
4118 
4119 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4120 {
4121 	if (sdebug_verbose)
4122 		pr_info("slave_alloc <%u %u %u %llu>\n",
4123 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4124 	return 0;
4125 }
4126 
4127 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4128 {
4129 	struct sdebug_dev_info *devip =
4130 			(struct sdebug_dev_info *)sdp->hostdata;
4131 
4132 	if (sdebug_verbose)
4133 		pr_info("slave_configure <%u %u %u %llu>\n",
4134 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4135 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4136 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4137 	if (devip == NULL) {
4138 		devip = find_build_dev_info(sdp);
4139 		if (devip == NULL)
4140 			return 1;  /* no resources, will be marked offline */
4141 	}
4142 	sdp->hostdata = devip;
4143 	if (sdebug_no_uld)
4144 		sdp->no_uld_attach = 1;
4145 	config_cdb_len(sdp);
4146 	return 0;
4147 }
4148 
4149 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4150 {
4151 	struct sdebug_dev_info *devip =
4152 		(struct sdebug_dev_info *)sdp->hostdata;
4153 
4154 	if (sdebug_verbose)
4155 		pr_info("slave_destroy <%u %u %u %llu>\n",
4156 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4157 	if (devip) {
4158 		/* make this slot available for re-use */
4159 		devip->used = false;
4160 		sdp->hostdata = NULL;
4161 	}
4162 }
4163 
4164 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4165 			   enum sdeb_defer_type defer_t)
4166 {
4167 	if (!sd_dp)
4168 		return;
4169 	if (defer_t == SDEB_DEFER_HRT)
4170 		hrtimer_cancel(&sd_dp->hrt);
4171 	else if (defer_t == SDEB_DEFER_WQ)
4172 		cancel_work_sync(&sd_dp->ew.work);
4173 }
4174 
/*
 * If @cmnd is found on one of the submission queues, detaches it,
 * cancels its timer or work item via stop_qc_helper() and returns
 * true; else returns false.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* also scan slots still retiring above sdebug_max_queue */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before stop_qc_helper():
				 * cancel_work_sync() can sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4220 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock around stop_qc_helper():
				 * cancel_work_sync() can sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4259 
4260 /* Free queued command memory on heap */
4261 static void free_all_queued(void)
4262 {
4263 	int j, k;
4264 	struct sdebug_queue *sqp;
4265 	struct sdebug_queued_cmd *sqcp;
4266 
4267 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4268 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4269 			sqcp = &sqp->qc_arr[k];
4270 			kfree(sqcp->sd_dp);
4271 			sqcp->sd_dp = NULL;
4272 		}
4273 	}
4274 }
4275 
4276 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4277 {
4278 	bool ok;
4279 
4280 	++num_aborts;
4281 	if (SCpnt) {
4282 		ok = stop_queued_cmnd(SCpnt);
4283 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4284 			sdev_printk(KERN_INFO, SCpnt->device,
4285 				    "%s: command%s found\n", __func__,
4286 				    ok ? "" : " not");
4287 	}
4288 	return SUCCESS;
4289 }
4290 
4291 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4292 {
4293 	++num_dev_resets;
4294 	if (SCpnt && SCpnt->device) {
4295 		struct scsi_device *sdp = SCpnt->device;
4296 		struct sdebug_dev_info *devip =
4297 				(struct sdebug_dev_info *)sdp->hostdata;
4298 
4299 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4300 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4301 		if (devip)
4302 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4303 	}
4304 	return SUCCESS;
4305 }
4306 
4307 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4308 {
4309 	struct sdebug_host_info *sdbg_host;
4310 	struct sdebug_dev_info *devip;
4311 	struct scsi_device *sdp;
4312 	struct Scsi_Host *hp;
4313 	int k = 0;
4314 
4315 	++num_target_resets;
4316 	if (!SCpnt)
4317 		goto lie;
4318 	sdp = SCpnt->device;
4319 	if (!sdp)
4320 		goto lie;
4321 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4322 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4323 	hp = sdp->host;
4324 	if (!hp)
4325 		goto lie;
4326 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4327 	if (sdbg_host) {
4328 		list_for_each_entry(devip,
4329 				    &sdbg_host->dev_info_list,
4330 				    dev_list)
4331 			if (devip->target == sdp->id) {
4332 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4333 				++k;
4334 			}
4335 	}
4336 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4337 		sdev_printk(KERN_INFO, sdp,
4338 			    "%s: %d device(s) found in target\n", __func__, k);
4339 lie:
4340 	return SUCCESS;
4341 }
4342 
4343 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4344 {
4345 	struct sdebug_host_info *sdbg_host;
4346 	struct sdebug_dev_info *devip;
4347 	struct scsi_device *sdp;
4348 	struct Scsi_Host *hp;
4349 	int k = 0;
4350 
4351 	++num_bus_resets;
4352 	if (!(SCpnt && SCpnt->device))
4353 		goto lie;
4354 	sdp = SCpnt->device;
4355 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4356 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4357 	hp = sdp->host;
4358 	if (hp) {
4359 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4360 		if (sdbg_host) {
4361 			list_for_each_entry(devip,
4362 					    &sdbg_host->dev_info_list,
4363 					    dev_list) {
4364 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4365 				++k;
4366 			}
4367 		}
4368 	}
4369 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4370 		sdev_printk(KERN_INFO, sdp,
4371 			    "%s: %d device(s) found in host\n", __func__, k);
4372 lie:
4373 	return SUCCESS;
4374 }
4375 
4376 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4377 {
4378 	struct sdebug_host_info *sdbg_host;
4379 	struct sdebug_dev_info *devip;
4380 	int k = 0;
4381 
4382 	++num_host_resets;
4383 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4384 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4385 	spin_lock(&sdebug_host_list_lock);
4386 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4387 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4388 				    dev_list) {
4389 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4390 			++k;
4391 		}
4392 	}
4393 	spin_unlock(&sdebug_host_list_lock);
4394 	stop_all_queued();
4395 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4396 		sdev_printk(KERN_INFO, SCpnt->device,
4397 			    "%s: %d device(s) found\n", __func__, k);
4398 	return SUCCESS;
4399 }
4400 
/*
 * Write an MBR-style partition table at the start of the ram store so
 * that sdebug_num_parts primary partitions cover the disk.
 * @ramp:       start of the ram store (i.e. sector 0)
 * @store_size: store size in bytes; no-op when under 1 MiB
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* the first track is left for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* align each partition start on a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel for loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS address of the partition's first sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the partition's last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4449 
4450 static void block_unblock_all_queues(bool block)
4451 {
4452 	int j;
4453 	struct sdebug_queue *sqp;
4454 
4455 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4456 		atomic_set(&sqp->blocked, (int)block);
4457 }
4458 
4459 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4460  * commands will be processed normally before triggers occur.
4461  */
4462 static void tweak_cmnd_count(void)
4463 {
4464 	int count, modulo;
4465 
4466 	modulo = abs(sdebug_every_nth);
4467 	if (modulo < 2)
4468 		return;
4469 	block_unblock_all_queues(true);
4470 	count = atomic_read(&sdebug_cmnd_count);
4471 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4472 	block_unblock_all_queues(false);
4473 }
4474 
4475 static void clear_queue_stats(void)
4476 {
4477 	atomic_set(&sdebug_cmnd_count, 0);
4478 	atomic_set(&sdebug_completions, 0);
4479 	atomic_set(&sdebug_miss_cpus, 0);
4480 	atomic_set(&sdebug_a_tsf, 0);
4481 }
4482 
4483 static void setup_inject(struct sdebug_queue *sqp,
4484 			 struct sdebug_queued_cmd *sqcp)
4485 {
4486 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4487 		if (sdebug_every_nth > 0)
4488 			sqcp->inj_recovered = sqcp->inj_transport
4489 				= sqcp->inj_dif
4490 				= sqcp->inj_dix = sqcp->inj_short
4491 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4492 		return;
4493 	}
4494 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4495 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4496 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4497 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4498 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4499 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4500 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4501 }
4502 
4503 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
4504 
4505 /* Complete the processing of the thread that queued a SCSI command to this
4506  * driver. It either completes the command by calling cmnd_done() or
4507  * schedules a hr timer or work queue then returns 0. Returns
4508  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4509  */
4510 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4511 			 int scsi_result,
4512 			 int (*pfp)(struct scsi_cmnd *,
4513 				    struct sdebug_dev_info *),
4514 			 int delta_jiff, int ndelay)
4515 {
4516 	bool new_sd_dp;
4517 	int k, num_in_q, qdepth, inject;
4518 	unsigned long iflags;
4519 	u64 ns_from_boot = 0;
4520 	struct sdebug_queue *sqp;
4521 	struct sdebug_queued_cmd *sqcp;
4522 	struct scsi_device *sdp;
4523 	struct sdebug_defer *sd_dp;
4524 
4525 	if (unlikely(devip == NULL)) {
4526 		if (scsi_result == 0)
4527 			scsi_result = DID_NO_CONNECT << 16;
4528 		goto respond_in_thread;
4529 	}
4530 	sdp = cmnd->device;
4531 
4532 	if (delta_jiff == 0)
4533 		goto respond_in_thread;
4534 
4535 	sqp = get_queue(cmnd);
4536 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4537 	if (unlikely(atomic_read(&sqp->blocked))) {
4538 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4539 		return SCSI_MLQUEUE_HOST_BUSY;
4540 	}
4541 	num_in_q = atomic_read(&devip->num_in_q);
4542 	qdepth = cmnd->device->queue_depth;
4543 	inject = 0;
4544 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4545 		if (scsi_result) {
4546 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4547 			goto respond_in_thread;
4548 		} else
4549 			scsi_result = device_qfull_result;
4550 	} else if (unlikely(sdebug_every_nth &&
4551 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4552 			    (scsi_result == 0))) {
4553 		if ((num_in_q == (qdepth - 1)) &&
4554 		    (atomic_inc_return(&sdebug_a_tsf) >=
4555 		     abs(sdebug_every_nth))) {
4556 			atomic_set(&sdebug_a_tsf, 0);
4557 			inject = 1;
4558 			scsi_result = device_qfull_result;
4559 		}
4560 	}
4561 
4562 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4563 	if (unlikely(k >= sdebug_max_queue)) {
4564 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4565 		if (scsi_result)
4566 			goto respond_in_thread;
4567 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4568 			scsi_result = device_qfull_result;
4569 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4570 			sdev_printk(KERN_INFO, sdp,
4571 				    "%s: max_queue=%d exceeded, %s\n",
4572 				    __func__, sdebug_max_queue,
4573 				    (scsi_result ?  "status: TASK SET FULL" :
4574 						    "report: host busy"));
4575 		if (scsi_result)
4576 			goto respond_in_thread;
4577 		else
4578 			return SCSI_MLQUEUE_HOST_BUSY;
4579 	}
4580 	__set_bit(k, sqp->in_use_bm);
4581 	atomic_inc(&devip->num_in_q);
4582 	sqcp = &sqp->qc_arr[k];
4583 	sqcp->a_cmnd = cmnd;
4584 	cmnd->host_scribble = (unsigned char *)sqcp;
4585 	sd_dp = sqcp->sd_dp;
4586 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4587 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4588 		setup_inject(sqp, sqcp);
4589 	if (sd_dp == NULL) {
4590 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4591 		if (sd_dp == NULL)
4592 			return SCSI_MLQUEUE_HOST_BUSY;
4593 		new_sd_dp = true;
4594 	} else {
4595 		new_sd_dp = false;
4596 	}
4597 
4598 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
4599 		ns_from_boot = ktime_get_boottime_ns();
4600 
4601 	/* one of the resp_*() response functions is called here */
4602 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4603 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
4604 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
4605 		delta_jiff = ndelay = 0;
4606 	}
4607 	if (cmnd->result == 0 && scsi_result != 0)
4608 		cmnd->result = scsi_result;
4609 
4610 	if (unlikely(sdebug_verbose && cmnd->result))
4611 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4612 			    __func__, cmnd->result);
4613 
4614 	if (delta_jiff > 0 || ndelay > 0) {
4615 		ktime_t kt;
4616 
4617 		if (delta_jiff > 0) {
4618 			u64 ns = jiffies_to_nsecs(delta_jiff);
4619 
4620 			if (sdebug_random && ns < U32_MAX) {
4621 				ns = prandom_u32_max((u32)ns);
4622 			} else if (sdebug_random) {
4623 				ns >>= 12;	/* scale to 4 usec precision */
4624 				if (ns < U32_MAX)	/* over 4 hours max */
4625 					ns = prandom_u32_max((u32)ns);
4626 				ns <<= 12;
4627 			}
4628 			kt = ns_to_ktime(ns);
4629 		} else {	/* ndelay has a 4.2 second max */
4630 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
4631 					     (u32)ndelay;
4632 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
4633 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
4634 
4635 				if (kt <= d) {	/* elapsed duration >= kt */
4636 					sqcp->a_cmnd = NULL;
4637 					atomic_dec(&devip->num_in_q);
4638 					clear_bit(k, sqp->in_use_bm);
4639 					if (new_sd_dp)
4640 						kfree(sd_dp);
4641 					/* call scsi_done() from this thread */
4642 					cmnd->scsi_done(cmnd);
4643 					return 0;
4644 				}
4645 				/* otherwise reduce kt by elapsed time */
4646 				kt -= d;
4647 			}
4648 		}
4649 		if (!sd_dp->init_hrt) {
4650 			sd_dp->init_hrt = true;
4651 			sqcp->sd_dp = sd_dp;
4652 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4653 				     HRTIMER_MODE_REL_PINNED);
4654 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4655 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4656 			sd_dp->qc_idx = k;
4657 		}
4658 		if (sdebug_statistics)
4659 			sd_dp->issuing_cpu = raw_smp_processor_id();
4660 		sd_dp->defer_t = SDEB_DEFER_HRT;
4661 		/* schedule the invocation of scsi_done() for a later time */
4662 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4663 	} else {	/* jdelay < 0, use work queue */
4664 		if (!sd_dp->init_wq) {
4665 			sd_dp->init_wq = true;
4666 			sqcp->sd_dp = sd_dp;
4667 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4668 			sd_dp->qc_idx = k;
4669 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4670 		}
4671 		if (sdebug_statistics)
4672 			sd_dp->issuing_cpu = raw_smp_processor_id();
4673 		sd_dp->defer_t = SDEB_DEFER_WQ;
4674 		if (unlikely(sqcp->inj_cmd_abort))
4675 			sd_dp->aborted = true;
4676 		schedule_work(&sd_dp->ew.work);
4677 		if (unlikely(sqcp->inj_cmd_abort)) {
4678 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4679 				    cmnd->request->tag);
4680 			blk_abort_request(cmnd->request);
4681 		}
4682 	}
4683 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4684 		     (scsi_result == device_qfull_result)))
4685 		sdev_printk(KERN_INFO, sdp,
4686 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4687 			    num_in_q, (inject ? "<inject> " : ""),
4688 			    "status: TASK SET FULL");
4689 	return 0;
4690 
4691 respond_in_thread:	/* call back to mid-layer using invocation thread */
4692 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4693 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
4694 	if (cmnd->result == 0 && scsi_result != 0)
4695 		cmnd->result = scsi_result;
4696 	cmnd->scsi_done(cmnd);
4697 	return 0;
4698 }
4699 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

/* Module identity; SDEBUG_VERSION must match the INQUIRY revision field */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line help text per parameter, shown by modinfo */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4827 
4828 #define SDEBUG_INFO_LEN 256
4829 static char sdebug_info[SDEBUG_INFO_LEN];
4830 
4831 static const char *scsi_debug_info(struct Scsi_Host *shp)
4832 {
4833 	int k;
4834 
4835 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4836 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4837 	if (k >= (SDEBUG_INFO_LEN - 1))
4838 		return sdebug_info;
4839 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4840 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4841 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4842 		  "statistics", (int)sdebug_statistics);
4843 	return sdebug_info;
4844 }
4845 
4846 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4847 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4848 				 int length)
4849 {
4850 	char arr[16];
4851 	int opts;
4852 	int minLen = length > 15 ? 15 : length;
4853 
4854 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4855 		return -EACCES;
4856 	memcpy(arr, buffer, minLen);
4857 	arr[minLen] = '\0';
4858 	if (1 != sscanf(arr, "%d", &opts))
4859 		return -EINVAL;
4860 	sdebug_opts = opts;
4861 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4862 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4863 	if (sdebug_every_nth != 0)
4864 		tweak_cmnd_count();
4865 	return length;
4866 }
4867 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* static configuration: version, sizes, delays, geometry */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	/* error-handling and integrity counters */
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-submit-queue busy-slot summary (first/last set bit) */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* list every host and the store index it is bound to */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		/* walk the xarray of backing stores, flagging unused ones */
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
4941 
4942 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4943 {
4944 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4945 }
4946 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4947  * of delay is jiffies.
4948  */
4949 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4950 			   size_t count)
4951 {
4952 	int jdelay, res;
4953 
4954 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4955 		res = count;
4956 		if (sdebug_jdelay != jdelay) {
4957 			int j, k;
4958 			struct sdebug_queue *sqp;
4959 
4960 			block_unblock_all_queues(true);
4961 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4962 			     ++j, ++sqp) {
4963 				k = find_first_bit(sqp->in_use_bm,
4964 						   sdebug_max_queue);
4965 				if (k != sdebug_max_queue) {
4966 					res = -EBUSY;   /* queued commands */
4967 					break;
4968 				}
4969 			}
4970 			if (res > 0) {
4971 				sdebug_jdelay = jdelay;
4972 				sdebug_ndelay = 0;
4973 			}
4974 			block_unblock_all_queues(false);
4975 		}
4976 		return res;
4977 	}
4978 	return -EINVAL;
4979 }
4980 static DRIVER_ATTR_RW(delay);
4981 
4982 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4983 {
4984 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4985 }
4986 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4987 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4988 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4989 			    size_t count)
4990 {
4991 	int ndelay, res;
4992 
4993 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4994 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4995 		res = count;
4996 		if (sdebug_ndelay != ndelay) {
4997 			int j, k;
4998 			struct sdebug_queue *sqp;
4999 
5000 			block_unblock_all_queues(true);
5001 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5002 			     ++j, ++sqp) {
5003 				k = find_first_bit(sqp->in_use_bm,
5004 						   sdebug_max_queue);
5005 				if (k != sdebug_max_queue) {
5006 					res = -EBUSY;   /* queued commands */
5007 					break;
5008 				}
5009 			}
5010 			if (res > 0) {
5011 				sdebug_ndelay = ndelay;
5012 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5013 							: DEF_JDELAY;
5014 			}
5015 			block_unblock_all_queues(false);
5016 		}
5017 		return res;
5018 	}
5019 	return -EINVAL;
5020 }
5021 static DRIVER_ATTR_RW(ndelay);
5022 
5023 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5024 {
5025 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5026 }
5027 
5028 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5029 			  size_t count)
5030 {
5031 	int opts;
5032 	char work[20];
5033 
5034 	if (sscanf(buf, "%10s", work) == 1) {
5035 		if (strncasecmp(work, "0x", 2) == 0) {
5036 			if (kstrtoint(work + 2, 16, &opts) == 0)
5037 				goto opts_done;
5038 		} else {
5039 			if (kstrtoint(work, 10, &opts) == 0)
5040 				goto opts_done;
5041 		}
5042 	}
5043 	return -EINVAL;
5044 opts_done:
5045 	sdebug_opts = opts;
5046 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5047 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5048 	tweak_cmnd_count();
5049 	return count;
5050 }
5051 static DRIVER_ATTR_RW(opts);
5052 
5053 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5054 {
5055 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5056 }
5057 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5058 			   size_t count)
5059 {
5060 	int n;
5061 
5062 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5063 		sdebug_ptype = n;
5064 		return count;
5065 	}
5066 	return -EINVAL;
5067 }
5068 static DRIVER_ATTR_RW(ptype);
5069 
5070 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5071 {
5072 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5073 }
5074 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5075 			    size_t count)
5076 {
5077 	int n;
5078 
5079 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5080 		sdebug_dsense = n;
5081 		return count;
5082 	}
5083 	return -EINVAL;
5084 }
5085 static DRIVER_ATTR_RW(dsense);
5086 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggle fake (no data copy) reads/writes. Transitioning 1 -> 0 sets up a
 * shared backing store for all hosts; 0 -> 1 erases all stores but the
 * first. A non-transition write is accepted and does nothing. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store yet: allocate a fresh one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* re-activate the first (retained) store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* release the host's old store */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5134 
5135 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5136 {
5137 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5138 }
5139 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5140 			      size_t count)
5141 {
5142 	int n;
5143 
5144 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5145 		sdebug_no_lun_0 = n;
5146 		return count;
5147 	}
5148 	return -EINVAL;
5149 }
5150 static DRIVER_ATTR_RW(no_lun_0);
5151 
5152 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
5153 {
5154 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
5155 }
5156 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
5157 			      size_t count)
5158 {
5159 	int n;
5160 
5161 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5162 		sdebug_num_tgts = n;
5163 		sdebug_max_tgts_luns();
5164 		return count;
5165 	}
5166 	return -EINVAL;
5167 }
5168 static DRIVER_ATTR_RW(num_tgts);
5169 
/* Read-only: size in MiB of the ram store shared by the devices. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
5175 
static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

/* When true, each subsequently added host gets its own backing store. */
static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);
5193 
/* Read-only: number of partitions created on each fake disk at init. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
5199 
5200 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
5201 {
5202 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
5203 }
5204 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
5205 			       size_t count)
5206 {
5207 	int nth;
5208 
5209 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
5210 		sdebug_every_nth = nth;
5211 		if (nth && !sdebug_statistics) {
5212 			pr_info("every_nth needs statistics=1, set it\n");
5213 			sdebug_statistics = true;
5214 		}
5215 		tweak_cmnd_count();
5216 		return count;
5217 	}
5218 	return -EINVAL;
5219 }
5220 static DRIVER_ATTR_RW(every_nth);
5221 
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Change the number of LUNs per target (capped at 256). On an actual
 * change, and if the simulated SCSI level is SPC-3 or later, raise a
 * REPORTED LUNS DATA HAS CHANGED unit attention on every device. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk all hosts/devices under the host list lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
5260 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k becomes the highest in-use slot index across all queues
		 * (SDEBUG_CANQUEUE means no slot in use anywhere) */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		/* retired_max_queue tracks in-flight slots above the new
		 * limit so they can still complete; 0 means none */
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
5296 
/* Read-only: non-zero stops upper level drivers (e.g. sd) attaching. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
5302 
/* Read-only: the ANSI SCSI level reported by simulated devices. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
5308 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the virtual capacity in GiB (0 -> fall back to dev_size_mb).
 * On an actual change, recompute capacity and raise a CAPACITY DATA HAS
 * CHANGED unit attention on every simulated device. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk all hosts/devices under the host list lock */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
5343 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Add (positive value) or remove (negative value) that many hosts. When
 * per-host stores are active, an added host first tries to re-use a store
 * marked not-in-use before allocating a new one. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for a retired store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
5387 
5388 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5389 {
5390 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5391 }
5392 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5393 				    size_t count)
5394 {
5395 	int n;
5396 
5397 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5398 		sdebug_vpd_use_hostno = n;
5399 		return count;
5400 	}
5401 	return -EINVAL;
5402 }
5403 static DRIVER_ATTR_RW(vpd_use_hostno);
5404 
5405 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5406 {
5407 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5408 }
5409 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5410 				size_t count)
5411 {
5412 	int n;
5413 
5414 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5415 		if (n > 0)
5416 			sdebug_statistics = true;
5417 		else {
5418 			clear_queue_stats();
5419 			sdebug_statistics = false;
5420 		}
5421 		return count;
5422 	}
5423 	return -EINVAL;
5424 }
5425 static DRIVER_ATTR_RW(statistics);
5426 
5427 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5428 {
5429 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5430 }
5431 static DRIVER_ATTR_RO(sector_size);
5432 
5433 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5434 {
5435 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5436 }
5437 static DRIVER_ATTR_RO(submit_queues);
5438 
5439 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5440 {
5441 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5442 }
5443 static DRIVER_ATTR_RO(dix);
5444 
5445 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5446 {
5447 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5448 }
5449 static DRIVER_ATTR_RO(dif);
5450 
5451 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5452 {
5453 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5454 }
5455 static DRIVER_ATTR_RO(guard);
5456 
5457 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5458 {
5459 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5460 }
5461 static DRIVER_ATTR_RO(ato);
5462 
5463 static ssize_t map_show(struct device_driver *ddp, char *buf)
5464 {
5465 	ssize_t count = 0;
5466 
5467 	if (!scsi_debug_lbp())
5468 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5469 				 sdebug_store_sectors);
5470 
5471 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
5472 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
5473 
5474 		if (sip)
5475 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5476 					  (int)map_size, sip->map_storep);
5477 	}
5478 	buf[count++] = '\n';
5479 	buf[count] = '\0';
5480 
5481 	return count;
5482 }
5483 static DRIVER_ATTR_RO(map);
5484 
5485 static ssize_t random_show(struct device_driver *ddp, char *buf)
5486 {
5487 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
5488 }
5489 
5490 static ssize_t random_store(struct device_driver *ddp, const char *buf,
5491 			    size_t count)
5492 {
5493 	bool v;
5494 
5495 	if (kstrtobool(buf, &v))
5496 		return -EINVAL;
5497 
5498 	sdebug_random = v;
5499 	return count;
5500 }
5501 static DRIVER_ATTR_RW(random);
5502 
5503 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5504 {
5505 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5506 }
5507 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5508 			       size_t count)
5509 {
5510 	int n;
5511 
5512 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5513 		sdebug_removable = (n > 0);
5514 		return count;
5515 	}
5516 	return -EINVAL;
5517 }
5518 static DRIVER_ATTR_RW(removable);
5519 
5520 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5521 {
5522 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5523 }
5524 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5525 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5526 			       size_t count)
5527 {
5528 	int n;
5529 
5530 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5531 		sdebug_host_lock = (n > 0);
5532 		return count;
5533 	}
5534 	return -EINVAL;
5535 }
5536 static DRIVER_ATTR_RW(host_lock);
5537 
5538 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5539 {
5540 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5541 }
5542 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5543 			    size_t count)
5544 {
5545 	int n;
5546 
5547 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5548 		sdebug_strict = (n > 0);
5549 		return count;
5550 	}
5551 	return -EINVAL;
5552 }
5553 static DRIVER_ATTR_RW(strict);
5554 
5555 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5556 {
5557 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5558 }
5559 static DRIVER_ATTR_RO(uuid_ctl);
5560 
5561 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5562 {
5563 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5564 }
5565 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5566 			     size_t count)
5567 {
5568 	int ret, n;
5569 
5570 	ret = kstrtoint(buf, 0, &n);
5571 	if (ret)
5572 		return ret;
5573 	sdebug_cdb_len = n;
5574 	all_config_cdb_len();
5575 	return count;
5576 }
5577 static DRIVER_ATTR_RW(cdb_len);
5578 
5579 
5580 /* Note: The following array creates attribute files in the
5581    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5582    files (over those found in the /sys/module/scsi_debug/parameters
5583    directory) is that auxiliary actions can be triggered when an attribute
5584    is changed. For example see: add_host_store() above.
5585  */
5586 
/* Driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug/ */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,	/* terminator */
};
ATTRIBUTE_GROUPS(sdebug_drv);
5624 
/* root device ("pseudo_0"), parent of every simulated adapter device */
static struct device *pseudo_primary;
5626 
/*
 * Module init: validate module parameters, size the ramdisk and its fake
 * geometry, allocate the submit-queue array, optionally create the first
 * backing store, register the pseudo bus and driver, then add the
 * requested number of simulated hosts.  Returns 0 or a negated errno.
 */
static int __init scsi_debug_init(void)
{
	/* fake_rw==0 means commands really touch a ramdisk store */
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;	/* index of first store, if one is created below */

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	/* a valid ndelay (nanoseconds) overrides jdelay (jiffies) */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* ---- module parameter sanity checks; each failure is fatal ---- */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		/* clamp logical block provisioning parameters */
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	/*
	 * Host 0 attaches to the store created above (when want_store);
	 * the rest may each get their own store (per_host_store).  A host
	 * add failure here only stops the loop; init still returns 0.
	 */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

/* error unwind, in reverse order of the registrations above */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5810 
5811 static void __exit scsi_debug_exit(void)
5812 {
5813 	int k = sdebug_num_hosts;
5814 
5815 	stop_all_queued();
5816 	for (; k; k--)
5817 		sdebug_do_remove_host(true);
5818 	free_all_queued();
5819 	driver_unregister(&sdebug_driverfs_driver);
5820 	bus_unregister(&pseudo_lld_bus);
5821 	root_device_unregister(pseudo_primary);
5822 
5823 	sdebug_erase_all_stores(false);
5824 	xa_destroy(per_store_ap);
5825 }
5826 
/* entry/exit hooks; init runs at device_initcall time when built in */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
5829 
/* Device-core release callback: frees the embedding host info struct. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *host_info = to_sdebug_host(dev);

	kfree(host_info);
}
5837 
5838 /* idx must be valid, if sip is NULL then it will be obtained using idx */
5839 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
5840 {
5841 	if (idx < 0)
5842 		return;
5843 	if (!sip) {
5844 		if (xa_empty(per_store_ap))
5845 			return;
5846 		sip = xa_load(per_store_ap, idx);
5847 		if (!sip)
5848 			return;
5849 	}
5850 	vfree(sip->map_storep);
5851 	vfree(sip->dif_storep);
5852 	vfree(sip->storep);
5853 	xa_erase(per_store_ap, idx);
5854 	kfree(sip);
5855 }
5856 
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	/*
	 * Erase every store; when apart_from_first is set, the first entry
	 * visited is skipped (and the flag is cleared on that iteration).
	 */
	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): apart_from_first can only still be true here if the
	 * xarray was empty (the loop clears it on its first iteration), so
	 * sdeb_most_recent_idx is reset only in that case — confirm this is
	 * the intended behavior rather than "reset whenever the first store
	 * was kept".
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
5872 
/*
 * Allocates a new backing store (ramdisk, plus optional protection info
 * and provisioning bitmap) and inserts it into per_store_ap.
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 * NOTE(review): struct xa_limit .max is inclusive, so .max = 1 << 16
 * admits indices 0..65536 — confirm the intended bound is 65536 entries.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index under the xarray lock before the big allocs */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);	/* the ramdisk itself */
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		/* 0xff pattern marks PI as not yet written */
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* undo the xarray insertion and free whatever was allocated */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
5955 
5956 static int sdebug_add_host_helper(int per_host_idx)
5957 {
5958 	int k, devs_per_host, idx;
5959 	int error = -ENOMEM;
5960 	struct sdebug_host_info *sdbg_host;
5961 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5962 
5963 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5964 	if (!sdbg_host)
5965 		return -ENOMEM;
5966 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
5967 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
5968 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
5969 	sdbg_host->si_idx = idx;
5970 
5971 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5972 
5973 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5974 	for (k = 0; k < devs_per_host; k++) {
5975 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5976 		if (!sdbg_devinfo)
5977 			goto clean;
5978 	}
5979 
5980 	spin_lock(&sdebug_host_list_lock);
5981 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5982 	spin_unlock(&sdebug_host_list_lock);
5983 
5984 	sdbg_host->dev.bus = &pseudo_lld_bus;
5985 	sdbg_host->dev.parent = pseudo_primary;
5986 	sdbg_host->dev.release = &sdebug_release_adapter;
5987 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
5988 
5989 	error = device_register(&sdbg_host->dev);
5990 	if (error)
5991 		goto clean;
5992 
5993 	++sdebug_num_hosts;
5994 	return 0;
5995 
5996 clean:
5997 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5998 				 dev_list) {
5999 		list_del(&sdbg_devinfo->dev_list);
6000 		kfree(sdbg_devinfo);
6001 	}
6002 	kfree(sdbg_host);
6003 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
6004 	return error;
6005 }
6006 
6007 static int sdebug_do_add_host(bool mk_new_store)
6008 {
6009 	int ph_idx = sdeb_most_recent_idx;
6010 
6011 	if (mk_new_store) {
6012 		ph_idx = sdebug_add_store();
6013 		if (ph_idx < 0)
6014 			return ph_idx;
6015 	}
6016 	return sdebug_add_host_helper(ph_idx);
6017 }
6018 
/*
 * Remove the most recently added host (tail of sdebug_host_list).  When
 * the_end is false and this host was the only user of its backing store,
 * the store is marked SDEB_XA_NOT_IN_USE so a later add can re-use it.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host share this store index? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
6058 
6059 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
6060 {
6061 	int num_in_q = 0;
6062 	struct sdebug_dev_info *devip;
6063 
6064 	block_unblock_all_queues(true);
6065 	devip = (struct sdebug_dev_info *)sdev->hostdata;
6066 	if (NULL == devip) {
6067 		block_unblock_all_queues(false);
6068 		return	-ENODEV;
6069 	}
6070 	num_in_q = atomic_read(&devip->num_in_q);
6071 
6072 	if (qdepth < 1)
6073 		qdepth = 1;
6074 	/* allow to exceed max host qc_arr elements for testing */
6075 	if (qdepth > SDEBUG_CANQUEUE + 10)
6076 		qdepth = SDEBUG_CANQUEUE + 10;
6077 	scsi_change_queue_depth(sdev, qdepth);
6078 
6079 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
6080 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
6081 			    __func__, qdepth, num_in_q);
6082 	}
6083 	block_unblock_all_queues(false);
6084 	return sdev->queue_depth;
6085 }
6086 
6087 static bool fake_timeout(struct scsi_cmnd *scp)
6088 {
6089 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
6090 		if (sdebug_every_nth < -1)
6091 			sdebug_every_nth = -1;
6092 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
6093 			return true; /* ignore command causing timeout */
6094 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
6095 			 scsi_medium_access_command(scp))
6096 			return true; /* time out reads and writes */
6097 	}
6098 	return false;
6099 }
6100 
6101 static bool fake_host_busy(struct scsi_cmnd *scp)
6102 {
6103 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
6104 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6105 }
6106 
/*
 * Queuecommand entry point: decode the CDB, find the matching entry in
 * opcode_info_arr (handling service-action variants), apply the various
 * error-injection and strict-checking options, then hand off to
 * schedule_resp() with the selected resp_* handler and delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;

	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the CDB bytes (at most the first 32) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* match on (opcode, service action) pair */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the command skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* fake_rw: skip the resp_* handler */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
6268 
/* SCSI host template used by sdebug_driver_probe() for every adapter;
 * can_queue and dma_boundary may be adjusted there before host alloc.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
6295 
/*
 * Bus probe callback: allocates and configures a Scsi_Host for the
 * pseudo device, sets up protection (DIF/DIX) and guard capabilities,
 * then adds and scans the host.  Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/*
	 * sizeof(sdbg_host) is the size of a pointer, deliberately: the
	 * hostdata area only stores a back-pointer (see assignment below).
	 */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate sdebug_dif/sdebug_dix into SHOST_* protection bits */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
6393 
6394 static int sdebug_driver_remove(struct device *dev)
6395 {
6396 	struct sdebug_host_info *sdbg_host;
6397 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6398 
6399 	sdbg_host = to_sdebug_host(dev);
6400 
6401 	if (!sdbg_host) {
6402 		pr_err("Unable to locate host info\n");
6403 		return -ENODEV;
6404 	}
6405 
6406 	scsi_remove_host(sdbg_host->shost);
6407 
6408 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6409 				 dev_list) {
6410 		list_del(&sdbg_devinfo->dev_list);
6411 		kfree(sdbg_devinfo);
6412 	}
6413 
6414 	scsi_host_put(sdbg_host->shost);
6415 	return 0;
6416 }
6417 
/* Every device on the pseudo bus matches, so probe always runs. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
6423 
/* Pseudo bus that hosts the simulated adapters; probe/remove wire each
 * adapter device to sdebug_driver_probe()/sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
6431