xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 600d9ead)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Codes (ASC); the *_ASCQ names mixed in below are ASC Qualifier values paired with a nearby ASC */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0	/* 0 -> real size picked at init (presumably DEF_DEV_SIZE_MB) */
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999	/* sentinel: marks jdelay as overridden */
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1	/* conventional (non-sequential) zones */
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for the sdebug_opts driver parameter */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8	/* fits within uas_bm[1] (a single long) */
208 
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210  * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8	/* data direction unknown */
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1	/* xarray mark: entry not currently in use */
252 
253 static struct kmem_cache *queued_cmd_cache;	/* slab cache for struct sdebug_queued_cmd */
254 
255 /* queued command is stashed in / fetched from scsi_cmnd::host_scribble */
256 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
257 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
257 
258 /* Zone types (zbcr05 table 25) */
259 enum sdebug_z_type {
260 	ZBC_ZTYPE_CNV	= 0x1,	/* conventional */
261 	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
262 	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
263 	/* ZBC_ZTYPE_SOBR = 0x4, */
264 	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone */
265 };
266 
267 /* Zone conditions; enumeration names taken from table 26, zbcr05 */
268 enum sdebug_z_cond {
269 	ZBC_NOT_WRITE_POINTER	= 0x0,
270 	ZC1_EMPTY		= 0x1,
271 	ZC2_IMPLICIT_OPEN	= 0x2,
272 	ZC3_EXPLICIT_OPEN	= 0x3,
273 	ZC4_CLOSED		= 0x4,
274 	ZC6_READ_ONLY		= 0xd,
275 	ZC5_FULL		= 0xe,
276 	ZC7_OFFLINE		= 0xf,
277 };
278 
279 struct sdeb_zone_state {	/* ZBC: per zone state */
280 	enum sdebug_z_type z_type;
281 	enum sdebug_z_cond z_cond;
282 	bool z_non_seq_resource;	/* NOTE(review): ZBC non-seq write resource flag - confirm */
283 	unsigned int z_size;		/* zone size; presumably in sectors - confirm */
284 	sector_t z_start;		/* first sector of this zone */
285 	sector_t z_wp;			/* write pointer */
286 };
287 
288 struct sdebug_dev_info {	/* per simulated logical unit (device) */
289 	struct list_head dev_list;	/* on owning host's dev_info_list */
290 	unsigned int channel;
291 	unsigned int target;
292 	u64 lun;
293 	uuid_t lu_name;
294 	struct sdebug_host_info *sdbg_host;
295 	unsigned long uas_bm[1];	/* pending unit attentions; bits are SDEBUG_UA_* */
296 	atomic_t stopped;	/* 1: by SSU, 2: device start */
297 	bool used;
298 
299 	/* For ZBC devices */
300 	enum blk_zoned_model zmodel;
301 	unsigned int zcap;
302 	unsigned int zsize;
303 	unsigned int zsize_shift;	/* presumably log2(zsize) - confirm */
304 	unsigned int nr_zones;
305 	unsigned int nr_conv_zones;
306 	unsigned int nr_seq_zones;
307 	unsigned int nr_imp_open;
308 	unsigned int nr_exp_open;
309 	unsigned int nr_closed;
310 	unsigned int max_open;
311 	ktime_t create_ts;	/* time since bootup that this device was created */
312 	struct sdeb_zone_state *zstate;	/* presumably nr_zones entries - confirm */
313 };
314 
315 struct sdebug_host_info {	/* per simulated SCSI host */
316 	struct list_head host_list;
317 	int si_idx;	/* sdeb_store_info (per host) xarray index */
318 	struct Scsi_Host *shost;
319 	struct device dev;
320 	struct list_head dev_info_list;	/* sdebug_dev_info::dev_list entries */
321 };
322 
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325 	rwlock_t macc_lck;	/* for atomic media access on this store */
326 	u8 *storep;		/* user data storage (ram) */
327 	struct t10_pi_tuple *dif_storep; /* protection info */
328 	void *map_storep;	/* provisioning map */
329 };
330 
331 /* map a struct device (or a Scsi_Host) back to its owning sdebug_host_info */
332 #define dev_to_sdebug_host(d)	\
333 	container_of(d, struct sdebug_host_info, dev)
334 
335 #define shost_to_sdebug_host(shost)	\
336 	dev_to_sdebug_host(shost->dma_dev)
336 
/* mechanism used to defer a command's completion: none, hrtimer, workqueue or mq poll */
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
339 
340 struct sdebug_defer {	/* deferred-completion bookkeeping for one command */
341 	struct hrtimer hrt;	/* used for SDEB_DEFER_HRT */
342 	struct execute_work ew;	/* used for SDEB_DEFER_WQ */
343 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
344 	int sqa_idx;	/* index of sdebug_queue array */
345 	int hc_idx;	/* hostwide tag index */
346 	int issuing_cpu;
347 	bool aborted;	/* true when blk_abort_request() already called */
348 	enum sdeb_defer_type defer_t;	/* which deferral mechanism is armed */
349 };
350 
351 struct sdebug_queued_cmd {
352 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
353 	 * instance indicates this slot is in use.
354 	 */
355 	struct sdebug_defer sd_dp;
356 	struct scsi_cmnd *scmd;	/* the mid-level command this slot services */
357 };
358 
359 struct sdebug_scsi_cmd {	/* NOTE(review): presumably per-command private data - confirm */
360 	spinlock_t   lock;
361 };
362 
363 struct sdebug_queue {	/* per submit-queue set of command slots */
364 	struct sdebug_queued_cmd *qc_arr[SDEBUG_CANQUEUE];
365 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
366 	spinlock_t qc_lock;	/* presumably guards qc_arr[] and in_use_bm[] - confirm */
367 };
368 
369 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
370 static atomic_t sdebug_completions;  /* count of deferred completions */
371 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
372 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
373 static atomic_t sdeb_inject_pending; /* NOTE(review): error injection armed - confirm trigger */
374 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
375 
376 struct opcode_info_t {
377 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
378 				/* for terminating element */
379 	u8 opcode;		/* if num_attached > 0, preferred */
380 	u16 sa;			/* service action */
381 	u32 flags;		/* OR-ed set of F_* (and FF_*) flags */
382 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response function (NULL -> none) */
383 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
384 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
385 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
386 };
387 
388 /* SCSI opcodes (first byte of cdb) of interest are mapped onto these indexes (see opcode_ind_arr[]) */
389 enum sdeb_opcode_index {
390 	SDEB_I_INVALID_OPCODE =	0,
391 	SDEB_I_INQUIRY = 1,
392 	SDEB_I_REPORT_LUNS = 2,
393 	SDEB_I_REQUEST_SENSE = 3,
394 	SDEB_I_TEST_UNIT_READY = 4,
395 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
396 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
397 	SDEB_I_LOG_SENSE = 7,
398 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
399 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
400 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
401 	SDEB_I_START_STOP = 11,
402 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
403 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
404 	SDEB_I_MAINT_IN = 14,
405 	SDEB_I_MAINT_OUT = 15,
406 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
407 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
408 	SDEB_I_RESERVE = 18,		/* 6, 10 */
409 	SDEB_I_RELEASE = 19,		/* 6, 10 */
410 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
411 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
412 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
413 	SDEB_I_SEND_DIAG = 23,
414 	SDEB_I_UNMAP = 24,
415 	SDEB_I_WRITE_BUFFER = 25,
416 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
417 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
418 	SDEB_I_COMP_WRITE = 28,
419 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
420 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
421 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
422 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
423 };
424 
425 
426 static const unsigned char opcode_ind_arr[256] = {	/* maps cdb[0] to a SDEB_I_* index */
427 /* 0x0; 0x0->0x1f: 6 byte cdbs */
428 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
429 	    0, 0, 0, 0,
430 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
431 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
432 	    SDEB_I_RELEASE,
433 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
434 	    SDEB_I_ALLOW_REMOVAL, 0,
435 /* 0x20; 0x20->0x3f: 10 byte cdbs */
436 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
437 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
438 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
439 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
440 /* 0x40; 0x40->0x5f: 10 byte cdbs */
441 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
443 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
444 	    SDEB_I_RELEASE,
445 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
446 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
447 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
448 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
449 	0, SDEB_I_VARIABLE_LEN,
450 /* 0x80; 0x80->0x9f: 16 byte cdbs */
451 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
452 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
453 	0, 0, 0, SDEB_I_VERIFY,
454 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
455 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
456 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
457 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
458 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
459 	     SDEB_I_MAINT_OUT, 0, 0, 0,
460 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
461 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0,
463 	0, 0, 0, 0, 0, 0, 0, 0,
464 /* 0xc0; 0xc0->0xff: vendor specific */
465 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
466 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
467 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
468 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
469 };
470 
471 /*
472  * The following "response" functions return the SCSI mid-level's 4 byte
473  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
474  * command completion, they can mask their return value with
475  * SDEG_RES_IMMED_MASK .
476  */
477 #define SDEG_RES_IMMED_MASK 0x40000000	/* OR-ed into a response function's return */
478 
479 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
505 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
506 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
507 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
508 
509 static int sdebug_do_add_host(bool mk_new_store);
510 static int sdebug_add_host_helper(int per_host_idx);
511 static void sdebug_do_remove_host(bool the_end);
512 static int sdebug_add_store(void);
513 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
514 static void sdebug_erase_all_stores(bool apart_from_first);
515 
516 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
517 
518 /*
519  * The following are overflow arrays for cdbs that "hit" the same index in
520  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
521  * should be placed in opcode_info_arr[], the others should be placed here.
522  */
523 static const struct opcode_info_t msense_iarr[] = {
524 	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
525 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 };
527 
528 static const struct opcode_info_t mselect_iarr[] = {
529 	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
530 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 };
532 
533 static const struct opcode_info_t read_iarr[] = {
534 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
535 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
536 	     0, 0, 0, 0} },
537 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
538 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
539 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
540 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
541 	     0xc7, 0, 0, 0, 0} },
542 };
543 
544 static const struct opcode_info_t write_iarr[] = {
545 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
546 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
547 		   0, 0, 0, 0, 0, 0} },
548 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
549 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
550 		   0, 0, 0} },
551 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
552 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
553 		   0xbf, 0xc7, 0, 0, 0, 0} },
554 };
555 
556 static const struct opcode_info_t verify_iarr[] = {
557 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
558 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
559 		   0, 0, 0, 0, 0, 0} },
560 };
561 
562 static const struct opcode_info_t sa_in_16_iarr[] = {	/* SERVICE ACTION IN(16) */
563 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
564 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
565 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
566 };
567 
568 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
569 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
570 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
571 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
572 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
573 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
574 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
575 };
576 
577 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
578 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
579 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
580 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
581 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
582 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
583 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
584 };
585 
586 static const struct opcode_info_t write_same_iarr[] = {
587 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
588 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
589 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
590 };
591 
592 static const struct opcode_info_t reserve_iarr[] = {
593 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
594 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
595 };
596 
597 static const struct opcode_info_t release_iarr[] = {
598 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
599 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
600 };
601 
602 static const struct opcode_info_t sync_cache_iarr[] = {
603 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
604 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
606 };
607 
608 static const struct opcode_info_t pre_fetch_iarr[] = {
609 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
610 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
612 };
613 
614 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
615 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
616 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
617 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
618 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
619 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
620 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
621 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
622 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
623 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
624 };
625 
626 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
627 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
628 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
629 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* NOTE(review): labelled REPORT ZONES, but SA here is 0x6 and REPORT ZONES (SA 0) lives in opcode_info_arr - confirm intent */
630 };
631 
632 
633 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
634  * plus the terminating elements for logic that scans this table such as
635  * REPORT SUPPORTED OPERATION CODES. */
636 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
637 /* 0 */
638 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
639 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
640 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
641 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
643 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
644 	     0, 0} },					/* REPORT LUNS */
645 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
646 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
647 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
648 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
649 /* 5 */
650 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
651 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
652 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
653 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
654 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
655 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
656 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
657 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
658 	     0, 0, 0} },
659 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
660 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
661 	     0, 0} },
662 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
663 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
664 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
665 /* 10 */
666 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
667 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
668 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
669 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
670 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
671 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
673 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
674 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
676 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
677 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
678 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
679 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
680 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
681 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
682 				0xff, 0, 0xc7, 0, 0, 0, 0} },
683 /* 15 */
684 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
685 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
686 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
687 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
688 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
689 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
690 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
691 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
692 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
693 	     0xff, 0xff} },
694 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
695 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
696 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
697 	     0} },
698 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
699 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
700 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
701 	     0} },
702 /* 20 */
703 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
704 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
705 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
706 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
707 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
708 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* fix: sa (u16) and flags (u32) were transposed here; SEND DIAGNOSTIC
	 * has no service action, and F_D_OUT belongs in the flags field. */
709 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
710 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
711 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
712 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
713 /* 25 */
714 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
715 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
717 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
718 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
719 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
720 		 0, 0, 0, 0, 0} },
721 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
722 	    resp_sync_cache, sync_cache_iarr,
723 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
724 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
725 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
726 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
727 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
728 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
729 	    resp_pre_fetch, pre_fetch_iarr,
730 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
731 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
732 
733 /* 30 */
734 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
735 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
736 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
737 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
738 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
739 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
740 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
741 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
742 /* sentinel */
743 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
744 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
745 };
746 
/*
 * Module-scope tunables and state.  Defaults come from the DEF_* constants
 * defined earlier in this file; presumably each is overridable via a
 * module/boot parameter of a similar name (see the module_param uses later
 * in this file -- confirm there).
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

/* SAM logical unit address method codes */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
814 
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* All emulated hosts; traversals take sdebug_host_list_mutex */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* xarray of backing stores (struct sdeb_store_info); index 0 is used as a
 * fallback by lba2fake_store() */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Event/error counters; updated elsewhere in this file */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

/* pseudo low-level driver registered with the driver core */
static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Canned scsi_cmnd result values used by the response functions */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
880 
881 /* Only do the extra work involved in logical block provisioning if one or
882  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
883  * real reads and writes (i.e. not skipping them for speed).
884  */
885 static inline bool scsi_debug_lbp(void)
886 {
887 	return 0 == sdebug_fake_rw &&
888 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
889 }
890 
891 static void *lba2fake_store(struct sdeb_store_info *sip,
892 			    unsigned long long lba)
893 {
894 	struct sdeb_store_info *lsip = sip;
895 
896 	lba = do_div(lba, sdebug_store_sectors);
897 	if (!sip || !sip->storep) {
898 		WARN_ON_ONCE(true);
899 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
900 	}
901 	return lsip->storep + lba * sdebug_sector_size;
902 }
903 
904 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
905 				      sector_t sector)
906 {
907 	sector = sector_div(sector, sdebug_store_sectors);
908 
909 	return sip->dif_storep + sector;
910 }
911 
912 static void sdebug_max_tgts_luns(void)
913 {
914 	struct sdebug_host_info *sdbg_host;
915 	struct Scsi_Host *hpnt;
916 
917 	mutex_lock(&sdebug_host_list_mutex);
918 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
919 		hpnt = sdbg_host->shost;
920 		if ((hpnt->this_id >= 0) &&
921 		    (sdebug_num_tgts > hpnt->this_id))
922 			hpnt->max_id = sdebug_num_tgts + 1;
923 		else
924 			hpnt->max_id = sdebug_num_tgts;
925 		/* sdebug_max_luns; */
926 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
927 	}
928 	mutex_unlock(&sdebug_host_list_mutex);
929 }
930 
931 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
932 
933 /* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (SKSV, C/D, BPV, pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* ASC depends on whether the bad field was in the CDB or in the
	 * data-out (parameter list) buffer */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	/* build the SPC "field pointer" sense-key specific bytes */
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV=1 */
	if (c_d)
		sks[0] |= 0x40;	/* C/D=1: error was in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV=1: bit pointer below is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field (byte) pointer */
	if (sdebug_dsense) {
		/* descriptor sense: append a sense-key specific descriptor
		 * (type 0x2) and grow the additional sense length */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed sense: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
973 
974 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
975 {
976 	if (!scp->sense_buffer) {
977 		sdev_printk(KERN_ERR, scp->device,
978 			    "%s: sense_buffer is NULL\n", __func__);
979 		return;
980 	}
981 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
982 
983 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
984 
985 	if (sdebug_verbose)
986 		sdev_printk(KERN_INFO, scp->device,
987 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
988 			    my_name, key, asc, asq);
989 }
990 
/* Convenience wrapper: ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
995 
996 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
997 			    void __user *arg)
998 {
999 	if (sdebug_verbose) {
1000 		if (0x1261 == cmd)
1001 			sdev_printk(KERN_INFO, dev,
1002 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1003 		else if (0x5331 == cmd)
1004 			sdev_printk(KERN_INFO, dev,
1005 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1006 				    __func__);
1007 		else
1008 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1009 				    __func__, cmd);
1010 	}
1011 	return -EINVAL;
1012 	/* return -ENOTTY; // correct return but upsets fdisk */
1013 }
1014 
1015 static void config_cdb_len(struct scsi_device *sdev)
1016 {
1017 	switch (sdebug_cdb_len) {
1018 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1019 		sdev->use_10_for_rw = false;
1020 		sdev->use_16_for_rw = false;
1021 		sdev->use_10_for_ms = false;
1022 		break;
1023 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1024 		sdev->use_10_for_rw = true;
1025 		sdev->use_16_for_rw = false;
1026 		sdev->use_10_for_ms = false;
1027 		break;
1028 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1029 		sdev->use_10_for_rw = true;
1030 		sdev->use_16_for_rw = false;
1031 		sdev->use_10_for_ms = true;
1032 		break;
1033 	case 16:
1034 		sdev->use_10_for_rw = false;
1035 		sdev->use_16_for_rw = true;
1036 		sdev->use_10_for_ms = true;
1037 		break;
1038 	case 32: /* No knobs to suggest this so same as 16 for now */
1039 		sdev->use_10_for_rw = false;
1040 		sdev->use_16_for_rw = true;
1041 		sdev->use_10_for_ms = true;
1042 		break;
1043 	default:
1044 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1045 			sdebug_cdb_len);
1046 		sdev->use_10_for_rw = true;
1047 		sdev->use_16_for_rw = false;
1048 		sdev->use_10_for_ms = false;
1049 		sdebug_cdb_len = 10;
1050 		break;
1051 	}
1052 }
1053 
1054 static void all_config_cdb_len(void)
1055 {
1056 	struct sdebug_host_info *sdbg_host;
1057 	struct Scsi_Host *shost;
1058 	struct scsi_device *sdev;
1059 
1060 	mutex_lock(&sdebug_host_list_mutex);
1061 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1062 		shost = sdbg_host->shost;
1063 		shost_for_each_device(sdev, shost) {
1064 			config_cdb_len(sdev);
1065 		}
1066 	}
1067 	mutex_unlock(&sdebug_host_list_mutex);
1068 }
1069 
1070 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1071 {
1072 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1073 	struct sdebug_dev_info *dp;
1074 
1075 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1076 		if ((devip->sdbg_host == dp->sdbg_host) &&
1077 		    (devip->target == dp->target)) {
1078 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1079 		}
1080 	}
1081 }
1082 
/*
 * If a unit attention (UA) is pending on @devip, build sense data for the
 * lowest-numbered one, clear its bit and return check_condition_result;
 * otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* the UA is consumed by this delivery */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1168 
1169 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1170 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1171 				int arr_len)
1172 {
1173 	int act_len;
1174 	struct scsi_data_buffer *sdb = &scp->sdb;
1175 
1176 	if (!sdb->length)
1177 		return 0;
1178 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1179 		return DID_ERROR << 16;
1180 
1181 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1182 				      arr, arr_len);
1183 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1184 
1185 	return 0;
1186 }
1187 
1188 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1189  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1190  * calls, not required to write in ascending offset order. Assumes resid
1191  * set to scsi_bufflen() prior to any calls.
1192  */
1193 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1194 				  int arr_len, unsigned int off_dst)
1195 {
1196 	unsigned int act_len, n;
1197 	struct scsi_data_buffer *sdb = &scp->sdb;
1198 	off_t skip = off_dst;
1199 
1200 	if (sdb->length <= off_dst)
1201 		return 0;
1202 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1203 		return DID_ERROR << 16;
1204 
1205 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1206 				       arr, arr_len, skip);
1207 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1208 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1209 		 scsi_get_resid(scp));
1210 	n = scsi_bufflen(scp) - (off_dst + act_len);
1211 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1212 	return 0;
1213 }
1214 
1215 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1216  * 'arr' or -1 if error.
1217  */
1218 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1219 			       int arr_len)
1220 {
1221 	if (!scsi_bufflen(scp))
1222 		return 0;
1223 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1224 		return -1;
1225 
1226 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1227 }
1228 
1229 
/* INQUIRY standard data: 8-byte vendor, 16-byte product, 4-byte revision */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1237 
1238 /* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes the 4 byte header) */
	num += 4;
	/* dev_id_num < 0 (i.e. a wlun) suppresses the LU designators */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* "naa." + 16 hex digits + 4 NUL pad */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1325 
/* Canned payload for the Software interface identification VPD page (0x84):
 * three 6-byte identifiers differing only in the last byte. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1331 
1332 /*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	/* static payload; returns number of bytes placed in arr */
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1338 
1339 /* Management network addresses VPD page */
/* Management network addresses VPD page: one descriptor per URL below. */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const char *urls[2] = {
		"https://www.kernel.org/config",	/* lu, storage config */
		"http://www.kernel.org/log",		/* lu, logging */
	};
	static const unsigned char assoc_svc[2] = {0x1, 0x4};
	int k, num = 0;

	for (k = 0; k < 2; ++k) {
		int olen = strlen(urls[k]);
		int plen = olen + 1;		/* room for the NUL */

		if (plen % 4)			/* pad to a 4 byte multiple */
			plen = ((plen / 4) + 1) * 4;

		arr[num++] = assoc_svc[k];	/* association + service type */
		arr[num++] = 0x0;		/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, urls[k], olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1373 
1374 /* SCSI ports VPD page */
1375 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1376 {
1377 	int num = 0;
1378 	int port_a, port_b;
1379 
1380 	port_a = target_dev_id + 1;
1381 	port_b = port_a + 1;
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x0;	/* reserved */
1384 	arr[num++] = 0x0;
1385 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1386 	memset(arr + num, 0, 6);
1387 	num += 6;
1388 	arr[num++] = 0x0;
1389 	arr[num++] = 12;	/* length tp descriptor */
1390 	/* naa-5 target port identifier (A) */
1391 	arr[num++] = 0x61;	/* proto=sas, binary */
1392 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x8;	/* length */
1395 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1396 	num += 8;
1397 	arr[num++] = 0x0;	/* reserved */
1398 	arr[num++] = 0x0;	/* reserved */
1399 	arr[num++] = 0x0;
1400 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1401 	memset(arr + num, 0, 6);
1402 	num += 6;
1403 	arr[num++] = 0x0;
1404 	arr[num++] = 12;	/* length tp descriptor */
1405 	/* naa-5 target port identifier (B) */
1406 	arr[num++] = 0x61;	/* proto=sas, binary */
1407 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1408 	arr[num++] = 0x0;	/* reserved */
1409 	arr[num++] = 0x8;	/* length */
1410 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1411 	num += 8;
1412 
1413 	return num;
1414 }
1415 
1416 
/* Canned ATA IDENTIFY DEVICE style data returned by the ATA Information
 * VPD page (0x89); see inquiry_vpd_89() below. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1460 
1461 /* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	/* static payload; returns number of bytes placed in arr */
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1467 
1468 
/* Template for the Block limits VPD page (0xb0); inquiry_vpd_b0() then
 * overwrites the parameter-dependent fields. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1475 
1476 /* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {	/* only meaningful when UNMAP is supported */
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1520 
1521 /* Block device characteristics VPD page (SBC-3) */
1522 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1523 {
1524 	memset(arr, 0, 0x3c);
1525 	arr[0] = 0;
1526 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1527 	arr[2] = 0;
1528 	arr[3] = 5;	/* less than 1.8" */
1529 	if (devip->zmodel == BLK_ZONED_HA)
1530 		arr[4] = 1 << 4;	/* zoned field = 01b */
1531 
1532 	return 0x3c;
1533 }
1534 
1535 /* Logical block provisioning VPD page (SBC-4) */
1536 static int inquiry_vpd_b2(unsigned char *arr)
1537 {
1538 	memset(arr, 0, 0x4);
1539 	arr[0] = 0;			/* threshold exponent */
1540 	if (sdebug_lbpu)
1541 		arr[1] = 1 << 7;
1542 	if (sdebug_lbpws)
1543 		arr[1] |= 1 << 6;
1544 	if (sdebug_lbpws10)
1545 		arr[1] |= 1 << 5;
1546 	if (sdebug_lbprz && scsi_debug_lbp())
1547 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1548 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1549 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1550 	/* threshold_percentage=0 */
1551 	return 0x4;
1552 }
1553 
1554 /* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* max open zones is only enforced for host-managed devices */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	/* advertise a constant zone start offset when capacity < size */
	if (devip->zcap < devip->zsize) {
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1579 
1580 #define SDEBUG_LONG_INQ_SZ 96
1581 #define SDEBUG_MAX_INQ_ARR_SZ 584
1582 
/*
 * Respond to the SPC INQUIRY command.  Builds either the standard INQUIRY
 * data or, when the EVPD bit is set, one of the supported VPD pages.  The
 * CMDDT bit is not supported.  Returns 0, check_condition_result, or a
 * transport result (DID_REQUEUE) on allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* have the mid-layer retry */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* byte 0: peripheral qualifier (upper 3 bits) + device type (lower 5) */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* synthesize identifiers from host/target/lun numbers */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* page may exceed 255 bytes */
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to allocation length, then to the buffer we built */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1746 
/* Informational Exceptions control mode page (0x1c) current values.
 * Mutable at runtime; resp_requests() below inspects its TEST and MRIE
 * fields.  See resp_iec_m_pg() for how this data is manipulated.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1750 
1751 static int resp_requests(struct scsi_cmnd *scp,
1752 			 struct sdebug_dev_info *devip)
1753 {
1754 	unsigned char *cmd = scp->cmnd;
1755 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1756 	bool dsense = !!(cmd[1] & 1);
1757 	u32 alloc_len = cmd[4];
1758 	u32 len = 18;
1759 	int stopped_state = atomic_read(&devip->stopped);
1760 
1761 	memset(arr, 0, sizeof(arr));
1762 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1763 		if (dsense) {
1764 			arr[0] = 0x72;
1765 			arr[1] = NOT_READY;
1766 			arr[2] = LOGICAL_UNIT_NOT_READY;
1767 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1768 			len = 8;
1769 		} else {
1770 			arr[0] = 0x70;
1771 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
1772 			arr[7] = 0xa;			/* 18 byte sense buffer */
1773 			arr[12] = LOGICAL_UNIT_NOT_READY;
1774 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1775 		}
1776 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1777 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1778 		if (dsense) {
1779 			arr[0] = 0x72;
1780 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1781 			arr[2] = THRESHOLD_EXCEEDED;
1782 			arr[3] = 0xff;		/* Failure prediction(false) */
1783 			len = 8;
1784 		} else {
1785 			arr[0] = 0x70;
1786 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1787 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1788 			arr[12] = THRESHOLD_EXCEEDED;
1789 			arr[13] = 0xff;		/* Failure prediction(false) */
1790 		}
1791 	} else {	/* nothing to report */
1792 		if (dsense) {
1793 			len = 8;
1794 			memset(arr, 0, len);
1795 			arr[0] = 0x72;
1796 		} else {
1797 			memset(arr, 0, len);
1798 			arr[0] = 0x70;
1799 			arr[7] = 0xa;
1800 		}
1801 	}
1802 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1803 }
1804 
/*
 * Respond to START STOP UNIT.  Only POWER CONDITION 0 is supported.
 * devip->stopped encodes: 0 -> started, 1 -> stopped, 2 -> still within
 * the initial tur_ms_to_ready delay window.  Returns 0, a check
 * condition, or SDEG_RES_IMMED_MASK when the response may complete
 * immediately (no state change, or IMMED bit set).
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear means stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* in tur_ms_to_ready delay window */
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1847 
1848 static sector_t get_sdebug_capacity(void)
1849 {
1850 	static const unsigned int gibibyte = 1073741824;
1851 
1852 	if (sdebug_virtual_gb > 0)
1853 		return (sector_t)sdebug_virtual_gb *
1854 			(gibibyte / sdebug_sector_size);
1855 	else
1856 		return sdebug_store_sectors;
1857 }
1858 
1859 #define SDEBUG_READCAP_ARR_SZ 8
1860 static int resp_readcap(struct scsi_cmnd *scp,
1861 			struct sdebug_dev_info *devip)
1862 {
1863 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1864 	unsigned int capac;
1865 
1866 	/* following just in case virtual_gb changed */
1867 	sdebug_capacity = get_sdebug_capacity();
1868 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1869 	if (sdebug_capacity < 0xffffffff) {
1870 		capac = (unsigned int)sdebug_capacity - 1;
1871 		put_unaligned_be32(capac, arr + 0);
1872 	} else
1873 		put_unaligned_be32(0xffffffff, arr + 0);
1874 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1875 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1876 }
1877 
1878 #define SDEBUG_READCAP16_ARR_SZ 32
1879 static int resp_readcap16(struct scsi_cmnd *scp,
1880 			  struct sdebug_dev_info *devip)
1881 {
1882 	unsigned char *cmd = scp->cmnd;
1883 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1884 	u32 alloc_len;
1885 
1886 	alloc_len = get_unaligned_be32(cmd + 10);
1887 	/* following just in case virtual_gb changed */
1888 	sdebug_capacity = get_sdebug_capacity();
1889 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1890 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1891 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1892 	arr[13] = sdebug_physblk_exp & 0xf;
1893 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1894 
1895 	if (scsi_debug_lbp()) {
1896 		arr[14] |= 0x80; /* LBPME */
1897 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1898 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1899 		 * in the wider field maps to 0 in this field.
1900 		 */
1901 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1902 			arr[14] |= 0x40;
1903 	}
1904 
1905 	/*
1906 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1907 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1908 	 */
1909 	if (devip->zmodel == BLK_ZONED_HM)
1910 		arr[12] |= 1 << 4;
1911 
1912 	arr[15] = sdebug_lowest_aligned & 0xff;
1913 
1914 	if (have_dif_prot) {
1915 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1916 		arr[12] |= 1; /* PROT_EN */
1917 	}
1918 
1919 	return fill_from_dev_buffer(scp, arr,
1920 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1921 }
1922 
1923 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1924 
/*
 * Respond to REPORT TARGET PORT GROUPS (maintenance-in).  Builds two
 * single-port target port groups to match the two ports claimed by VPD
 * page 0x88; the group containing port B is always reported unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* leave room for the 4-byte length header, filled in below */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the 4-byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2001 
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).
 * reporting_opts==0 returns the all-commands list built by walking
 * opcode_info_arr (including each opcode's attached service-action
 * variants); 1..3 return a one-command descriptor for the requested
 * opcode (and service action).  RCTD asks for the command timeout
 * descriptor to be appended to each entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeout descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap internal buffer at 8 KiB */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each entry: 8 bytes, plus 12 more for a timeout descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;	/* remember parent, restored after variants */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported in standard format */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached variants by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached variants by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage data (length + bit mask) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2152 
2153 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2154 			  struct sdebug_dev_info *devip)
2155 {
2156 	bool repd;
2157 	u32 alloc_len, len;
2158 	u8 arr[16];
2159 	u8 *cmd = scp->cmnd;
2160 
2161 	memset(arr, 0, sizeof(arr));
2162 	repd = !!(cmd[2] & 0x80);
2163 	alloc_len = get_unaligned_be32(cmd + 6);
2164 	if (alloc_len < 4) {
2165 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2166 		return check_condition_result;
2167 	}
2168 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2169 	arr[1] = 0x1;		/* ITNRS */
2170 	if (repd) {
2171 		arr[3] = 0xc;
2172 		len = 16;
2173 	} else
2174 		len = 4;
2175 
2176 	len = (len < alloc_len) ? len : alloc_len;
2177 	return fill_from_dev_buffer(scp, arr, len);
2178 }
2179 
2180 /* <<Following mode page info copied from ST318451LW>> */
2181 
/* Read-Write Error Recovery mode page (0x1) for MODE SENSE.  Copies the
 * current/default values into @p; for pcontrol==1 (changeable values)
 * everything after the 2-byte header reads back as zero.  Returns the
 * page length in bytes.
 */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char def_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0,
					       0, 0, 5, 0, 0xff, 0xff};
	const int plen = sizeof(def_pg);

	memcpy(p, def_pg, plen);
	if (pcontrol == 1)
		memset(p + 2, 0, plen - 2);
	return plen;
}
2192 
/* Disconnect-Reconnect mode page (0x2) for MODE SENSE.  Copies the
 * current/default values into @p; for pcontrol==1 (changeable values)
 * everything after the 2-byte header reads back as zero.  Returns the
 * page length in bytes.
 */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char def_pg[] = {0x2, 0xe, 128, 128, 0, 10,
					       0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	const int plen = sizeof(def_pg);

	memcpy(p, def_pg, plen);
	if (pcontrol == 1)
		memset(p + 2, 0, plen - 2);
	return plen;
}
2203 
2204 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2205 {       /* Format device page for mode_sense */
2206 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2207 				     0, 0, 0, 0, 0, 0, 0, 0,
2208 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2209 
2210 	memcpy(p, format_pg, sizeof(format_pg));
2211 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2212 	put_unaligned_be16(sdebug_sector_size, p + 12);
2213 	if (sdebug_removable)
2214 		p[20] |= 0x20; /* should agree with INQUIRY */
2215 	if (1 == pcontrol)
2216 		memset(p + 2, 0, sizeof(format_pg) - 2);
2217 	return sizeof(format_pg);
2218 }
2219 
/* Caching mode page (0x8) current values; reported by resp_caching_pg()
 * and writable via MODE SELECT (see resp_mode_select()).
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2223 
2224 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2225 { 	/* Caching page for mode_sense */
2226 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2227 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2228 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2229 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2230 
2231 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2232 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2233 	memcpy(p, caching_pg, sizeof(caching_pg));
2234 	if (1 == pcontrol)
2235 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2236 	else if (2 == pcontrol)
2237 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2238 	return sizeof(caching_pg);
2239 }
2240 
/* Control mode page (0xa) current values; the D_SENSE and ATO bits are
 * refreshed from module parameters in resp_ctrl_m_pg(), and the page is
 * writable via MODE SELECT (see resp_mode_select()).
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2243 
2244 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2245 { 	/* Control mode page for mode_sense */
2246 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2247 					0, 0, 0, 0};
2248 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2249 				     0, 0, 0x2, 0x4b};
2250 
2251 	if (sdebug_dsense)
2252 		ctrl_m_pg[2] |= 0x4;
2253 	else
2254 		ctrl_m_pg[2] &= ~0x4;
2255 
2256 	if (sdebug_ato)
2257 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2258 
2259 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2260 	if (1 == pcontrol)
2261 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2262 	else if (2 == pcontrol)
2263 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2264 	return sizeof(ctrl_m_pg);
2265 }
2266 
2267 
2268 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2269 {	/* Informational Exceptions control mode page for mode_sense */
2270 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2271 				       0, 0, 0x0, 0x0};
2272 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2273 				      0, 0, 0x0, 0x0};
2274 
2275 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2276 	if (1 == pcontrol)
2277 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2278 	else if (2 == pcontrol)
2279 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2280 	return sizeof(iec_m_pg);
2281 }
2282 
/* SAS SSP mode page (0x19) - short format - for MODE SENSE.  For
 * pcontrol==1 (changeable values) everything after the 2-byte header
 * reads back as zero.  Returns the page length in bytes.
 */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{
	static const unsigned char def_pg[] = {0x19, 0x6, 0x6, 0x0,
					       0x7, 0xd0, 0x0, 0x0};
	const int plen = sizeof(def_pg);

	memcpy(p, def_pg, plen);
	if (pcontrol == 1)
		memset(p + 2, 0, plen - 2);
	return plen;
}
2293 
2294 
/* SAS phy control and discover mode subpage (0x19/0x1) for MODE SENSE.
 * Describes two phys; SAS addresses are patched into the template from
 * the naa3 components and the relative port ids are derived from
 * @target_dev_id.  For pcontrol==1 everything after the 4-byte subpage
 * header reads back as zero.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in the SAS address slots marked above, one pair per phy */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2327 
/* SAS SSP shared protocol specific port mode subpage (0x19/0x2).  For
 * pcontrol==1 (changeable values) everything after the 4-byte subpage
 * header reads back as zero.  Returns the subpage length in bytes.
 */
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{
	static const unsigned char def_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6,
					       0x10, 0, 0, 0, 0, 0,
					       0, 0, 0, 0};
	const int plen = sizeof(def_pg);

	memcpy(p, def_pg, plen);
	if (pcontrol == 1)
		memset(p + 4, 0, plen - 4);
	return plen;
}
2339 
2340 #define SDEBUG_MAX_MSENSE_SZ 256
2341 
/*
 * Respond to MODE SENSE (6) and MODE SENSE (10).  Builds the mode
 * parameter header, an optional block descriptor (8 or 16 bytes,
 * depending on the LLBAA bit) and the requested mode page(s), clamping
 * the response to the CDB allocation length.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* optional block descriptor: capacity + logical block size */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself: 1 byte in (6), 2 bytes in (10) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
2506 
2507 #define SDEBUG_MAX_MSELECT_SZ 512
2508 
/*
 * Respond to MODE SELECT (6 or 10 byte CDB). Fetches the parameter list
 * from the data-out buffer, validates the header and page framing, and
 * applies changes to the Caching (0x8), Control (0xa) or Informational
 * Exceptions (0x1c) shadow mode page arrays. Any other page code is
 * rejected. On success a MODE PARAMETERS CHANGED unit attention is
 * queued for this device. Returns 0 or check_condition_result.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format bit; must be set */
	sp = cmd[1] & 0x1;	/* Save Pages bit; not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* mode data length is reserved (0) in MODE SELECT parameter data */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* PS bit must be 0 in sent data */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF set --> long (sub-page) format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* SWP bit drives the write-protect emulation */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			/* D_SENSE selects descriptor vs fixed sense format */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2590 
/* Build the Temperature log page (0xd) body: current temperature
 * (parameter 0) and reference temperature (parameter 1), both single
 * byte Celsius values. Returns the number of bytes written to arr.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2600 
2601 static int resp_ie_l_pg(unsigned char *arr)
2602 {
2603 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2604 		};
2605 
2606 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2607 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2608 		arr[4] = THRESHOLD_EXCEEDED;
2609 		arr[5] = 0xff;
2610 	}
2611 	return sizeof(ie_l_pg);
2612 }
2613 
/* Build the Environment Reporting log subpage (0xd/0x1) body: one
 * temperature report descriptor followed by one relative humidity
 * report descriptor. Returns the number of bytes written to arr.
 */
static int resp_env_rep_l_spg(unsigned char *arr)
{
	static const unsigned char env_rep_l_spg[] = {
		/* temperature: current 40, lifetime max 72/min -1,
		 * max/min allowed 45/18 */
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		/* relative humidity: current 55, max 72, min 35,
		 * allowed 55/45 */
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2625 
2626 #define SDEBUG_MAX_LSENSE_SZ 512
2627 
2628 static int resp_log_sense(struct scsi_cmnd *scp,
2629 			  struct sdebug_dev_info *devip)
2630 {
2631 	int ppc, sp, pcode, subpcode;
2632 	u32 alloc_len, len, n;
2633 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2634 	unsigned char *cmd = scp->cmnd;
2635 
2636 	memset(arr, 0, sizeof(arr));
2637 	ppc = cmd[1] & 0x2;
2638 	sp = cmd[1] & 0x1;
2639 	if (ppc || sp) {
2640 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2641 		return check_condition_result;
2642 	}
2643 	pcode = cmd[2] & 0x3f;
2644 	subpcode = cmd[3] & 0xff;
2645 	alloc_len = get_unaligned_be16(cmd + 7);
2646 	arr[0] = pcode;
2647 	if (0 == subpcode) {
2648 		switch (pcode) {
2649 		case 0x0:	/* Supported log pages log page */
2650 			n = 4;
2651 			arr[n++] = 0x0;		/* this page */
2652 			arr[n++] = 0xd;		/* Temperature */
2653 			arr[n++] = 0x2f;	/* Informational exceptions */
2654 			arr[3] = n - 4;
2655 			break;
2656 		case 0xd:	/* Temperature log page */
2657 			arr[3] = resp_temp_l_pg(arr + 4);
2658 			break;
2659 		case 0x2f:	/* Informational exceptions log page */
2660 			arr[3] = resp_ie_l_pg(arr + 4);
2661 			break;
2662 		default:
2663 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2664 			return check_condition_result;
2665 		}
2666 	} else if (0xff == subpcode) {
2667 		arr[0] |= 0x40;
2668 		arr[1] = subpcode;
2669 		switch (pcode) {
2670 		case 0x0:	/* Supported log pages and subpages log page */
2671 			n = 4;
2672 			arr[n++] = 0x0;
2673 			arr[n++] = 0x0;		/* 0,0 page */
2674 			arr[n++] = 0x0;
2675 			arr[n++] = 0xff;	/* this page */
2676 			arr[n++] = 0xd;
2677 			arr[n++] = 0x0;		/* Temperature */
2678 			arr[n++] = 0xd;
2679 			arr[n++] = 0x1;		/* Environment reporting */
2680 			arr[n++] = 0xd;
2681 			arr[n++] = 0xff;	/* all 0xd subpages */
2682 			arr[n++] = 0x2f;
2683 			arr[n++] = 0x0;	/* Informational exceptions */
2684 			arr[n++] = 0x2f;
2685 			arr[n++] = 0xff;	/* all 0x2f subpages */
2686 			arr[3] = n - 4;
2687 			break;
2688 		case 0xd:	/* Temperature subpages */
2689 			n = 4;
2690 			arr[n++] = 0xd;
2691 			arr[n++] = 0x0;		/* Temperature */
2692 			arr[n++] = 0xd;
2693 			arr[n++] = 0x1;		/* Environment reporting */
2694 			arr[n++] = 0xd;
2695 			arr[n++] = 0xff;	/* these subpages */
2696 			arr[3] = n - 4;
2697 			break;
2698 		case 0x2f:	/* Informational exceptions subpages */
2699 			n = 4;
2700 			arr[n++] = 0x2f;
2701 			arr[n++] = 0x0;		/* Informational exceptions */
2702 			arr[n++] = 0x2f;
2703 			arr[n++] = 0xff;	/* these subpages */
2704 			arr[3] = n - 4;
2705 			break;
2706 		default:
2707 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2708 			return check_condition_result;
2709 		}
2710 	} else if (subpcode > 0) {
2711 		arr[0] |= 0x40;
2712 		arr[1] = subpcode;
2713 		if (pcode == 0xd && subpcode == 1)
2714 			arr[3] = resp_env_rep_l_spg(arr + 4);
2715 		else {
2716 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2717 			return check_condition_result;
2718 		}
2719 	} else {
2720 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2721 		return check_condition_result;
2722 	}
2723 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2724 	return fill_from_dev_buffer(scp, arr,
2725 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2726 }
2727 
2728 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2729 {
2730 	return devip->nr_zones != 0;
2731 }
2732 
/*
 * Return the zone state entry for the zone containing @lba. When the
 * zone capacity is smaller than the zone size, each sequential zone in
 * the zstate array is followed by a gap zone entry, so the index must
 * be adjusted past the conventional zones (which have no gap entries).
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* simple case: no gap zones, or lba in a conventional zone */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* lba may fall in the gap entry following the sequential zone */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
2754 
2755 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2756 {
2757 	return zsp->z_type == ZBC_ZTYPE_CNV;
2758 }
2759 
2760 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2761 {
2762 	return zsp->z_type == ZBC_ZTYPE_GAP;
2763 }
2764 
2765 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2766 {
2767 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2768 }
2769 
/*
 * Close an open sequential zone: an open zone with nothing written goes
 * back to empty, otherwise it becomes closed. The implicit/explicit
 * open and closed zone counters are updated accordingly. No-op for
 * non-sequential zones and for zones not currently open.
 */
static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		/* nothing written since open: revert to empty */
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}
2794 
2795 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2796 {
2797 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2798 	unsigned int i;
2799 
2800 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2801 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2802 			zbc_close_zone(devip, zsp);
2803 			return;
2804 		}
2805 	}
2806 }
2807 
/*
 * Transition a sequential zone to the implicitly or explicitly open
 * condition. If opening would exceed the device's max_open limit, an
 * implicitly open zone is closed first to make room. No-op for
 * non-sequential zones or when the zone is already in the requested
 * open condition.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2838 
/*
 * Mark an open zone as full and remove it from the open zone counters.
 * Called when a write advances the write pointer to the zone end; the
 * zone is expected to be implicitly or explicitly open at that point.
 */
static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
		break;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		break;
	default:
		/* filling a zone that was not open indicates a driver bug */
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);
		break;
	}
	zsp->z_cond = ZC5_FULL;
}
2856 
/*
 * Advance zone write pointer(s) after a write of @num blocks at @lba.
 * A sequential-write-required zone write is zone-bounded (enforced by
 * check_zbc_access_params()) so its WP advances in one step. A write to
 * a sequential-write-preferred zone may span zones and land away from
 * the WP, so walk the affected zones one by one, flagging any
 * non-sequential write on the way.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* write runs to (or past) the zone end */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			/* write ends past the WP but inside the zone */
			n = num;
			zsp->z_wp = end;
		} else {
			/* write entirely behind the WP; WP unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* continue into the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2898 
/*
 * Validate a read or write of @num blocks at @lba against ZBC rules for
 * a zoned device. For host-managed devices, reads may not cross zone
 * type boundaries; writes may not touch gap zones, cross zone
 * boundaries, target full zones, or (for sequential-write-required
 * zones) be unaligned to the write pointer. A valid write to an empty
 * or closed zone implicitly opens it. Returns 0 if the access is
 * allowed, otherwise check_condition_result with sense data set.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2975 
/*
 * Common validation for medium access commands: range check against
 * the emulated capacity, transfer length check against the store size,
 * write-protect check, and (for zoned devices) ZBC access rules.
 * Returns 0 if the access is allowed, otherwise check_condition_result
 * with sense data set.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
3002 
3003 /*
3004  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3005  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3006  * that access any of the "stores" in struct sdeb_store_info should call this
3007  * function with bug_if_fake_rw set to true.
3008  */
3009 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3010 						bool bug_if_fake_rw)
3011 {
3012 	if (sdebug_fake_rw) {
3013 		BUG_ON(bug_if_fake_rw);	/* See note above */
3014 		return NULL;
3015 	}
3016 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3017 }
3018 
/*
 * Copy @num sectors starting at @lba between the command's scatterlist
 * (skipping @sg_skip bytes) and the backing store, in the direction
 * given by @do_write. The store is treated as a ring: an access past
 * sdebug_store_sectors wraps to the start. Returns the number of bytes
 * copied or -1 if the command's data direction does not match.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* block = lba modulo store size; lba becomes the quotient */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped portion: continue from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3061 
3062 /* Returns number of bytes copied or -1 if error. */
3063 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3064 {
3065 	struct scsi_data_buffer *sdb = &scp->sdb;
3066 
3067 	if (!sdb->length)
3068 		return 0;
3069 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3070 		return -1;
3071 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3072 			      num * sdebug_sector_size, 0, true);
3073 }
3074 
3075 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3076  * arr into sip->storep+lba and return true. If comparison fails then
3077  * return false. */
3078 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3079 			      const u8 *arr, bool compare_only)
3080 {
3081 	bool res;
3082 	u64 block, rest = 0;
3083 	u32 store_blks = sdebug_store_sectors;
3084 	u32 lb_size = sdebug_sector_size;
3085 	u8 *fsp = sip->storep;
3086 
3087 	block = do_div(lba, store_blks);
3088 	if (block + num > store_blks)
3089 		rest = block + num - store_blks;
3090 
3091 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3092 	if (!res)
3093 		return res;
3094 	if (rest)
3095 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3096 			     rest * lb_size);
3097 	if (!res)
3098 		return res;
3099 	if (compare_only)
3100 		return true;
3101 	arr += num * lb_size;
3102 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3103 	if (rest)
3104 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3105 	return res;
3106 }
3107 
3108 static __be16 dif_compute_csum(const void *buf, int len)
3109 {
3110 	__be16 csum;
3111 
3112 	if (sdebug_guard)
3113 		csum = (__force __be16)ip_compute_csum(buf, len);
3114 	else
3115 		csum = cpu_to_be16(crc_t10dif(buf, len));
3116 
3117 	return csum;
3118 }
3119 
/*
 * Verify a protection information tuple against a sector of data.
 * Returns 0 on success, 0x01 on a guard tag mismatch, or 0x03 on a
 * reference tag mismatch (values match the sense-key ASCQ codes used
 * by the callers).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: ref tag is the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: ref tag is the expected initial LBA from the CDB */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3146 
/*
 * Copy protection information for @sectors sectors starting at @sector
 * between the command's protection scatterlist and the dif_storep
 * array (direction set by @read). Like the data store, dif_storep is
 * treated as a ring and accesses wrap at sdebug_store_sectors.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* rest = bytes that wrap past the end of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at dif_storep[0] */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3192 
/*
 * Verify stored protection information for a read of @sectors sectors
 * at @start_sec, then copy the PI to the command's protection
 * scatterlist. Tuples with an app tag of 0xffff are skipped (escape
 * value). Returns 0 or the dif_verify() error code (0x01 guard,
 * 0x03 ref tag) of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* copy PI out even on error so the initiator can inspect it */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3232 
3233 static inline void
3234 sdeb_read_lock(struct sdeb_store_info *sip)
3235 {
3236 	if (sdebug_no_rwlock) {
3237 		if (sip)
3238 			__acquire(&sip->macc_lck);
3239 		else
3240 			__acquire(&sdeb_fake_rw_lck);
3241 	} else {
3242 		if (sip)
3243 			read_lock(&sip->macc_lck);
3244 		else
3245 			read_lock(&sdeb_fake_rw_lck);
3246 	}
3247 }
3248 
3249 static inline void
3250 sdeb_read_unlock(struct sdeb_store_info *sip)
3251 {
3252 	if (sdebug_no_rwlock) {
3253 		if (sip)
3254 			__release(&sip->macc_lck);
3255 		else
3256 			__release(&sdeb_fake_rw_lck);
3257 	} else {
3258 		if (sip)
3259 			read_unlock(&sip->macc_lck);
3260 		else
3261 			read_unlock(&sdeb_fake_rw_lck);
3262 	}
3263 }
3264 
3265 static inline void
3266 sdeb_write_lock(struct sdeb_store_info *sip)
3267 {
3268 	if (sdebug_no_rwlock) {
3269 		if (sip)
3270 			__acquire(&sip->macc_lck);
3271 		else
3272 			__acquire(&sdeb_fake_rw_lck);
3273 	} else {
3274 		if (sip)
3275 			write_lock(&sip->macc_lck);
3276 		else
3277 			write_lock(&sdeb_fake_rw_lck);
3278 	}
3279 }
3280 
3281 static inline void
3282 sdeb_write_unlock(struct sdeb_store_info *sip)
3283 {
3284 	if (sdebug_no_rwlock) {
3285 		if (sip)
3286 			__release(&sip->macc_lck);
3287 		else
3288 			__release(&sdeb_fake_rw_lck);
3289 	} else {
3290 		if (sip)
3291 			write_unlock(&sip->macc_lck);
3292 		else
3293 			write_unlock(&sdeb_fake_rw_lck);
3294 	}
3295 }
3296 
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes lba/num/ei_lba from the CDB variant, validates the access,
 * optionally injects errors (short transfer, medium error, DIF/DIX),
 * verifies protection information when DIX is active, and copies data
 * from the backing store into the command buffer.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* decode lba, transfer length and expected initial LBA tag */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 requires READ(32); RDPROTECT must be 0 here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* optional injection: pretend only half the blocks transferred */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* optional injection: medium error inside the configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* optional injection: recovered / DIF / DIX errors after the read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3439 
/*
 * Verify the protection information sent with a write against the
 * write data, walking the protection and data scatterlists in
 * parallel, then store the PI via dif_copy_prot(). Returns 0 on
 * success or the dif_verify() error code (0x01 guard, 0x03 ref tag)
 * of the first failing sector; nothing is stored on failure.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* data sgl exhausted before prot sgl: report guard */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all sectors verified: persist the protection information */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3511 
/*
 * Convert an LBA to its index in the provisioning bitmap, accounting
 * for the configured unmap granularity and alignment offset.
 */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
3519 
/*
 * Convert a provisioning bitmap index back to the first LBA of that
 * unmap granule (inverse of lba_to_map_index()).
 */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3528 
/*
 * Report whether @lba is mapped (provisioned) and, via *num, how many
 * consecutive blocks starting at @lba share that state. Returns 1 for
 * mapped, 0 for unmapped.
 */
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	/* find where the mapped/unmapped run ends */
	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
3549 
3550 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3551 		       unsigned int len)
3552 {
3553 	sector_t end = lba + len;
3554 
3555 	while (lba < end) {
3556 		unsigned long index = lba_to_map_index(lba);
3557 
3558 		if (index < map_size)
3559 			set_bit(index, sip->map_storep);
3560 
3561 		lba = map_index_to_lba(index + 1);
3562 	}
3563 }
3564 
3565 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3566 			 unsigned int len)
3567 {
3568 	sector_t end = lba + len;
3569 	u8 *fsp = sip->storep;
3570 
3571 	while (lba < end) {
3572 		unsigned long index = lba_to_map_index(lba);
3573 
3574 		if (lba == map_index_to_lba(index) &&
3575 		    lba + sdebug_unmap_granularity <= end &&
3576 		    index < map_size) {
3577 			clear_bit(index, sip->map_storep);
3578 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3579 				memset(fsp + lba * sdebug_sector_size,
3580 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3581 				       sdebug_sector_size *
3582 				       sdebug_unmap_granularity);
3583 			}
3584 			if (sip->dif_storep) {
3585 				memset(sip->dif_storep + lba, 0xff,
3586 				       sizeof(*sip->dif_storep) *
3587 				       sdebug_unmap_granularity);
3588 			}
3589 		}
3590 		lba = map_index_to_lba(index + 1);
3591 	}
3592 }
3593 
/*
 * Respond to WRITE(6/10/12/16/32) and XDWRITEREAD(10): decode the cdb,
 * validate protection-information fields, write the data to the fake
 * store, then update the provisioning map and (for ZBC devices) the zone
 * write pointer.  Returns 0 on success, a check/illegal condition result,
 * or DID_ERROR << 16 on a transfer failure.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to write */
	u32 ei_lba;	/* expected initial LBA, nonzero only for WRITE(32) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode lba/num according to the cdb variant received. */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6) packs a 21-bit LBA into bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks in WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* WRPROTECT (cmd[1] bits 5-7) sanity against the formatted DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Optional error injection once a write has completed. */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3725 
3726 /*
3727  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3728  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3729  */
/*
 * Respond to WRITE SCATTERED(16/32).  The data-out buffer begins with a
 * header plus an array of LBA range descriptors (LRDs), followed (after
 * lbdof blocks) by the data for each range.  Each range is validated and
 * written in turn under the write lock.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of header + LRD list */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header (lrd_size bytes) + descriptors must fit before the data */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* Walk the LRD array; first descriptor starts after the header. */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optional error injection after each range is written. */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
3891 
/*
 * Common worker for WRITE SAME(10/16).  If @unmap is set (and LBP is
 * active) the range is deallocated; otherwise one logical block is
 * obtained (zeroes when @ndob, else fetched from the data-out buffer)
 * and replicated across the range.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;	/* first sector of the range in the fake store */
	u8 *fsp;	/* base of the fake store */

	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	/* store may be smaller than the medium; wrap via modulo */
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
3953 
3954 static int resp_write_same_10(struct scsi_cmnd *scp,
3955 			      struct sdebug_dev_info *devip)
3956 {
3957 	u8 *cmd = scp->cmnd;
3958 	u32 lba;
3959 	u16 num;
3960 	u32 ei_lba = 0;
3961 	bool unmap = false;
3962 
3963 	if (cmd[1] & 0x8) {
3964 		if (sdebug_lbpws10 == 0) {
3965 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3966 			return check_condition_result;
3967 		} else
3968 			unmap = true;
3969 	}
3970 	lba = get_unaligned_be32(cmd + 2);
3971 	num = get_unaligned_be16(cmd + 7);
3972 	if (num > sdebug_write_same_length) {
3973 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3974 		return check_condition_result;
3975 	}
3976 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3977 }
3978 
3979 static int resp_write_same_16(struct scsi_cmnd *scp,
3980 			      struct sdebug_dev_info *devip)
3981 {
3982 	u8 *cmd = scp->cmnd;
3983 	u64 lba;
3984 	u32 num;
3985 	u32 ei_lba = 0;
3986 	bool unmap = false;
3987 	bool ndob = false;
3988 
3989 	if (cmd[1] & 0x8) {	/* UNMAP */
3990 		if (sdebug_lbpws == 0) {
3991 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3992 			return check_condition_result;
3993 		} else
3994 			unmap = true;
3995 	}
3996 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3997 		ndob = true;
3998 	lba = get_unaligned_be64(cmd + 2);
3999 	num = get_unaligned_be32(cmd + 10);
4000 	if (num > sdebug_write_same_length) {
4001 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4002 		return check_condition_result;
4003 	}
4004 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4005 }
4006 
4007 /* Note the mode field is in the same position as the (lower) service action
4008  * field. For the Report supported operation codes command, SPC-4 suggests
4009  * each mode of this command should be reported separately; for future. */
4010 static int resp_write_buffer(struct scsi_cmnd *scp,
4011 			     struct sdebug_dev_info *devip)
4012 {
4013 	u8 *cmd = scp->cmnd;
4014 	struct scsi_device *sdp = scp->device;
4015 	struct sdebug_dev_info *dp;
4016 	u8 mode;
4017 
4018 	mode = cmd[1] & 0x1f;
4019 	switch (mode) {
4020 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4021 		/* set UAs on this device only */
4022 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4023 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4024 		break;
4025 	case 0x5:	/* download MC, save and ACT */
4026 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4027 		break;
4028 	case 0x6:	/* download MC with offsets and ACT */
4029 		/* set UAs on most devices (LUs) in this target */
4030 		list_for_each_entry(dp,
4031 				    &devip->sdbg_host->dev_info_list,
4032 				    dev_list)
4033 			if (dp->target == sdp->id) {
4034 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4035 				if (devip != dp)
4036 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4037 						dp->uas_bm);
4038 			}
4039 		break;
4040 	case 0x7:	/* download MC with offsets, save, and ACT */
4041 		/* set UA on all devices (LUs) in this target */
4042 		list_for_each_entry(dp,
4043 				    &devip->sdbg_host->dev_info_list,
4044 				    dev_list)
4045 			if (dp->target == sdp->id)
4046 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4047 					dp->uas_bm);
4048 		break;
4049 	default:
4050 		/* do nothing for this command for other mode values */
4051 		break;
4052 	}
4053 	return 0;
4054 }
4055 
/*
 * COMPARE AND WRITE: the data-out buffer holds 2*num blocks — the compare
 * half followed by the write half.  If the compare half matches the store
 * the write half replaces it atomically (under the write lock); otherwise
 * MISCOMPARE is returned.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* blocks in data-out: compare half + write half */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* WRPROTECT sanity against the formatted protection type */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half; on match, write second half to the store */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4116 
/*
 * On-the-wire layout of one UNMAP parameter-list block descriptor
 * (see SBC): starting LBA and block count, both big-endian.
 */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
4122 
4123 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4124 {
4125 	unsigned char *buf;
4126 	struct unmap_block_desc *desc;
4127 	struct sdeb_store_info *sip = devip2sip(devip, true);
4128 	unsigned int i, payload_len, descriptors;
4129 	int ret;
4130 
4131 	if (!scsi_debug_lbp())
4132 		return 0;	/* fib and say its done */
4133 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4134 	BUG_ON(scsi_bufflen(scp) != payload_len);
4135 
4136 	descriptors = (payload_len - 8) / 16;
4137 	if (descriptors > sdebug_unmap_max_desc) {
4138 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4139 		return check_condition_result;
4140 	}
4141 
4142 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4143 	if (!buf) {
4144 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4145 				INSUFF_RES_ASCQ);
4146 		return check_condition_result;
4147 	}
4148 
4149 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4150 
4151 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4152 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4153 
4154 	desc = (void *)&buf[8];
4155 
4156 	sdeb_write_lock(sip);
4157 
4158 	for (i = 0 ; i < descriptors ; i++) {
4159 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4160 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4161 
4162 		ret = check_device_access_params(scp, lba, num, true);
4163 		if (ret)
4164 			goto out;
4165 
4166 		unmap_region(sip, lba, num);
4167 	}
4168 
4169 	ret = 0;
4170 
4171 out:
4172 	sdeb_write_unlock(sip);
4173 	kfree(buf);
4174 
4175 	return ret;
4176 }
4177 
4178 #define SDEBUG_GET_LBA_STATUS_LEN 32
4179 
4180 static int resp_get_lba_status(struct scsi_cmnd *scp,
4181 			       struct sdebug_dev_info *devip)
4182 {
4183 	u8 *cmd = scp->cmnd;
4184 	u64 lba;
4185 	u32 alloc_len, mapped, num;
4186 	int ret;
4187 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4188 
4189 	lba = get_unaligned_be64(cmd + 2);
4190 	alloc_len = get_unaligned_be32(cmd + 10);
4191 
4192 	if (alloc_len < 24)
4193 		return 0;
4194 
4195 	ret = check_device_access_params(scp, lba, 1, false);
4196 	if (ret)
4197 		return ret;
4198 
4199 	if (scsi_debug_lbp()) {
4200 		struct sdeb_store_info *sip = devip2sip(devip, true);
4201 
4202 		mapped = map_state(sip, lba, &num);
4203 	} else {
4204 		mapped = 1;
4205 		/* following just in case virtual_gb changed */
4206 		sdebug_capacity = get_sdebug_capacity();
4207 		if (sdebug_capacity - lba <= 0xffffffff)
4208 			num = sdebug_capacity - lba;
4209 		else
4210 			num = 0xffffffff;
4211 	}
4212 
4213 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4214 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4215 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4216 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4217 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4218 
4219 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4220 }
4221 
4222 static int resp_sync_cache(struct scsi_cmnd *scp,
4223 			   struct sdebug_dev_info *devip)
4224 {
4225 	int res = 0;
4226 	u64 lba;
4227 	u32 num_blocks;
4228 	u8 *cmd = scp->cmnd;
4229 
4230 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4231 		lba = get_unaligned_be32(cmd + 2);
4232 		num_blocks = get_unaligned_be16(cmd + 7);
4233 	} else {				/* SYNCHRONIZE_CACHE(16) */
4234 		lba = get_unaligned_be64(cmd + 2);
4235 		num_blocks = get_unaligned_be32(cmd + 10);
4236 	}
4237 	if (lba + num_blocks > sdebug_capacity) {
4238 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4239 		return check_condition_result;
4240 	}
4241 	if (!write_since_sync || (cmd[1] & 0x2))
4242 		res = SDEG_RES_IMMED_MASK;
4243 	else		/* delay if write_since_sync and IMMED clear */
4244 		write_since_sync = false;
4245 	return res;
4246 }
4247 
4248 /*
4249  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4250  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4251  * a GOOD status otherwise. Model a disk with a big cache and yield
4252  * CONDITION MET. Actually tries to bring range in main memory into the
4253  * cache associated with the CPU(s).
4254  */
4255 static int resp_pre_fetch(struct scsi_cmnd *scp,
4256 			  struct sdebug_dev_info *devip)
4257 {
4258 	int res = 0;
4259 	u64 lba;
4260 	u64 block, rest = 0;
4261 	u32 nblks;
4262 	u8 *cmd = scp->cmnd;
4263 	struct sdeb_store_info *sip = devip2sip(devip, true);
4264 	u8 *fsp = sip->storep;
4265 
4266 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4267 		lba = get_unaligned_be32(cmd + 2);
4268 		nblks = get_unaligned_be16(cmd + 7);
4269 	} else {			/* PRE-FETCH(16) */
4270 		lba = get_unaligned_be64(cmd + 2);
4271 		nblks = get_unaligned_be32(cmd + 10);
4272 	}
4273 	if (lba + nblks > sdebug_capacity) {
4274 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4275 		return check_condition_result;
4276 	}
4277 	if (!fsp)
4278 		goto fini;
4279 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4280 	block = do_div(lba, sdebug_store_sectors);
4281 	if (block + nblks > sdebug_store_sectors)
4282 		rest = block + nblks - sdebug_store_sectors;
4283 
4284 	/* Try to bring the PRE-FETCH range into CPU's cache */
4285 	sdeb_read_lock(sip);
4286 	prefetch_range(fsp + (sdebug_sector_size * block),
4287 		       (nblks - rest) * sdebug_sector_size);
4288 	if (rest)
4289 		prefetch_range(fsp, rest * sdebug_sector_size);
4290 	sdeb_read_unlock(sip);
4291 fini:
4292 	if (cmd[1] & 0x2)
4293 		res = SDEG_RES_IMMED_MASK;
4294 	return res | condition_met_result;
4295 }
4296 
4297 #define RL_BUCKET_ELEMS 8
4298 
4299 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4300  * (W-LUN), the normal Linux scanning logic does not associate it with a
4301  * device (e.g. /dev/sg7). The following magic will make that association:
4302  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4303  * where <n> is a host number. If there are multiple targets in a host then
4304  * the above will associate a W-LUN to each target. To only get a W-LUN
4305  * for target 2, then use "echo '- 2 49409' > scan" .
4306  */
/*
 * REPORT LUNS: emit the 8-byte response header followed by one 8-byte
 * LUN entry per reported LUN, generated in buckets of RL_BUCKET_ELEMS
 * entries to keep the on-stack array small.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT field chooses which LUN classes to include */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 carries the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		/* partially filled bucket: fall out to append the W-LUN */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4399 
/*
 * VERIFY(10/16) with BYTCHK: fetch the data-out buffer and compare it to
 * the store.  BYTCHK=1 compares vnum blocks; BYTCHK=3 sends one block
 * which is compared repeatedly against every block of the range;
 * BYTCHK=0 claims an internal verify succeeded without any transfer.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* BYTCHK=3 transfers a single block regardless of vnum */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* sized for vnum blocks so BYTCHK=3 can replicate into it below */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single sent block across the whole range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4475 
4476 #define RZONES_DESC_HD 64
4477 
4478 /* Report zones depending on start LBA and reporting options */
4479 static int resp_report_zones(struct scsi_cmnd *scp,
4480 			     struct sdebug_dev_info *devip)
4481 {
4482 	unsigned int rep_max_zones, nrz = 0;
4483 	int ret = 0;
4484 	u32 alloc_len, rep_opts, rep_len;
4485 	bool partial;
4486 	u64 lba, zs_lba;
4487 	u8 *arr = NULL, *desc;
4488 	u8 *cmd = scp->cmnd;
4489 	struct sdeb_zone_state *zsp = NULL;
4490 	struct sdeb_store_info *sip = devip2sip(devip, false);
4491 
4492 	if (!sdebug_dev_is_zoned(devip)) {
4493 		mk_sense_invalid_opcode(scp);
4494 		return check_condition_result;
4495 	}
4496 	zs_lba = get_unaligned_be64(cmd + 2);
4497 	alloc_len = get_unaligned_be32(cmd + 10);
4498 	if (alloc_len == 0)
4499 		return 0;	/* not an error */
4500 	rep_opts = cmd[14] & 0x3f;
4501 	partial = cmd[14] & 0x80;
4502 
4503 	if (zs_lba >= sdebug_capacity) {
4504 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4505 		return check_condition_result;
4506 	}
4507 
4508 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4509 
4510 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4511 	if (!arr) {
4512 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4513 				INSUFF_RES_ASCQ);
4514 		return check_condition_result;
4515 	}
4516 
4517 	sdeb_read_lock(sip);
4518 
4519 	desc = arr + 64;
4520 	for (lba = zs_lba; lba < sdebug_capacity;
4521 	     lba = zsp->z_start + zsp->z_size) {
4522 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4523 			break;
4524 		zsp = zbc_zone(devip, lba);
4525 		switch (rep_opts) {
4526 		case 0x00:
4527 			/* All zones */
4528 			break;
4529 		case 0x01:
4530 			/* Empty zones */
4531 			if (zsp->z_cond != ZC1_EMPTY)
4532 				continue;
4533 			break;
4534 		case 0x02:
4535 			/* Implicit open zones */
4536 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4537 				continue;
4538 			break;
4539 		case 0x03:
4540 			/* Explicit open zones */
4541 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4542 				continue;
4543 			break;
4544 		case 0x04:
4545 			/* Closed zones */
4546 			if (zsp->z_cond != ZC4_CLOSED)
4547 				continue;
4548 			break;
4549 		case 0x05:
4550 			/* Full zones */
4551 			if (zsp->z_cond != ZC5_FULL)
4552 				continue;
4553 			break;
4554 		case 0x06:
4555 		case 0x07:
4556 		case 0x10:
4557 			/*
4558 			 * Read-only, offline, reset WP recommended are
4559 			 * not emulated: no zones to report;
4560 			 */
4561 			continue;
4562 		case 0x11:
4563 			/* non-seq-resource set */
4564 			if (!zsp->z_non_seq_resource)
4565 				continue;
4566 			break;
4567 		case 0x3e:
4568 			/* All zones except gap zones. */
4569 			if (zbc_zone_is_gap(zsp))
4570 				continue;
4571 			break;
4572 		case 0x3f:
4573 			/* Not write pointer (conventional) zones */
4574 			if (zbc_zone_is_seq(zsp))
4575 				continue;
4576 			break;
4577 		default:
4578 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4579 					INVALID_FIELD_IN_CDB, 0);
4580 			ret = check_condition_result;
4581 			goto fini;
4582 		}
4583 
4584 		if (nrz < rep_max_zones) {
4585 			/* Fill zone descriptor */
4586 			desc[0] = zsp->z_type;
4587 			desc[1] = zsp->z_cond << 4;
4588 			if (zsp->z_non_seq_resource)
4589 				desc[1] |= 1 << 1;
4590 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4591 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4592 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4593 			desc += 64;
4594 		}
4595 
4596 		if (partial && nrz >= rep_max_zones)
4597 			break;
4598 
4599 		nrz++;
4600 	}
4601 
4602 	/* Report header */
4603 	/* Zone list length. */
4604 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4605 	/* Maximum LBA */
4606 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4607 	/* Zone starting LBA granularity. */
4608 	if (devip->zcap < devip->zsize)
4609 		put_unaligned_be64(devip->zsize, arr + 16);
4610 
4611 	rep_len = (unsigned long)desc - (unsigned long)arr;
4612 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4613 
4614 fini:
4615 	sdeb_read_unlock(sip);
4616 	kfree(arr);
4617 	return ret;
4618 }
4619 
4620 /* Logic transplanted from tcmu-runner, file_zbc.c */
4621 static void zbc_open_all(struct sdebug_dev_info *devip)
4622 {
4623 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4624 	unsigned int i;
4625 
4626 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4627 		if (zsp->z_cond == ZC4_CLOSED)
4628 			zbc_open_zone(devip, &devip->zstate[i], true);
4629 	}
4630 }
4631 
/*
 * OPEN ZONE(S) (ZBC). With the ALL bit (cmd byte 14, bit 0) set, opens
 * every closed zone; otherwise explicitly opens the single zone whose
 * start LBA is in cmd bytes 2..9. Returns 0 or check_condition_result.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones cannot be explicitly opened */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* Already explicitly open or full: nothing to do, success */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* Opening one more zone must not exceed the max_open limit */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4699 
4700 static void zbc_close_all(struct sdebug_dev_info *devip)
4701 {
4702 	unsigned int i;
4703 
4704 	for (i = 0; i < devip->nr_zones; i++)
4705 		zbc_close_zone(devip, &devip->zstate[i]);
4706 }
4707 
/*
 * CLOSE ZONE(S) (ZBC). With the ALL bit set, closes every zone;
 * otherwise closes the single zone whose start LBA is in cmd bytes 2..9.
 * Returns 0 or check_condition_result with sense data set.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones cannot be closed */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4755 
/*
 * Transition @zsp to the full condition. Open zones are closed first so
 * the nr_exp_open/nr_closed accounting stays consistent, then a resulting
 * closed zone is removed from the closed count. When @empty is true,
 * empty zones are also finished; otherwise they are left untouched.
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		/* A full zone's write pointer points at the end of the zone */
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
4771 
4772 static void zbc_finish_all(struct sdebug_dev_info *devip)
4773 {
4774 	unsigned int i;
4775 
4776 	for (i = 0; i < devip->nr_zones; i++)
4777 		zbc_finish_zone(devip, &devip->zstate[i], false);
4778 }
4779 
/*
 * FINISH ZONE(S) (ZBC). With the ALL bit set, finishes every non-empty
 * zone; otherwise finishes the single zone (including an empty one)
 * whose start LBA is in cmd bytes 2..9.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones cannot be finished */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty == true: a single empty zone may also be finished */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4827 
/*
 * Reset the write pointer of @zsp: close the zone if it is open, zero
 * the backing store between the zone start and the write pointer, and
 * return the zone to the empty condition. Zones without a write pointer
 * (conventional/gap) are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* Discard any data written since the zone was last empty */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4852 
4853 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4854 {
4855 	unsigned int i;
4856 
4857 	for (i = 0; i < devip->nr_zones; i++)
4858 		zbc_rwp_zone(devip, &devip->zstate[i]);
4859 }
4860 
/*
 * RESET WRITE POINTER (ZBC). With the ALL bit set, resets every
 * sequential zone; otherwise resets the single zone whose start LBA is
 * in cmd bytes 2..9.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The zone ID must be the first LBA of the zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones have no write pointer to reset */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4906 
4907 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4908 {
4909 	u16 hwq;
4910 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4911 
4912 	hwq = blk_mq_unique_tag_to_hwq(tag);
4913 
4914 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4915 	if (WARN_ON_ONCE(hwq >= submit_queues))
4916 		hwq = 0;
4917 
4918 	return sdebug_q_arr + hwq;
4919 }
4920 
4921 static u32 get_tag(struct scsi_cmnd *cmnd)
4922 {
4923 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4924 }
4925 
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
	int qc_idx;
	int retiring = 0;
	unsigned long flags, iflags;
	struct scsi_cmnd *scp = sqcp->scmd;
	struct sdebug_scsi_cmd *sdsc;
	bool aborted;
	struct sdebug_queue *sqp;

	qc_idx = sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* Count completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (!scp) {
		pr_err("scmd=NULL\n");
		goto out;
	}
	/* Sanity check the submit queue slot index before using it */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		goto out;
	}

	sdsc = scsi_cmd_priv(scp);
	sqp = get_queue(scp);
	/* Lock order: queue lock first, then per-command lock */
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	/* Detach the queued state from the command under both locks */
	ASSIGN_QUEUED_CMD(scp, NULL);

	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	/* Release the submit queue slot */
	sqp->qc_arr[qc_idx] = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion qc_idx=%d\n", qc_idx);
		goto out;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sdsc->lock, flags);
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			goto out;
		}
		/* Shrink retired_max_queue once the tail slots drain */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}

	spin_unlock_irqrestore(&sdsc->lock, flags);
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (aborted) {
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		goto out;
	}

	scsi_done(scp); /* callback to mid level */
out:
	sdebug_free_queued_cmd(sqcp);
}
5003 
5004 /* When high resolution timer goes off this function is called. */
5005 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5006 {
5007 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5008 						  hrt);
5009 	sdebug_q_cmd_complete(sd_dp);
5010 	return HRTIMER_NORESTART;
5011 }
5012 
5013 /* When work queue schedules work, it calls this function. */
5014 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5015 {
5016 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5017 						  ew.work);
5018 	sdebug_q_cmd_complete(sd_dp);
5019 }
5020 
5021 static bool got_shared_uuid;
5022 static uuid_t shared_uuid;
5023 
/*
 * Build the emulated zone layout for a zoned device: determine zone
 * size/capacity from module parameters, lay out conventional zones,
 * sequential zones and (when zcap < zsize) gap zones, and allocate the
 * per-zone state array. Returns 0 or a negative errno.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* Zone capacity defaults to the zone size and may never exceed it */
	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	/* Split capacity into conventional and sequential zones */
	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* Initialize each zone's type, condition, write pointer and size */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* Zone-size aligned start: a sequential zone */
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* Unaligned start: the gap after a short-capacity zone */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
5135 
5136 static struct sdebug_dev_info *sdebug_device_create(
5137 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5138 {
5139 	struct sdebug_dev_info *devip;
5140 
5141 	devip = kzalloc(sizeof(*devip), flags);
5142 	if (devip) {
5143 		if (sdebug_uuid_ctl == 1)
5144 			uuid_gen(&devip->lu_name);
5145 		else if (sdebug_uuid_ctl == 2) {
5146 			if (got_shared_uuid)
5147 				devip->lu_name = shared_uuid;
5148 			else {
5149 				uuid_gen(&shared_uuid);
5150 				got_shared_uuid = true;
5151 				devip->lu_name = shared_uuid;
5152 			}
5153 		}
5154 		devip->sdbg_host = sdbg_host;
5155 		if (sdeb_zbc_in_use) {
5156 			devip->zmodel = sdeb_zbc_model;
5157 			if (sdebug_device_create_zones(devip)) {
5158 				kfree(devip);
5159 				return NULL;
5160 			}
5161 		} else {
5162 			devip->zmodel = BLK_ZONED_NONE;
5163 		}
5164 		devip->create_ts = ktime_get_boottime();
5165 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5166 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5167 	}
5168 	return devip;
5169 }
5170 
5171 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5172 {
5173 	struct sdebug_host_info *sdbg_host;
5174 	struct sdebug_dev_info *open_devip = NULL;
5175 	struct sdebug_dev_info *devip;
5176 
5177 	sdbg_host = shost_to_sdebug_host(sdev->host);
5178 
5179 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5180 		if ((devip->used) && (devip->channel == sdev->channel) &&
5181 		    (devip->target == sdev->id) &&
5182 		    (devip->lun == sdev->lun))
5183 			return devip;
5184 		else {
5185 			if ((!devip->used) && (!open_devip))
5186 				open_devip = devip;
5187 		}
5188 	}
5189 	if (!open_devip) { /* try and make a new one */
5190 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5191 		if (!open_devip) {
5192 			pr_err("out of memory at line %d\n", __LINE__);
5193 			return NULL;
5194 		}
5195 	}
5196 
5197 	open_devip->channel = sdev->channel;
5198 	open_devip->target = sdev->id;
5199 	open_devip->lun = sdev->lun;
5200 	open_devip->sdbg_host = sdbg_host;
5201 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5202 	open_devip->used = true;
5203 	return open_devip;
5204 }
5205 
/* Per-device allocation hook: only logs when sdebug_verbose is set. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
5213 
5214 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5215 {
5216 	struct sdebug_dev_info *devip =
5217 			(struct sdebug_dev_info *)sdp->hostdata;
5218 
5219 	if (sdebug_verbose)
5220 		pr_info("slave_configure <%u %u %u %llu>\n",
5221 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5222 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5223 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5224 	if (devip == NULL) {
5225 		devip = find_build_dev_info(sdp);
5226 		if (devip == NULL)
5227 			return 1;  /* no resources, will be marked offline */
5228 	}
5229 	sdp->hostdata = devip;
5230 	if (sdebug_no_uld)
5231 		sdp->no_uld_attach = 1;
5232 	config_cdb_len(sdp);
5233 	return 0;
5234 }
5235 
5236 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5237 {
5238 	struct sdebug_dev_info *devip =
5239 		(struct sdebug_dev_info *)sdp->hostdata;
5240 
5241 	if (sdebug_verbose)
5242 		pr_info("slave_destroy <%u %u %u %llu>\n",
5243 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5244 	if (devip) {
5245 		/* make this slot available for re-use */
5246 		devip->used = false;
5247 		sdp->hostdata = NULL;
5248 	}
5249 }
5250 
5251 /* Returns true if we require the queued memory to be freed by the caller. */
5252 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5253 			   enum sdeb_defer_type defer_t)
5254 {
5255 	if (defer_t == SDEB_DEFER_HRT) {
5256 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5257 
5258 		switch (res) {
5259 		case 0: /* Not active, it must have already run */
5260 		case -1: /* -1 It's executing the CB */
5261 			return false;
5262 		case 1: /* Was active, we've now cancelled */
5263 		default:
5264 			return true;
5265 		}
5266 	} else if (defer_t == SDEB_DEFER_WQ) {
5267 		/* Cancel if pending */
5268 		if (cancel_work_sync(&sd_dp->ew.work))
5269 			return true;
5270 		/* Was not pending, so it must have run */
5271 		return false;
5272 	} else if (defer_t == SDEB_DEFER_POLL) {
5273 		return true;
5274 	}
5275 
5276 	return false;
5277 }
5278 
5279 
/*
 * Detach the queued-command state from @cmnd and stop its deferred
 * completion (hrtimer, work item, or poll). Optionally reports the
 * submit queue slot index via @sqa_idx. Returns false if the command
 * had no queued state. Caller must hold the per-command sdsc->lock.
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd, int *sqa_idx)
{
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_defer *sd_dp;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);

	lockdep_assert_held(&sdsc->lock);

	sqcp = TO_QUEUED_CMD(cmnd);
	if (!sqcp)
		return false;
	sd_dp = &sqcp->sd_dp;
	if (sqa_idx)
		*sqa_idx = sd_dp->sqa_idx;
	l_defer_t = READ_ONCE(sd_dp->defer_t);
	ASSIGN_QUEUED_CMD(cmnd, NULL);

	/* Only free here if the deferred callback can no longer run */
	if (stop_qc_helper(sd_dp, l_defer_t))
		sdebug_free_queued_cmd(sqcp);

	return true;
}
5303 
5304 /*
5305  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5306  */
5307 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5308 {
5309 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5310 	struct sdebug_queue *sqp = get_queue(cmnd);
5311 	unsigned long flags, iflags;
5312 	int k = -1;
5313 	bool res;
5314 
5315 	spin_lock_irqsave(&sdsc->lock, flags);
5316 	res = scsi_debug_stop_cmnd(cmnd, &k);
5317 	spin_unlock_irqrestore(&sdsc->lock, flags);
5318 
5319 	if (k >= 0) {
5320 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5321 		clear_bit(k, sqp->in_use_bm);
5322 		sqp->qc_arr[k] = NULL;
5323 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5324 	}
5325 
5326 	return res;
5327 }
5328 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags, flags;
	int j, k;
	struct sdebug_queue *sqp;

	/* Walk every in-use slot of every submit queue */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				struct sdebug_queued_cmd *sqcp = sqp->qc_arr[k];
				struct sdebug_scsi_cmd *sdsc;
				struct scsi_cmnd *scmd;

				if (!sqcp)
					continue;
				scmd = sqcp->scmd;
				if (!scmd)
					continue;
				sdsc = scsi_cmd_priv(scmd);
				spin_lock_irqsave(&sdsc->lock, flags);
				/* Skip if the command moved on concurrently */
				if (TO_QUEUED_CMD(scmd) != sqcp) {
					spin_unlock_irqrestore(&sdsc->lock, flags);
					continue;
				}
				scsi_debug_stop_cmnd(scmd, NULL);
				spin_unlock_irqrestore(&sdsc->lock, flags);
				sqp->qc_arr[k] = NULL;
				clear_bit(k, sqp->in_use_bm);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5364 
5365 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5366 {
5367 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5368 
5369 	++num_aborts;
5370 
5371 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5372 		sdev_printk(KERN_INFO, SCpnt->device,
5373 			    "%s: command%s found\n", __func__,
5374 			    ok ? "" : " not");
5375 
5376 	return SUCCESS;
5377 }
5378 
5379 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5380 {
5381 	struct scsi_device *sdp = SCpnt->device;
5382 	struct sdebug_dev_info *devip = sdp->hostdata;
5383 
5384 	++num_dev_resets;
5385 
5386 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5387 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5388 	if (devip)
5389 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5390 
5391 	return SUCCESS;
5392 }
5393 
5394 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5395 {
5396 	struct scsi_device *sdp = SCpnt->device;
5397 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5398 	struct sdebug_dev_info *devip;
5399 	int k = 0;
5400 
5401 	++num_target_resets;
5402 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5403 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5404 
5405 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5406 		if (devip->target == sdp->id) {
5407 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5408 			++k;
5409 		}
5410 	}
5411 
5412 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5413 		sdev_printk(KERN_INFO, sdp,
5414 			    "%s: %d device(s) found in target\n", __func__, k);
5415 
5416 	return SUCCESS;
5417 }
5418 
5419 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5420 {
5421 	struct scsi_device *sdp = SCpnt->device;
5422 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5423 	struct sdebug_dev_info *devip;
5424 	int k = 0;
5425 
5426 	++num_bus_resets;
5427 
5428 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5429 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5430 
5431 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5432 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5433 		++k;
5434 	}
5435 
5436 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5437 		sdev_printk(KERN_INFO, sdp,
5438 			    "%s: %d device(s) found in host\n", __func__, k);
5439 	return SUCCESS;
5440 }
5441 
5442 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5443 {
5444 	struct sdebug_host_info *sdbg_host;
5445 	struct sdebug_dev_info *devip;
5446 	int k = 0;
5447 
5448 	++num_host_resets;
5449 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5450 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5451 	mutex_lock(&sdebug_host_list_mutex);
5452 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5453 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5454 				    dev_list) {
5455 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5456 			++k;
5457 		}
5458 	}
5459 	mutex_unlock(&sdebug_host_list_mutex);
5460 	stop_all_queued();
5461 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5462 		sdev_printk(KERN_INFO, SCpnt->device,
5463 			    "%s: %d device(s) found\n", __func__, k);
5464 	return SUCCESS;
5465 }
5466 
/*
 * Write an MSDOS partition table into the first sector of the ram store
 * @ramp, dividing the device into sdebug_num_parts cylinder-aligned
 * partitions. No-op for fewer than 1 partition or stores under 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* First track is reserved; split the rest evenly */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* Cylinder-align each partition start; track the smallest gap */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* Encode start in cylinder/head/sector form */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5519 
5520 static void block_unblock_all_queues(bool block)
5521 {
5522 	struct sdebug_host_info *sdhp;
5523 
5524 	lockdep_assert_held(&sdebug_host_list_mutex);
5525 
5526 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5527 		struct Scsi_Host *shost = sdhp->shost;
5528 
5529 		if (block)
5530 			scsi_block_requests(shost);
5531 		else
5532 			scsi_unblock_requests(shost);
5533 	}
5534 }
5535 
5536 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5537  * commands will be processed normally before triggers occur.
5538  */
5539 static void tweak_cmnd_count(void)
5540 {
5541 	int count, modulo;
5542 
5543 	modulo = abs(sdebug_every_nth);
5544 	if (modulo < 2)
5545 		return;
5546 
5547 	mutex_lock(&sdebug_host_list_mutex);
5548 	block_unblock_all_queues(true);
5549 	count = atomic_read(&sdebug_cmnd_count);
5550 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5551 	block_unblock_all_queues(false);
5552 	mutex_unlock(&sdebug_host_list_mutex);
5553 }
5554 
/* Zero the driver's global command and completion statistics counters. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
5562 
5563 static bool inject_on_this_cmd(void)
5564 {
5565 	if (sdebug_every_nth == 0)
5566 		return false;
5567 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5568 }
5569 
5570 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5571 
5572 
5573 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5574 {
5575 	if (sqcp)
5576 		kmem_cache_free(queued_cmd_cache, sqcp);
5577 }
5578 
5579 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5580 {
5581 	struct sdebug_queued_cmd *sqcp;
5582 	struct sdebug_defer *sd_dp;
5583 
5584 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5585 	if (!sqcp)
5586 		return NULL;
5587 
5588 	sd_dp = &sqcp->sd_dp;
5589 
5590 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5591 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5592 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5593 
5594 	sqcp->scmd = scmd;
5595 	sd_dp->sqa_idx = -1;
5596 
5597 	return sqcp;
5598 }
5599 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long iflags, flags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;
	int k;

	/* No device: answer in this thread, defaulting to DID_NO_CONNECT
	 * unless the caller already supplied a non-zero scsi_result. */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delta_jiff == 0 requests immediate (synchronous) completion */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);

	/* Optional injection: when SDEBUG_OPT_RARE_TSF is set and the device
	 * queue is at full depth, turn roughly every abs(every_nth)-th such
	 * command into a TASK SET FULL response. */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	/* Claim a free slot in this queue's in-use bitmap; if none is free
	 * respond in-thread (TASK SET FULL unless an error is pending). */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
				    __func__, sdebug_max_queue);
		goto respond_in_thread;
	}
	set_bit(k, sqp->in_use_bm);

	/* Allocate the deferred-completion descriptor; back out the bitmap
	 * claim and push back to the mid-layer on failure. */
	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		clear_bit(k, sqp->in_use_bm);
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;
	sd_dp->sqa_idx = k;
	sqp->qc_arr[k] = sqcp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	/* Polled requests need a completion timestamp base */
	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	/* Optional one-shot transport-error injection via sdeb_inject_pending */
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		/* delayed completion via hrtimer (or poll timestamp) */
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* deadline already passed: undo the
					 * queue slot and complete inline */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqp->qc_arr[k] = NULL;
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* record absolute completion time; mq_poll reaps it */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		/* Optional one-shot command-abort injection */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
5796 
5797 /* Note: The following macros create attribute files in the
5798    /sys/module/scsi_debug/parameters directory. Unfortunately this
5799    driver is unaware of a change and cannot trigger auxiliary actions
5800    as it can when the corresponding attribute in the
5801    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5802  */
5803 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5804 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5805 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5806 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5807 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5808 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5809 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5810 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5811 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5812 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5813 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5814 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5815 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5816 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5817 module_param_string(inq_product, sdebug_inq_product_id,
5818 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5819 module_param_string(inq_rev, sdebug_inq_product_rev,
5820 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5821 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5822 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5823 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5824 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5825 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5826 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5827 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5828 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5829 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5830 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5831 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5832 		   S_IRUGO | S_IWUSR);
5833 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5834 		   S_IRUGO | S_IWUSR);
5835 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5836 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5837 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5838 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5839 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5840 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5841 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5842 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5843 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5844 module_param_named(per_host_store, sdebug_per_host_store, bool,
5845 		   S_IRUGO | S_IWUSR);
5846 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5847 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5848 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5849 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5850 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5851 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5852 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5853 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5854 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5855 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5856 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5857 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5858 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5859 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5860 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5861 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5862 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5863 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5864 		   S_IRUGO | S_IWUSR);
5865 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5866 module_param_named(write_same_length, sdebug_write_same_length, int,
5867 		   S_IRUGO | S_IWUSR);
5868 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5869 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5870 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5871 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5872 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5873 
5874 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5875 MODULE_DESCRIPTION("SCSI debug adapter driver");
5876 MODULE_LICENSE("GPL");
5877 MODULE_VERSION(SDEBUG_VERSION);
5878 
5879 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5880 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5881 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5882 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5883 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5884 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5885 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5886 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5887 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5888 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5889 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5890 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5891 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5892 MODULE_PARM_DESC(host_max_queue,
5893 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5894 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5895 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5896 		 SDEBUG_VERSION "\")");
5897 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5898 MODULE_PARM_DESC(lbprz,
5899 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5900 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5901 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5902 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5903 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5904 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5905 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5906 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5907 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5908 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5909 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5910 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5911 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5912 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5913 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5914 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5915 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5916 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5917 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5918 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5919 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5920 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5921 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5922 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5923 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5924 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5925 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5926 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5927 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5928 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5929 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5930 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5931 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5932 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5933 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5934 MODULE_PARM_DESC(uuid_ctl,
5935 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5936 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5937 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5938 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5939 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5940 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5941 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5942 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5943 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5944 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5945 
5946 #define SDEBUG_INFO_LEN 256
5947 static char sdebug_info[SDEBUG_INFO_LEN];
5948 
5949 static const char *scsi_debug_info(struct Scsi_Host *shp)
5950 {
5951 	int k;
5952 
5953 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5954 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5955 	if (k >= (SDEBUG_INFO_LEN - 1))
5956 		return sdebug_info;
5957 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5958 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5959 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5960 		  "statistics", (int)sdebug_statistics);
5961 	return sdebug_info;
5962 }
5963 
5964 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5965 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5966 				 int length)
5967 {
5968 	char arr[16];
5969 	int opts;
5970 	int minLen = length > 15 ? 15 : length;
5971 
5972 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5973 		return -EACCES;
5974 	memcpy(arr, buffer, minLen);
5975 	arr[minLen] = '\0';
5976 	if (1 != sscanf(arr, "%d", &opts))
5977 		return -EINVAL;
5978 	sdebug_opts = opts;
5979 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5980 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5981 	if (sdebug_every_nth != 0)
5982 		tweak_cmnd_count();
5983 	return length;
5984 }
5985 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* driver-wide settings and statistics */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per-submit-queue occupancy: show first/last busy bits, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* host list and per-store xarray (only when any store exists) */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6060 
6061 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6062 {
6063 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6064 }
6065 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6066  * of delay is jiffies.
6067  */
6068 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6069 			   size_t count)
6070 {
6071 	int jdelay, res;
6072 
6073 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6074 		res = count;
6075 		if (sdebug_jdelay != jdelay) {
6076 			int j, k;
6077 			struct sdebug_queue *sqp;
6078 
6079 			mutex_lock(&sdebug_host_list_mutex);
6080 			block_unblock_all_queues(true);
6081 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6082 			     ++j, ++sqp) {
6083 				k = find_first_bit(sqp->in_use_bm,
6084 						   sdebug_max_queue);
6085 				if (k != sdebug_max_queue) {
6086 					res = -EBUSY;   /* queued commands */
6087 					break;
6088 				}
6089 			}
6090 			if (res > 0) {
6091 				sdebug_jdelay = jdelay;
6092 				sdebug_ndelay = 0;
6093 			}
6094 			block_unblock_all_queues(false);
6095 			mutex_unlock(&sdebug_host_list_mutex);
6096 		}
6097 		return res;
6098 	}
6099 	return -EINVAL;
6100 }
6101 static DRIVER_ATTR_RW(delay);
6102 
6103 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6104 {
6105 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6106 }
6107 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6108 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6109 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6110 			    size_t count)
6111 {
6112 	int ndelay, res;
6113 
6114 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6115 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6116 		res = count;
6117 		if (sdebug_ndelay != ndelay) {
6118 			int j, k;
6119 			struct sdebug_queue *sqp;
6120 
6121 			mutex_lock(&sdebug_host_list_mutex);
6122 			block_unblock_all_queues(true);
6123 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6124 			     ++j, ++sqp) {
6125 				k = find_first_bit(sqp->in_use_bm,
6126 						   sdebug_max_queue);
6127 				if (k != sdebug_max_queue) {
6128 					res = -EBUSY;   /* queued commands */
6129 					break;
6130 				}
6131 			}
6132 			if (res > 0) {
6133 				sdebug_ndelay = ndelay;
6134 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6135 							: DEF_JDELAY;
6136 			}
6137 			block_unblock_all_queues(false);
6138 			mutex_unlock(&sdebug_host_list_mutex);
6139 		}
6140 		return res;
6141 	}
6142 	return -EINVAL;
6143 }
6144 static DRIVER_ATTR_RW(ndelay);
6145 
6146 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6147 {
6148 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6149 }
6150 
6151 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6152 			  size_t count)
6153 {
6154 	int opts;
6155 	char work[20];
6156 
6157 	if (sscanf(buf, "%10s", work) == 1) {
6158 		if (strncasecmp(work, "0x", 2) == 0) {
6159 			if (kstrtoint(work + 2, 16, &opts) == 0)
6160 				goto opts_done;
6161 		} else {
6162 			if (kstrtoint(work, 10, &opts) == 0)
6163 				goto opts_done;
6164 		}
6165 	}
6166 	return -EINVAL;
6167 opts_done:
6168 	sdebug_opts = opts;
6169 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6170 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6171 	tweak_cmnd_count();
6172 	return count;
6173 }
6174 static DRIVER_ATTR_RW(opts);
6175 
6176 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6177 {
6178 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6179 }
6180 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6181 			   size_t count)
6182 {
6183 	int n;
6184 
6185 	/* Cannot change from or to TYPE_ZBC with sysfs */
6186 	if (sdebug_ptype == TYPE_ZBC)
6187 		return -EINVAL;
6188 
6189 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6190 		if (n == TYPE_ZBC)
6191 			return -EINVAL;
6192 		sdebug_ptype = n;
6193 		return count;
6194 	}
6195 	return -EINVAL;
6196 }
6197 static DRIVER_ATTR_RW(ptype);
6198 
6199 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6200 {
6201 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6202 }
6203 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6204 			    size_t count)
6205 {
6206 	int n;
6207 
6208 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6209 		sdebug_dsense = n;
6210 		return count;
6211 	}
6212 	return -EINVAL;
6213 }
6214 static DRIVER_ATTR_RW(dsense);
6215 
6216 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6217 {
6218 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6219 }
/* Toggle fake reads/writes. A 1 -> 0 transition (re-)creates or reuses a
 * backing store and makes every host share it; a 0 -> 1 transition erases
 * all stores apart from the first. Only transitions do any work.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the first store; mark it in-use */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6263 
6264 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6265 {
6266 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6267 }
6268 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6269 			      size_t count)
6270 {
6271 	int n;
6272 
6273 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6274 		sdebug_no_lun_0 = n;
6275 		return count;
6276 	}
6277 	return -EINVAL;
6278 }
6279 static DRIVER_ATTR_RW(no_lun_0);
6280 
6281 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6282 {
6283 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6284 }
6285 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6286 			      size_t count)
6287 {
6288 	int n;
6289 
6290 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6291 		sdebug_num_tgts = n;
6292 		sdebug_max_tgts_luns();
6293 		return count;
6294 	}
6295 	return -EINVAL;
6296 }
6297 static DRIVER_ATTR_RW(num_tgts);
6298 
6299 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6300 {
6301 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6302 }
6303 static DRIVER_ATTR_RO(dev_size_mb);
6304 
6305 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6306 {
6307 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6308 }
6309 
6310 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6311 				    size_t count)
6312 {
6313 	bool v;
6314 
6315 	if (kstrtobool(buf, &v))
6316 		return -EINVAL;
6317 
6318 	sdebug_per_host_store = v;
6319 	return count;
6320 }
6321 static DRIVER_ATTR_RW(per_host_store);
6322 
6323 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6324 {
6325 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6326 }
6327 static DRIVER_ATTR_RO(num_parts);
6328 
6329 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6330 {
6331 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6332 }
6333 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6334 			       size_t count)
6335 {
6336 	int nth;
6337 	char work[20];
6338 
6339 	if (sscanf(buf, "%10s", work) == 1) {
6340 		if (strncasecmp(work, "0x", 2) == 0) {
6341 			if (kstrtoint(work + 2, 16, &nth) == 0)
6342 				goto every_nth_done;
6343 		} else {
6344 			if (kstrtoint(work, 10, &nth) == 0)
6345 				goto every_nth_done;
6346 		}
6347 	}
6348 	return -EINVAL;
6349 
6350 every_nth_done:
6351 	sdebug_every_nth = nth;
6352 	if (nth && !sdebug_statistics) {
6353 		pr_info("every_nth needs statistics=1, set it\n");
6354 		sdebug_statistics = true;
6355 	}
6356 	tweak_cmnd_count();
6357 	return count;
6358 }
6359 static DRIVER_ATTR_RW(every_nth);
6360 
6361 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6362 {
6363 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6364 }
6365 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6366 				size_t count)
6367 {
6368 	int n;
6369 	bool changed;
6370 
6371 	if (kstrtoint(buf, 0, &n))
6372 		return -EINVAL;
6373 	if (n >= 0) {
6374 		if (n > (int)SAM_LUN_AM_FLAT) {
6375 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6376 			return -EINVAL;
6377 		}
6378 		changed = ((int)sdebug_lun_am != n);
6379 		sdebug_lun_am = n;
6380 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6381 			struct sdebug_host_info *sdhp;
6382 			struct sdebug_dev_info *dp;
6383 
6384 			mutex_lock(&sdebug_host_list_mutex);
6385 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6386 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6387 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6388 				}
6389 			}
6390 			mutex_unlock(&sdebug_host_list_mutex);
6391 		}
6392 		return count;
6393 	}
6394 	return -EINVAL;
6395 }
6396 static DRIVER_ATTR_RW(lun_format);
6397 
6398 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6399 {
6400 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6401 }
6402 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6403 			      size_t count)
6404 {
6405 	int n;
6406 	bool changed;
6407 
6408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6409 		if (n > 256) {
6410 			pr_warn("max_luns can be no more than 256\n");
6411 			return -EINVAL;
6412 		}
6413 		changed = (sdebug_max_luns != n);
6414 		sdebug_max_luns = n;
6415 		sdebug_max_tgts_luns();
6416 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6417 			struct sdebug_host_info *sdhp;
6418 			struct sdebug_dev_info *dp;
6419 
6420 			mutex_lock(&sdebug_host_list_mutex);
6421 			list_for_each_entry(sdhp, &sdebug_host_list,
6422 					    host_list) {
6423 				list_for_each_entry(dp, &sdhp->dev_info_list,
6424 						    dev_list) {
6425 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6426 						dp->uas_bm);
6427 				}
6428 			}
6429 			mutex_unlock(&sdebug_host_list_mutex);
6430 		}
6431 		return count;
6432 	}
6433 	return -EINVAL;
6434 }
6435 static DRIVER_ATTR_RW(max_luns);
6436 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* only writable while no per-host queue limit is set (host_max_queue==0) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		mutex_lock(&sdebug_host_list_mutex);
		block_unblock_all_queues(true);
		k = 0;
		/* k = highest in-use slot index across all submit queues;
		 * find_last_bit() returns SDEBUG_CANQUEUE for an empty bitmap */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)	/* no command in flight anywhere */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)	/* in-flight commands above new limit: retire them */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		mutex_unlock(&sdebug_host_list_mutex);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6475 
6476 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6477 {
6478 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6479 }
6480 
6481 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6482 {
6483 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6484 }
6485 
6486 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6487 {
6488 	bool v;
6489 
6490 	if (kstrtobool(buf, &v))
6491 		return -EINVAL;
6492 
6493 	sdebug_no_rwlock = v;
6494 	return count;
6495 }
6496 static DRIVER_ATTR_RW(no_rwlock);
6497 
6498 /*
6499  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6500  * in range [0, sdebug_host_max_queue), we can't change it.
6501  */
6502 static DRIVER_ATTR_RO(host_max_queue);
6503 
6504 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6505 {
6506 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6507 }
6508 static DRIVER_ATTR_RO(no_uld);
6509 
6510 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6511 {
6512 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6513 }
6514 static DRIVER_ATTR_RO(scsi_level);
6515 
6516 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6517 {
6518 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6519 }
6520 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6521 				size_t count)
6522 {
6523 	int n;
6524 	bool changed;
6525 
6526 	/* Ignore capacity change for ZBC drives for now */
6527 	if (sdeb_zbc_in_use)
6528 		return -ENOTSUPP;
6529 
6530 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6531 		changed = (sdebug_virtual_gb != n);
6532 		sdebug_virtual_gb = n;
6533 		sdebug_capacity = get_sdebug_capacity();
6534 		if (changed) {
6535 			struct sdebug_host_info *sdhp;
6536 			struct sdebug_dev_info *dp;
6537 
6538 			mutex_lock(&sdebug_host_list_mutex);
6539 			list_for_each_entry(sdhp, &sdebug_host_list,
6540 					    host_list) {
6541 				list_for_each_entry(dp, &sdhp->dev_info_list,
6542 						    dev_list) {
6543 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6544 						dp->uas_bm);
6545 				}
6546 			}
6547 			mutex_unlock(&sdebug_host_list_mutex);
6548 		}
6549 		return count;
6550 	}
6551 	return -EINVAL;
6552 }
6553 static DRIVER_ATTR_RW(virtual_gb);
6554 
6555 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6556 {
6557 	/* absolute number of hosts currently active is what is shown */
6558 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6559 }
6560 
6561 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6562 			      size_t count)
6563 {
6564 	bool found;
6565 	unsigned long idx;
6566 	struct sdeb_store_info *sip;
6567 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6568 	int delta_hosts;
6569 
6570 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6571 		return -EINVAL;
6572 	if (delta_hosts > 0) {
6573 		do {
6574 			found = false;
6575 			if (want_phs) {
6576 				xa_for_each_marked(per_store_ap, idx, sip,
6577 						   SDEB_XA_NOT_IN_USE) {
6578 					sdeb_most_recent_idx = (int)idx;
6579 					found = true;
6580 					break;
6581 				}
6582 				if (found)	/* re-use case */
6583 					sdebug_add_host_helper((int)idx);
6584 				else
6585 					sdebug_do_add_host(true);
6586 			} else {
6587 				sdebug_do_add_host(false);
6588 			}
6589 		} while (--delta_hosts);
6590 	} else if (delta_hosts < 0) {
6591 		do {
6592 			sdebug_do_remove_host(false);
6593 		} while (++delta_hosts);
6594 	}
6595 	return count;
6596 }
6597 static DRIVER_ATTR_RW(add_host);
6598 
6599 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6600 {
6601 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6602 }
6603 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6604 				    size_t count)
6605 {
6606 	int n;
6607 
6608 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6609 		sdebug_vpd_use_hostno = n;
6610 		return count;
6611 	}
6612 	return -EINVAL;
6613 }
6614 static DRIVER_ATTR_RW(vpd_use_hostno);
6615 
6616 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6617 {
6618 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6619 }
6620 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6621 				size_t count)
6622 {
6623 	int n;
6624 
6625 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6626 		if (n > 0)
6627 			sdebug_statistics = true;
6628 		else {
6629 			clear_queue_stats();
6630 			sdebug_statistics = false;
6631 		}
6632 		return count;
6633 	}
6634 	return -EINVAL;
6635 }
6636 static DRIVER_ATTR_RW(statistics);
6637 
6638 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6639 {
6640 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6641 }
6642 static DRIVER_ATTR_RO(sector_size);
6643 
6644 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6645 {
6646 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6647 }
6648 static DRIVER_ATTR_RO(submit_queues);
6649 
6650 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6651 {
6652 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6653 }
6654 static DRIVER_ATTR_RO(dix);
6655 
6656 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6657 {
6658 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6659 }
6660 static DRIVER_ATTR_RO(dif);
6661 
6662 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6663 {
6664 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6665 }
6666 static DRIVER_ATTR_RO(guard);
6667 
6668 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6669 {
6670 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6671 }
6672 static DRIVER_ATTR_RO(ato);
6673 
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning every sector is "mapped" */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* only the first store (xarray index 0) is reported here */
	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* scnprintf() was bounded by PAGE_SIZE - 1, leaving room for "\n\0" */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6695 
6696 static ssize_t random_show(struct device_driver *ddp, char *buf)
6697 {
6698 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6699 }
6700 
6701 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6702 			    size_t count)
6703 {
6704 	bool v;
6705 
6706 	if (kstrtobool(buf, &v))
6707 		return -EINVAL;
6708 
6709 	sdebug_random = v;
6710 	return count;
6711 }
6712 static DRIVER_ATTR_RW(random);
6713 
6714 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6715 {
6716 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6717 }
6718 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6719 			       size_t count)
6720 {
6721 	int n;
6722 
6723 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6724 		sdebug_removable = (n > 0);
6725 		return count;
6726 	}
6727 	return -EINVAL;
6728 }
6729 static DRIVER_ATTR_RW(removable);
6730 
6731 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6732 {
6733 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6734 }
6735 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6736 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6737 			       size_t count)
6738 {
6739 	int n;
6740 
6741 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6742 		sdebug_host_lock = (n > 0);
6743 		return count;
6744 	}
6745 	return -EINVAL;
6746 }
6747 static DRIVER_ATTR_RW(host_lock);
6748 
6749 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6750 {
6751 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6752 }
6753 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6754 			    size_t count)
6755 {
6756 	int n;
6757 
6758 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6759 		sdebug_strict = (n > 0);
6760 		return count;
6761 	}
6762 	return -EINVAL;
6763 }
6764 static DRIVER_ATTR_RW(strict);
6765 
6766 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6767 {
6768 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6769 }
6770 static DRIVER_ATTR_RO(uuid_ctl);
6771 
6772 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6773 {
6774 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6775 }
6776 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6777 			     size_t count)
6778 {
6779 	int ret, n;
6780 
6781 	ret = kstrtoint(buf, 0, &n);
6782 	if (ret)
6783 		return ret;
6784 	sdebug_cdb_len = n;
6785 	all_config_cdb_len();
6786 	return count;
6787 }
6788 static DRIVER_ATTR_RW(cdb_len);
6789 
/* Accepted spellings for the zoned-block-device model, in three alias
 * tables that sdeb_zbc_model_str() tries in order. Indexed by the
 * BLK_ZONED_* enum value each spelling maps to. */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6807 
6808 static int sdeb_zbc_model_str(const char *cp)
6809 {
6810 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6811 
6812 	if (res < 0) {
6813 		res = sysfs_match_string(zbc_model_strs_b, cp);
6814 		if (res < 0) {
6815 			res = sysfs_match_string(zbc_model_strs_c, cp);
6816 			if (res < 0)
6817 				return -EINVAL;
6818 		}
6819 	}
6820 	return res;
6821 }
6822 
6823 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6824 {
6825 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6826 			 zbc_model_strs_a[sdeb_zbc_model]);
6827 }
6828 static DRIVER_ATTR_RO(zbc);
6829 
6830 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6831 {
6832 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6833 }
6834 static DRIVER_ATTR_RO(tur_ms_to_ready);
6835 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
   Each entry references a driver_attr_* object created by one of the
   DRIVER_ATTR_RO()/DRIVER_ATTR_RW() macro invocations earlier in this
   file; the list must stay NULL-terminated. */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
6885 
6886 static struct device *pseudo_primary;
6887 
6888 static int __init scsi_debug_init(void)
6889 {
6890 	bool want_store = (sdebug_fake_rw == 0);
6891 	unsigned long sz;
6892 	int k, ret, hosts_to_add;
6893 	int idx = -1;
6894 
6895 	ramdisk_lck_a[0] = &atomic_rw;
6896 	ramdisk_lck_a[1] = &atomic_rw2;
6897 	atomic_set(&retired_max_queue, 0);
6898 
6899 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6900 		pr_warn("ndelay must be less than 1 second, ignored\n");
6901 		sdebug_ndelay = 0;
6902 	} else if (sdebug_ndelay > 0)
6903 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6904 
6905 	switch (sdebug_sector_size) {
6906 	case  512:
6907 	case 1024:
6908 	case 2048:
6909 	case 4096:
6910 		break;
6911 	default:
6912 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6913 		return -EINVAL;
6914 	}
6915 
6916 	switch (sdebug_dif) {
6917 	case T10_PI_TYPE0_PROTECTION:
6918 		break;
6919 	case T10_PI_TYPE1_PROTECTION:
6920 	case T10_PI_TYPE2_PROTECTION:
6921 	case T10_PI_TYPE3_PROTECTION:
6922 		have_dif_prot = true;
6923 		break;
6924 
6925 	default:
6926 		pr_err("dif must be 0, 1, 2 or 3\n");
6927 		return -EINVAL;
6928 	}
6929 
6930 	if (sdebug_num_tgts < 0) {
6931 		pr_err("num_tgts must be >= 0\n");
6932 		return -EINVAL;
6933 	}
6934 
6935 	if (sdebug_guard > 1) {
6936 		pr_err("guard must be 0 or 1\n");
6937 		return -EINVAL;
6938 	}
6939 
6940 	if (sdebug_ato > 1) {
6941 		pr_err("ato must be 0 or 1\n");
6942 		return -EINVAL;
6943 	}
6944 
6945 	if (sdebug_physblk_exp > 15) {
6946 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6947 		return -EINVAL;
6948 	}
6949 
6950 	sdebug_lun_am = sdebug_lun_am_i;
6951 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6952 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6953 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6954 	}
6955 
6956 	if (sdebug_max_luns > 256) {
6957 		if (sdebug_max_luns > 16384) {
6958 			pr_warn("max_luns can be no more than 16384, use default\n");
6959 			sdebug_max_luns = DEF_MAX_LUNS;
6960 		}
6961 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6962 	}
6963 
6964 	if (sdebug_lowest_aligned > 0x3fff) {
6965 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6966 		return -EINVAL;
6967 	}
6968 
6969 	if (submit_queues < 1) {
6970 		pr_err("submit_queues must be 1 or more\n");
6971 		return -EINVAL;
6972 	}
6973 
6974 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6975 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6976 		return -EINVAL;
6977 	}
6978 
6979 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6980 	    (sdebug_host_max_queue < 0)) {
6981 		pr_err("host_max_queue must be in range [0 %d]\n",
6982 		       SDEBUG_CANQUEUE);
6983 		return -EINVAL;
6984 	}
6985 
6986 	if (sdebug_host_max_queue &&
6987 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6988 		sdebug_max_queue = sdebug_host_max_queue;
6989 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6990 			sdebug_max_queue);
6991 	}
6992 
6993 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6994 			       GFP_KERNEL);
6995 	if (sdebug_q_arr == NULL)
6996 		return -ENOMEM;
6997 	for (k = 0; k < submit_queues; ++k)
6998 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6999 
7000 	/*
7001 	 * check for host managed zoned block device specified with
7002 	 * ptype=0x14 or zbc=XXX.
7003 	 */
7004 	if (sdebug_ptype == TYPE_ZBC) {
7005 		sdeb_zbc_model = BLK_ZONED_HM;
7006 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7007 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7008 		if (k < 0) {
7009 			ret = k;
7010 			goto free_q_arr;
7011 		}
7012 		sdeb_zbc_model = k;
7013 		switch (sdeb_zbc_model) {
7014 		case BLK_ZONED_NONE:
7015 		case BLK_ZONED_HA:
7016 			sdebug_ptype = TYPE_DISK;
7017 			break;
7018 		case BLK_ZONED_HM:
7019 			sdebug_ptype = TYPE_ZBC;
7020 			break;
7021 		default:
7022 			pr_err("Invalid ZBC model\n");
7023 			ret = -EINVAL;
7024 			goto free_q_arr;
7025 		}
7026 	}
7027 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7028 		sdeb_zbc_in_use = true;
7029 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7030 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7031 	}
7032 
7033 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7034 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7035 	if (sdebug_dev_size_mb < 1)
7036 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7037 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7038 	sdebug_store_sectors = sz / sdebug_sector_size;
7039 	sdebug_capacity = get_sdebug_capacity();
7040 
7041 	/* play around with geometry, don't waste too much on track 0 */
7042 	sdebug_heads = 8;
7043 	sdebug_sectors_per = 32;
7044 	if (sdebug_dev_size_mb >= 256)
7045 		sdebug_heads = 64;
7046 	else if (sdebug_dev_size_mb >= 16)
7047 		sdebug_heads = 32;
7048 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7049 			       (sdebug_sectors_per * sdebug_heads);
7050 	if (sdebug_cylinders_per >= 1024) {
7051 		/* other LLDs do this; implies >= 1GB ram disk ... */
7052 		sdebug_heads = 255;
7053 		sdebug_sectors_per = 63;
7054 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7055 			       (sdebug_sectors_per * sdebug_heads);
7056 	}
7057 	if (scsi_debug_lbp()) {
7058 		sdebug_unmap_max_blocks =
7059 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7060 
7061 		sdebug_unmap_max_desc =
7062 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7063 
7064 		sdebug_unmap_granularity =
7065 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7066 
7067 		if (sdebug_unmap_alignment &&
7068 		    sdebug_unmap_granularity <=
7069 		    sdebug_unmap_alignment) {
7070 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7071 			ret = -EINVAL;
7072 			goto free_q_arr;
7073 		}
7074 	}
7075 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7076 	if (want_store) {
7077 		idx = sdebug_add_store();
7078 		if (idx < 0) {
7079 			ret = idx;
7080 			goto free_q_arr;
7081 		}
7082 	}
7083 
7084 	pseudo_primary = root_device_register("pseudo_0");
7085 	if (IS_ERR(pseudo_primary)) {
7086 		pr_warn("root_device_register() error\n");
7087 		ret = PTR_ERR(pseudo_primary);
7088 		goto free_vm;
7089 	}
7090 	ret = bus_register(&pseudo_lld_bus);
7091 	if (ret < 0) {
7092 		pr_warn("bus_register error: %d\n", ret);
7093 		goto dev_unreg;
7094 	}
7095 	ret = driver_register(&sdebug_driverfs_driver);
7096 	if (ret < 0) {
7097 		pr_warn("driver_register error: %d\n", ret);
7098 		goto bus_unreg;
7099 	}
7100 
7101 	hosts_to_add = sdebug_add_host;
7102 	sdebug_add_host = 0;
7103 
7104 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7105 	if (!queued_cmd_cache)
7106 		goto driver_unreg;
7107 
7108 	for (k = 0; k < hosts_to_add; k++) {
7109 		if (want_store && k == 0) {
7110 			ret = sdebug_add_host_helper(idx);
7111 			if (ret < 0) {
7112 				pr_err("add_host_helper k=%d, error=%d\n",
7113 				       k, -ret);
7114 				break;
7115 			}
7116 		} else {
7117 			ret = sdebug_do_add_host(want_store &&
7118 						 sdebug_per_host_store);
7119 			if (ret < 0) {
7120 				pr_err("add_host k=%d error=%d\n", k, -ret);
7121 				break;
7122 			}
7123 		}
7124 	}
7125 	if (sdebug_verbose)
7126 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7127 
7128 	return 0;
7129 
7130 driver_unreg:
7131 	driver_unregister(&sdebug_driverfs_driver);
7132 bus_unreg:
7133 	bus_unregister(&pseudo_lld_bus);
7134 dev_unreg:
7135 	root_device_unregister(pseudo_primary);
7136 free_vm:
7137 	sdebug_erase_store(idx, NULL);
7138 free_q_arr:
7139 	kfree(sdebug_q_arr);
7140 	return ret;
7141 }
7142 
7143 static void __exit scsi_debug_exit(void)
7144 {
7145 	int k = sdebug_num_hosts;
7146 
7147 	for (; k; k--)
7148 		sdebug_do_remove_host(true);
7149 	kmem_cache_destroy(queued_cmd_cache);
7150 	driver_unregister(&sdebug_driverfs_driver);
7151 	bus_unregister(&pseudo_lld_bus);
7152 	root_device_unregister(pseudo_primary);
7153 
7154 	sdebug_erase_all_stores(false);
7155 	xa_destroy(per_store_ap);
7156 	kfree(sdebug_q_arr);
7157 }
7158 
7159 device_initcall(scsi_debug_init);
7160 module_exit(scsi_debug_exit);
7161 
/* Driver-core release callback: frees the host info that embeds @dev. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host = dev_to_sdebug_host(dev);

	kfree(sdbg_host);
}
7169 
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	/* release the backing ramdisk, PI buffer and provisioning bitmap;
	 * vfree(NULL) is a no-op so unallocated members are fine */
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
7188 
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		/* when asked, keep the first store encountered and clear
		 * the flag so all later ones are erased */
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/* NOTE(review): after the loop the flag is only still set when the
	 * xarray was empty; it looks like this reset was meant to run
	 * whenever the first store was retained -- confirm intent. */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
7204 
7205 /*
7206  * Returns store xarray new element index (idx) if >=0 else negated errno.
7207  * Limit the number of stores to 65536.
7208  */
7209 static int sdebug_add_store(void)
7210 {
7211 	int res;
7212 	u32 n_idx;
7213 	unsigned long iflags;
7214 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7215 	struct sdeb_store_info *sip = NULL;
7216 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7217 
7218 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7219 	if (!sip)
7220 		return -ENOMEM;
7221 
7222 	xa_lock_irqsave(per_store_ap, iflags);
7223 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7224 	if (unlikely(res < 0)) {
7225 		xa_unlock_irqrestore(per_store_ap, iflags);
7226 		kfree(sip);
7227 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7228 		return res;
7229 	}
7230 	sdeb_most_recent_idx = n_idx;
7231 	if (sdeb_first_idx < 0)
7232 		sdeb_first_idx = n_idx;
7233 	xa_unlock_irqrestore(per_store_ap, iflags);
7234 
7235 	res = -ENOMEM;
7236 	sip->storep = vzalloc(sz);
7237 	if (!sip->storep) {
7238 		pr_err("user data oom\n");
7239 		goto err;
7240 	}
7241 	if (sdebug_num_parts > 0)
7242 		sdebug_build_parts(sip->storep, sz);
7243 
7244 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7245 	if (sdebug_dix) {
7246 		int dif_size;
7247 
7248 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7249 		sip->dif_storep = vmalloc(dif_size);
7250 
7251 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7252 			sip->dif_storep);
7253 
7254 		if (!sip->dif_storep) {
7255 			pr_err("DIX oom\n");
7256 			goto err;
7257 		}
7258 		memset(sip->dif_storep, 0xff, dif_size);
7259 	}
7260 	/* Logical Block Provisioning */
7261 	if (scsi_debug_lbp()) {
7262 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7263 		sip->map_storep = vmalloc(array_size(sizeof(long),
7264 						     BITS_TO_LONGS(map_size)));
7265 
7266 		pr_info("%lu provisioning blocks\n", map_size);
7267 
7268 		if (!sip->map_storep) {
7269 			pr_err("LBP map oom\n");
7270 			goto err;
7271 		}
7272 
7273 		bitmap_zero(sip->map_storep, map_size);
7274 
7275 		/* Map first 1KB for partition table */
7276 		if (sdebug_num_parts)
7277 			map_region(sip, 0, 2);
7278 	}
7279 
7280 	rwlock_init(&sip->macc_lck);
7281 	return (int)n_idx;
7282 err:
7283 	sdebug_erase_store((int)n_idx, sip);
7284 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7285 	return res;
7286 }
7287 
/* Create one pseudo host (adapter) attached to store @per_host_idx; a
 * negative index means "use the first store". Returns 0 or negated errno. */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* the store is now referenced by this host, so un-mark it free */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	/* pre-create a dev_info for every target/LUN pair on this host */
	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/* once .release is set, device_register() may have taken a
	 * reference, so drop it via put_device(); before that point a
	 * plain kfree() is the correct way to dispose of sdbg_host */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7346 
7347 static int sdebug_do_add_host(bool mk_new_store)
7348 {
7349 	int ph_idx = sdeb_most_recent_idx;
7350 
7351 	if (mk_new_store) {
7352 		ph_idx = sdebug_add_store();
7353 		if (ph_idx < 0)
7354 			return ph_idx;
7355 	}
7356 	return sdebug_add_host_helper(ph_idx);
7357 }
7358 
/* Remove the most recently added host. @the_end is true only during module
 * unload, when store bookkeeping no longer matters. */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* tail of the list == most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* only mark the store re-usable if no other host shares it */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	/* drops the device-model reference; sdebug_release_adapter() frees
	 * sdbg_host when the last reference is gone */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7398 
7399 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7400 {
7401 	struct sdebug_dev_info *devip = sdev->hostdata;
7402 
7403 	if (!devip)
7404 		return	-ENODEV;
7405 
7406 	mutex_lock(&sdebug_host_list_mutex);
7407 	block_unblock_all_queues(true);
7408 
7409 	if (qdepth > SDEBUG_CANQUEUE) {
7410 		qdepth = SDEBUG_CANQUEUE;
7411 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7412 			qdepth, SDEBUG_CANQUEUE);
7413 	}
7414 	if (qdepth < 1)
7415 		qdepth = 1;
7416 	if (qdepth != sdev->queue_depth)
7417 		scsi_change_queue_depth(sdev, qdepth);
7418 
7419 	block_unblock_all_queues(false);
7420 	mutex_unlock(&sdebug_host_list_mutex);
7421 
7422 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7423 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7424 
7425 	return sdev->queue_depth;
7426 }
7427 
7428 static bool fake_timeout(struct scsi_cmnd *scp)
7429 {
7430 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7431 		if (sdebug_every_nth < -1)
7432 			sdebug_every_nth = -1;
7433 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7434 			return true; /* ignore command causing timeout */
7435 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7436 			 scsi_medium_access_command(scp))
7437 			return true; /* time out reads and writes */
7438 	}
7439 	return false;
7440 }
7441 
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		/* state 2: becoming ready; flips to ready (0) once
		 * sdeb_tur_ms_to_ready ms have elapsed since creation */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* convert elapsed time into *remaining* time for the
			 * sense-data information field */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* any other stopped state: report that an initializing command
	 * is required (ASC/ASCQ 04h/02h) */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7484 
7485 static void sdebug_map_queues(struct Scsi_Host *shost)
7486 {
7487 	int i, qoff;
7488 
7489 	if (shost->nr_hw_queues == 1)
7490 		return;
7491 
7492 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7493 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7494 
7495 		map->nr_queues  = 0;
7496 
7497 		if (i == HCTX_TYPE_DEFAULT)
7498 			map->nr_queues = submit_queues - poll_queues;
7499 		else if (i == HCTX_TYPE_POLL)
7500 			map->nr_queues = poll_queues;
7501 
7502 		if (!map->nr_queues) {
7503 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7504 			continue;
7505 		}
7506 
7507 		map->queue_offset = qoff;
7508 		blk_mq_map_queues(map);
7509 
7510 		qoff += map->nr_queues;
7511 	}
7512 }
7513 
/* Per-poll context passed through blk_mq_tagset_busy_iter() to the iterator */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hardware queue being polled */
	int *num_entries;	/* incremented once per completed command */
};
7518 
7519 /*
7520  * We don't handle aborted commands here, but it does not seem possible to have
7521  * aborted polled commands from schedule_resp()
7522  */
7523 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7524 {
7525 	struct sdebug_blk_mq_poll_data *data = opaque;
7526 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7527 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7528 	struct sdebug_defer *sd_dp;
7529 	u32 unique_tag = blk_mq_unique_tag(rq);
7530 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7531 	struct sdebug_queued_cmd *sqcp;
7532 	struct sdebug_queue *sqp;
7533 	unsigned long flags;
7534 	int queue_num = data->queue_num;
7535 	bool retiring = false;
7536 	int qc_idx;
7537 	ktime_t time;
7538 
7539 	/* We're only interested in one queue for this iteration */
7540 	if (hwq != queue_num)
7541 		return true;
7542 
7543 	/* Subsequent checks would fail if this failed, but check anyway */
7544 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7545 		return true;
7546 
7547 	time = ktime_get_boottime();
7548 
7549 	spin_lock_irqsave(&sdsc->lock, flags);
7550 	sqcp = TO_QUEUED_CMD(cmd);
7551 	if (!sqcp) {
7552 		spin_unlock_irqrestore(&sdsc->lock, flags);
7553 		return true;
7554 	}
7555 
7556 	sqp = sdebug_q_arr + queue_num;
7557 	sd_dp = &sqcp->sd_dp;
7558 
7559 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7560 		spin_unlock_irqrestore(&sdsc->lock, flags);
7561 		return true;
7562 	}
7563 
7564 	if (time < sd_dp->cmpl_ts) {
7565 		spin_unlock_irqrestore(&sdsc->lock, flags);
7566 		return true;
7567 	}
7568 
7569 	if (unlikely(atomic_read(&retired_max_queue) > 0))
7570 		retiring = true;
7571 
7572 	qc_idx = sd_dp->sqa_idx;
7573 	sqp->qc_arr[qc_idx] = NULL;
7574 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7575 		spin_unlock_irqrestore(&sdsc->lock, flags);
7576 		pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u\n",
7577 			sqp, queue_num, qc_idx);
7578 		sdebug_free_queued_cmd(sqcp);
7579 		return true;
7580 	}
7581 
7582 	if (unlikely(retiring)) {	/* user has reduced max_queue */
7583 		int k, retval = atomic_read(&retired_max_queue);
7584 
7585 		if (qc_idx >= retval) {
7586 			pr_err("index %d too large\n", retval);
7587 			spin_unlock_irqrestore(&sdsc->lock, flags);
7588 			sdebug_free_queued_cmd(sqcp);
7589 			return true;
7590 		}
7591 
7592 		k = find_last_bit(sqp->in_use_bm, retval);
7593 		if ((k < sdebug_max_queue) || (k == retval))
7594 			atomic_set(&retired_max_queue, 0);
7595 		else
7596 			atomic_set(&retired_max_queue, k + 1);
7597 	}
7598 
7599 	ASSIGN_QUEUED_CMD(cmd, NULL);
7600 	spin_unlock_irqrestore(&sdsc->lock, flags);
7601 
7602 	if (sdebug_statistics) {
7603 		atomic_inc(&sdebug_completions);
7604 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7605 			atomic_inc(&sdebug_miss_cpus);
7606 	}
7607 
7608 	sdebug_free_queued_cmd(sqcp);
7609 
7610 	scsi_done(cmd); /* callback to mid level */
7611 	(*data->num_entries)++;
7612 	return true;
7613 }
7614 
7615 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7616 {
7617 	int num_entries = 0;
7618 	unsigned long iflags;
7619 	struct sdebug_queue *sqp;
7620 	struct sdebug_blk_mq_poll_data data = {
7621 		.queue_num = queue_num,
7622 		.num_entries = &num_entries,
7623 	};
7624 	sqp = sdebug_q_arr + queue_num;
7625 
7626 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7627 
7628 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7629 				&data);
7630 
7631 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7632 	if (num_entries > 0)
7633 		atomic_add(num_entries, &sdeb_mq_poll_count);
7634 	return num_entries;
7635 }
7636 
/*
 * queuecommand host-template callback: validate the CDB, look up its
 * opcode_info_t entry, run the pre-dispatch checks (unit attention,
 * not-ready, fake r/w, injected errors) and hand the chosen resp_*
 * function to schedule_resp() with the configured delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;	/* root entry when opcode has variants */
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;	/* service action, when the opcode carries one */
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	/* inject_on_this_cmd() only consulted when statistics are enabled */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally log the CDB bytes (hex) with the request's unique tag */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	/* simulate a busy host when error injection requests it */
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* extract the service action from the CDB */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched: report the bad field */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* the REPORT LUNS well-known LUN only accepts F_RL_WLUN_OK opcodes */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find the highest offending bit for the sense data */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* deliver any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* medium access (and TUR) against a stopped device => not ready */
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* respond without running the resp_* function */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7803 
7804 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7805 {
7806 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7807 
7808 	spin_lock_init(&sdsc->lock);
7809 
7810 	return 0;
7811 }
7812 
7813 
/*
 * Host template for the scsi_debug pseudo adapter. can_queue and
 * cmd_per_lun are overwritten from sdebug_max_queue in
 * sdebug_driver_probe() before scsi_host_alloc() is called.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
};
7844 
/*
 * Bus probe callback: allocate and configure a Scsi_Host for this
 * pseudo device (queue counts, poll queues, DIF/DIX protection, guard
 * type), then register and scan it. Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	/* patch the shared template before allocating this host */
	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	/* leave room so this_id itself is never scanned as a target */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* map the sdebug_dif module parameter to SHOST protection bits */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		/* NOTE(review): overwrites scsi_add_host()'s code — intentional? */
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7969 
7970 static void sdebug_driver_remove(struct device *dev)
7971 {
7972 	struct sdebug_host_info *sdbg_host;
7973 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7974 
7975 	sdbg_host = dev_to_sdebug_host(dev);
7976 
7977 	scsi_remove_host(sdbg_host->shost);
7978 
7979 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7980 				 dev_list) {
7981 		list_del(&sdbg_devinfo->dev_list);
7982 		kfree(sdbg_devinfo->zstate);
7983 		kfree(sdbg_devinfo);
7984 	}
7985 
7986 	scsi_host_put(sdbg_host->shost);
7987 }
7988 
/* Every device on the pseudo bus matches the (sole) scsi_debug driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7994 
/* Pseudo bus on which the fake scsi_debug adapter devices are registered */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
8002