xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision a0473bf3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
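/*
 * Worked example (hypothetical invocation; add_host, num_tgts and max_luns
 * are module parameters defined later in this file):
 *
 *	modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 *
 * creates 2 hosts, each with 2 targets of 4 logical units, i.e.
 * 2 * 2 * 4 = 16 simulated devices.
 */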
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
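/*
 * Example: opts=0x2201 (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE |
 * SDEBUG_OPT_RESET_NOISE) is the same as SDEBUG_OPT_ALL_NOISE, where
 * "opts" is the module parameter behind sdebug_opts below.
 */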
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here, lower numbers have higher
197  * priority. The UA numbers should form a sequence starting from 0, with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
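/*
 * Example of the priority rule above: if both SDEBUG_UA_POR (bit 0) and
 * SDEBUG_UA_LUNS_CHANGED (bit 5) are pending in a device's uas_bm[],
 * make_ua() below finds bit 0 first, so the power-on UA is reported (and
 * cleared) before the LUNS changed UA.
 */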
208 
209 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
210  * simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
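/*
 * Example (hypothetical device node): with opts=2 (SDEBUG_OPT_MEDIUM_ERR),
 * a read touching any of sectors 4660..4669 fails with a MEDIUM ERROR
 * sense key, e.g.:
 *
 *	dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 */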
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. It can be reduced by the
216  * max_queue option. Command responses are not queued when jdelay=0 and
217  * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE.
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
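/*
 * Worked example: on a 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 commands can be queued per submit queue,
 * and DEF_CMD_PER_LUN starts at that same ceiling.
 */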
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t stopped;	/* 1: by SSU, 2: device start */
292 	bool used;
293 
294 	/* For ZBC devices */
295 	enum blk_zoned_model zmodel;
296 	unsigned int zcap;
297 	unsigned int zsize;
298 	unsigned int zsize_shift;
299 	unsigned int nr_zones;
300 	unsigned int nr_conv_zones;
301 	unsigned int nr_seq_zones;
302 	unsigned int nr_imp_open;
303 	unsigned int nr_exp_open;
304 	unsigned int nr_closed;
305 	unsigned int max_open;
306 	ktime_t create_ts;	/* time since bootup that this device was created */
307 	struct sdeb_zone_state *zstate;
308 };
309 
310 struct sdebug_host_info {
311 	struct list_head host_list;
312 	int si_idx;	/* sdeb_store_info (per host) xarray index */
313 	struct Scsi_Host *shost;
314 	struct device dev;
315 	struct list_head dev_info_list;
316 };
317 
318 /* There is an xarray of pointers to this struct's objects, one per host */
319 struct sdeb_store_info {
320 	rwlock_t macc_lck;	/* for atomic media access on this store */
321 	u8 *storep;		/* user data storage (ram) */
322 	struct t10_pi_tuple *dif_storep; /* protection info */
323 	void *map_storep;	/* provisioning map */
324 };
325 
326 #define dev_to_sdebug_host(d)	\
327 	container_of(d, struct sdebug_host_info, dev)
328 
329 #define shost_to_sdebug_host(shost)	\
330 	dev_to_sdebug_host(shost->dma_dev)
331 
332 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
333 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
334 
335 struct sdebug_defer {
336 	struct hrtimer hrt;
337 	struct execute_work ew;
338 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
339 	int sqa_idx;	/* index of sdebug_queue array */
340 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
341 	int hc_idx;	/* hostwide tag index */
342 	int issuing_cpu;
343 	bool init_hrt;
344 	bool init_wq;
345 	bool init_poll;
346 	bool aborted;	/* true when blk_abort_request() already called */
347 	enum sdeb_defer_type defer_t;
348 };
349 
350 struct sdebug_queued_cmd {
351 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
352 	 * instance indicates this slot is in use.
353 	 */
354 	struct sdebug_defer *sd_dp;
355 	struct scsi_cmnd *a_cmnd;
356 };
357 
358 struct sdebug_queue {
359 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
360 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
361 	spinlock_t qc_lock;
362 };
363 
364 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
365 static atomic_t sdebug_completions;  /* count of deferred completions */
366 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
367 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
368 static atomic_t sdeb_inject_pending;
369 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
370 
371 struct opcode_info_t {
372 	u8 num_attached;	/* 0 if this is a leaf (no overflow array); */
373 				/* 0xff marks the terminating element */
374 	u8 opcode;		/* if num_attached > 0, preferred */
375 	u16 sa;			/* service action */
376 	u32 flags;		/* OR-ed set of SDEB_F_* */
377 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
378 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
379 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
380 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
381 };
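/*
 * Minimal sketch (hypothetical helper, not part of this driver) of how the
 * len_mask[] bytes are meant to be read: len_mask[0] holds the cdb length
 * and len_mask[k] masks the bits a well-formed cdb may set in cdb[k], so a
 * reserved bit shows up as (cdb[k] & ~len_mask[k]) != 0.
 */
static inline bool sdeb_cdb_bits_ok(const u8 *cdb,
				    const struct opcode_info_t *oip)
{
	int k;
	const int top = min_t(int, oip->len_mask[0] - 1, 15);

	for (k = 1; k <= top; k++) {
		if (cdb[k] & ~oip->len_mask[k])
			return false;	/* a reserved/invalid cdb bit is set */
	}
	return true;
}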
382 
383 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
384 enum sdeb_opcode_index {
385 	SDEB_I_INVALID_OPCODE =	0,
386 	SDEB_I_INQUIRY = 1,
387 	SDEB_I_REPORT_LUNS = 2,
388 	SDEB_I_REQUEST_SENSE = 3,
389 	SDEB_I_TEST_UNIT_READY = 4,
390 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
391 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
392 	SDEB_I_LOG_SENSE = 7,
393 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
394 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
395 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
396 	SDEB_I_START_STOP = 11,
397 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
398 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
399 	SDEB_I_MAINT_IN = 14,
400 	SDEB_I_MAINT_OUT = 15,
401 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
402 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
403 	SDEB_I_RESERVE = 18,		/* 6, 10 */
404 	SDEB_I_RELEASE = 19,		/* 6, 10 */
405 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
406 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
407 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
408 	SDEB_I_SEND_DIAG = 23,
409 	SDEB_I_UNMAP = 24,
410 	SDEB_I_WRITE_BUFFER = 25,
411 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
412 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
413 	SDEB_I_COMP_WRITE = 28,
414 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
415 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
416 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
417 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
418 };
419 
420 
421 static const unsigned char opcode_ind_arr[256] = {
422 /* 0x0; 0x0->0x1f: 6 byte cdbs */
423 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
424 	    0, 0, 0, 0,
425 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
426 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
427 	    SDEB_I_RELEASE,
428 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
429 	    SDEB_I_ALLOW_REMOVAL, 0,
430 /* 0x20; 0x20->0x3f: 10 byte cdbs */
431 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
432 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
433 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
434 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
435 /* 0x40; 0x40->0x5f: 10 byte cdbs */
436 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
437 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
438 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
439 	    SDEB_I_RELEASE,
440 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
441 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
442 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
444 	0, SDEB_I_VARIABLE_LEN,
445 /* 0x80; 0x80->0x9f: 16 byte cdbs */
446 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
447 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
448 	0, 0, 0, SDEB_I_VERIFY,
449 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
450 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
451 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
452 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
453 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
454 	     SDEB_I_MAINT_OUT, 0, 0, 0,
455 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
456 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0,
458 	0, 0, 0, 0, 0, 0, 0, 0,
459 /* 0xc0; 0xc0->0xff: vendor specific */
460 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
464 };
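/*
 * Dispatch illustration (the real lookup lives in the command handling code
 * further down this file): the cdb opcode byte first indexes
 * opcode_ind_arr[], and the resulting SDEB_I_* value then indexes
 * opcode_info_arr[] defined below, roughly:
 *
 *	oip = &opcode_info_arr[opcode_ind_arr[cmd[0]]];
 *
 * Service-action opcodes (FF_SA set) are further resolved via the entry's
 * arrp overflow array.
 */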
465 
466 /*
467  * The following "response" functions return the SCSI mid-level's 4-byte
468  * tuple-in-an-int. To handle commands with an IMMED bit, for faster
469  * command completion, they can mask their return value with
470  * SDEG_RES_IMMED_MASK.
471  */
472 #define SDEG_RES_IMMED_MASK 0x40000000
473 
474 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
503 
504 static int sdebug_do_add_host(bool mk_new_store);
505 static int sdebug_add_host_helper(int per_host_idx);
506 static void sdebug_do_remove_host(bool the_end);
507 static int sdebug_add_store(void);
508 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
509 static void sdebug_erase_all_stores(bool apart_from_first);
510 
511 /*
512  * The following are overflow arrays for cdbs that "hit" the same index in
513  * the opcode_info_arr array. The most time-sensitive (or most commonly
514  * used) cdb should be placed in opcode_info_arr[]; the others belong here.
515  */
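/*
 * For example, MODE SENSE(10) (opcode 0x5a) occupies
 * opcode_info_arr[SDEB_I_MODE_SENSE] below, while MODE SENSE(6)
 * (opcode 0x1a) lands here in msense_iarr[].
 */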
516 static const struct opcode_info_t msense_iarr[] = {
517 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
518 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 };
520 
521 static const struct opcode_info_t mselect_iarr[] = {
522 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
523 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
524 };
525 
526 static const struct opcode_info_t read_iarr[] = {
527 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
528 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
529 	     0, 0, 0, 0} },
530 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
531 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
532 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
533 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
534 	     0xc7, 0, 0, 0, 0} },
535 };
536 
537 static const struct opcode_info_t write_iarr[] = {
538 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
539 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
540 		   0, 0, 0, 0, 0, 0} },
541 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
542 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
543 		   0, 0, 0} },
544 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
545 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
546 		   0xbf, 0xc7, 0, 0, 0, 0} },
547 };
548 
549 static const struct opcode_info_t verify_iarr[] = {
550 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
551 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
552 		   0, 0, 0, 0, 0, 0} },
553 };
554 
555 static const struct opcode_info_t sa_in_16_iarr[] = {
556 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
557 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
558 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
559 };
560 
561 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
562 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
563 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
564 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
565 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
566 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
567 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
568 };
569 
570 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
571 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
572 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
573 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
574 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
575 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
576 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
577 };
578 
579 static const struct opcode_info_t write_same_iarr[] = {
580 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
581 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
582 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
583 };
584 
585 static const struct opcode_info_t reserve_iarr[] = {
586 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
587 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 };
589 
590 static const struct opcode_info_t release_iarr[] = {
591 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
592 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
593 };
594 
595 static const struct opcode_info_t sync_cache_iarr[] = {
596 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
597 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
598 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
599 };
600 
601 static const struct opcode_info_t pre_fetch_iarr[] = {
602 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
603 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
604 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
605 };
606 
607 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
608 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
609 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
610 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
611 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
612 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
613 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
614 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
615 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
616 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
617 };
618 
619 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
620 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
621 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
622 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
623 };
624 
625 
626 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
627  * plus the terminating elements, for logic that scans this table such as
628  * REPORT SUPPORTED OPERATION CODES. */
629 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
630 /* 0 */
631 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
632 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
634 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
636 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
637 	     0, 0} },					/* REPORT LUNS */
638 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
639 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
640 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
641 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 /* 5 */
643 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
644 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
645 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
646 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
647 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
648 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
649 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
650 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
651 	     0, 0, 0} },
652 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
653 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
654 	     0, 0} },
655 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
656 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
657 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 /* 10 */
659 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
660 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
661 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
662 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
663 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
664 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
665 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
666 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
667 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
668 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
669 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
670 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
671 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
672 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
673 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
674 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
675 				0xff, 0, 0xc7, 0, 0, 0, 0} },
676 /* 15 */
677 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
678 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
679 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
680 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
681 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
682 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
683 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
684 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
685 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
686 	     0xff, 0xff} },
687 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
688 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
689 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 	     0} },
691 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
692 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
693 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
694 	     0} },
695 /* 20 */
696 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
697 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
699 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
700 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
701 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
702 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
703 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
704 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
705 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
706 /* 25 */
707 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
708 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
709 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
710 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
711 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
712 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
713 		 0, 0, 0, 0, 0} },
714 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
715 	    resp_sync_cache, sync_cache_iarr,
716 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
718 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
719 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
720 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
721 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
722 	    resp_pre_fetch, pre_fetch_iarr,
723 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
724 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
725 
726 /* 30 */
727 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
728 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
729 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
730 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
731 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
732 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
733 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
734 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
735 /* sentinel */
736 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
737 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
738 };
739 
740 static int sdebug_num_hosts;
741 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
742 static int sdebug_ato = DEF_ATO;
743 static int sdebug_cdb_len = DEF_CDB_LEN;
744 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
745 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
746 static int sdebug_dif = DEF_DIF;
747 static int sdebug_dix = DEF_DIX;
748 static int sdebug_dsense = DEF_D_SENSE;
749 static int sdebug_every_nth = DEF_EVERY_NTH;
750 static int sdebug_fake_rw = DEF_FAKE_RW;
751 static unsigned int sdebug_guard = DEF_GUARD;
752 static int sdebug_host_max_queue;	/* per host */
753 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
754 static int sdebug_max_luns = DEF_MAX_LUNS;
755 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
756 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
757 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
758 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
759 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
760 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
761 static int sdebug_no_uld;
762 static int sdebug_num_parts = DEF_NUM_PARTS;
763 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
764 static int sdebug_opt_blks = DEF_OPT_BLKS;
765 static int sdebug_opts = DEF_OPTS;
766 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
767 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
768 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
769 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
770 static int sdebug_sector_size = DEF_SECTOR_SIZE;
771 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
772 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
773 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
774 static unsigned int sdebug_lbpu = DEF_LBPU;
775 static unsigned int sdebug_lbpws = DEF_LBPWS;
776 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
777 static unsigned int sdebug_lbprz = DEF_LBPRZ;
778 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
779 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
780 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
781 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
782 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
783 static int sdebug_uuid_ctl = DEF_UUID_CTL;
784 static bool sdebug_random = DEF_RANDOM;
785 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
786 static bool sdebug_removable = DEF_REMOVABLE;
787 static bool sdebug_clustering;
788 static bool sdebug_host_lock = DEF_HOST_LOCK;
789 static bool sdebug_strict = DEF_STRICT;
790 static bool sdebug_any_injecting_opt;
791 static bool sdebug_no_rwlock;
792 static bool sdebug_verbose;
793 static bool have_dif_prot;
794 static bool write_since_sync;
795 static bool sdebug_statistics = DEF_STATISTICS;
796 static bool sdebug_wp;
797 /* Following enum: 0: no zbc (default); 1: host aware; 2: host managed */
798 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
799 static char *sdeb_zbc_model_s;
800 
801 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
802 			  SAM_LUN_AM_FLAT = 0x1,
803 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
804 			  SAM_LUN_AM_EXTENDED = 0x3};
805 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
806 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
807 
808 static unsigned int sdebug_store_sectors;
809 static sector_t sdebug_capacity;	/* in sectors */
810 
811 /* Old BIOS-style geometry; the kernel may get rid of these, but some mode
812    sense pages may still need them. */
813 static int sdebug_heads;		/* heads per disk */
814 static int sdebug_cylinders_per;	/* cylinders per surface */
815 static int sdebug_sectors_per;		/* sectors per cylinder */
816 
817 static LIST_HEAD(sdebug_host_list);
818 static DEFINE_MUTEX(sdebug_host_list_mutex);
819 
820 static struct xarray per_store_arr;
821 static struct xarray *per_store_ap = &per_store_arr;
822 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
823 static int sdeb_most_recent_idx = -1;
824 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
825 
826 static unsigned long map_size;
827 static int num_aborts;
828 static int num_dev_resets;
829 static int num_target_resets;
830 static int num_bus_resets;
831 static int num_host_resets;
832 static int dix_writes;
833 static int dix_reads;
834 static int dif_errors;
835 
836 /* ZBC global data */
837 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
838 static int sdeb_zbc_zone_cap_mb;
839 static int sdeb_zbc_zone_size_mb;
840 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
841 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
842 
843 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
844 static int poll_queues; /* io_uring iopoll interface */
845 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
846 
847 static DEFINE_RWLOCK(atomic_rw);
848 static DEFINE_RWLOCK(atomic_rw2);
849 
850 static rwlock_t *ramdisk_lck_a[2];
851 
852 static char sdebug_proc_name[] = MY_NAME;
853 static const char *my_name = MY_NAME;
854 
855 static struct bus_type pseudo_lld_bus;
856 
857 static struct device_driver sdebug_driverfs_driver = {
858 	.name 		= sdebug_proc_name,
859 	.bus		= &pseudo_lld_bus,
860 };
861 
862 static const int check_condition_result =
863 	SAM_STAT_CHECK_CONDITION;
864 
865 static const int illegal_condition_result =
866 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
867 
868 static const int device_qfull_result =
869 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
870 
871 static const int condition_met_result = SAM_STAT_CONDITION_MET;
872 
873 
874 /* Only do the extra work involved in logical block provisioning if one or
875  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
876  * real reads and writes (i.e. not skipping them for speed).
877  */
878 static inline bool scsi_debug_lbp(void)
879 {
880 	return 0 == sdebug_fake_rw &&
881 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
882 }
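/*
 * Example (hypothetical invocation): loading the module with
 *
 *	modprobe scsi_debug lbpu=1 lbpws=1
 *
 * makes scsi_debug_lbp() return true (fake_rw defaults to 0), so the
 * provisioning map and the UNMAP/WRITE SAME paths are exercised.
 */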
883 
884 static void *lba2fake_store(struct sdeb_store_info *sip,
885 			    unsigned long long lba)
886 {
887 	struct sdeb_store_info *lsip = sip;
888 
889 	lba = do_div(lba, sdebug_store_sectors);
890 	if (!sip || !sip->storep) {
891 		WARN_ON_ONCE(true);
892 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
893 	}
894 	return lsip->storep + lba * sdebug_sector_size;
895 }
896 
897 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
898 				      sector_t sector)
899 {
900 	sector = sector_div(sector, sdebug_store_sectors);
901 
902 	return sip->dif_storep + sector;
903 }
904 
905 static void sdebug_max_tgts_luns(void)
906 {
907 	struct sdebug_host_info *sdbg_host;
908 	struct Scsi_Host *hpnt;
909 
910 	mutex_lock(&sdebug_host_list_mutex);
911 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
912 		hpnt = sdbg_host->shost;
913 		if ((hpnt->this_id >= 0) &&
914 		    (sdebug_num_tgts > hpnt->this_id))
915 			hpnt->max_id = sdebug_num_tgts + 1;
916 		else
917 			hpnt->max_id = sdebug_num_tgts;
918 		/* sdebug_max_luns; */
919 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
920 	}
921 	mutex_unlock(&sdebug_host_list_mutex);
922 }
923 
924 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
925 
926 /* Set in_bit to -1 to indicate no bit position of invalid field */
927 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
928 				 enum sdeb_cmd_data c_d,
929 				 int in_byte, int in_bit)
930 {
931 	unsigned char *sbuff;
932 	u8 sks[4];
933 	int sl, asc;
934 
935 	sbuff = scp->sense_buffer;
936 	if (!sbuff) {
937 		sdev_printk(KERN_ERR, scp->device,
938 			    "%s: sense_buffer is NULL\n", __func__);
939 		return;
940 	}
941 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
942 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
943 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
944 	memset(sks, 0, sizeof(sks));
945 	sks[0] = 0x80;
946 	if (c_d)
947 		sks[0] |= 0x40;
948 	if (in_bit >= 0) {
949 		sks[0] |= 0x8;
950 		sks[0] |= 0x7 & in_bit;
951 	}
952 	put_unaligned_be16(in_byte, sks + 1);
953 	if (sdebug_dsense) {
954 		sl = sbuff[7] + 8;
955 		sbuff[7] = sl;
956 		sbuff[sl] = 0x2;
957 		sbuff[sl + 1] = 0x6;
958 		memcpy(sbuff + sl + 4, sks, 3);
959 	} else
960 		memcpy(sbuff + 15, sks, 3);
961 	if (sdebug_verbose)
962 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
963 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
964 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
965 }
966 
967 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
968 {
969 	if (!scp->sense_buffer) {
970 		sdev_printk(KERN_ERR, scp->device,
971 			    "%s: sense_buffer is NULL\n", __func__);
972 		return;
973 	}
974 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
975 
976 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
977 
978 	if (sdebug_verbose)
979 		sdev_printk(KERN_INFO, scp->device,
980 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
981 			    my_name, key, asc, asq);
982 }
983 
984 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
985 {
986 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
987 }
988 
989 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
990 			    void __user *arg)
991 {
992 	if (sdebug_verbose) {
993 		if (0x1261 == cmd)
994 			sdev_printk(KERN_INFO, dev,
995 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
996 		else if (0x5331 == cmd)
997 			sdev_printk(KERN_INFO, dev,
998 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
999 				    __func__);
1000 		else
1001 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1002 				    __func__, cmd);
1003 	}
1004 	return -EINVAL;
1005 	/* return -ENOTTY; // correct return but upsets fdisk */
1006 }
1007 
1008 static void config_cdb_len(struct scsi_device *sdev)
1009 {
1010 	switch (sdebug_cdb_len) {
1011 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1012 		sdev->use_10_for_rw = false;
1013 		sdev->use_16_for_rw = false;
1014 		sdev->use_10_for_ms = false;
1015 		break;
1016 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1017 		sdev->use_10_for_rw = true;
1018 		sdev->use_16_for_rw = false;
1019 		sdev->use_10_for_ms = false;
1020 		break;
1021 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1022 		sdev->use_10_for_rw = true;
1023 		sdev->use_16_for_rw = false;
1024 		sdev->use_10_for_ms = true;
1025 		break;
1026 	case 16:
1027 		sdev->use_10_for_rw = false;
1028 		sdev->use_16_for_rw = true;
1029 		sdev->use_10_for_ms = true;
1030 		break;
1031 	case 32: /* No knobs to suggest this so same as 16 for now */
1032 		sdev->use_10_for_rw = false;
1033 		sdev->use_16_for_rw = true;
1034 		sdev->use_10_for_ms = true;
1035 		break;
1036 	default:
1037 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1038 			sdebug_cdb_len);
1039 		sdev->use_10_for_rw = true;
1040 		sdev->use_16_for_rw = false;
1041 		sdev->use_10_for_ms = false;
1042 		sdebug_cdb_len = 10;
1043 		break;
1044 	}
1045 }
1046 
1047 static void all_config_cdb_len(void)
1048 {
1049 	struct sdebug_host_info *sdbg_host;
1050 	struct Scsi_Host *shost;
1051 	struct scsi_device *sdev;
1052 
1053 	mutex_lock(&sdebug_host_list_mutex);
1054 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1055 		shost = sdbg_host->shost;
1056 		shost_for_each_device(sdev, shost) {
1057 			config_cdb_len(sdev);
1058 		}
1059 	}
1060 	mutex_unlock(&sdebug_host_list_mutex);
1061 }
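/*
 * Example (assumption: cdb_len is also exposed as a writable driver sysfs
 * attribute, like most of the parameters in this file):
 *
 *	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 *
 * would funnel through all_config_cdb_len() above, switching every attached
 * scsi_device to 16-byte READ/WRITE and 10-byte MODE SENSE/SELECT.
 */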
1062 
1063 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1064 {
1065 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1066 	struct sdebug_dev_info *dp;
1067 
1068 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1069 		if ((devip->sdbg_host == dp->sdbg_host) &&
1070 		    (devip->target == dp->target)) {
1071 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1072 		}
1073 	}
1074 }
1075 
1076 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1077 {
1078 	int k;
1079 
1080 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1081 	if (k != SDEBUG_NUM_UAS) {
1082 		const char *cp = NULL;
1083 
1084 		switch (k) {
1085 		case SDEBUG_UA_POR:
1086 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1087 					POWER_ON_RESET_ASCQ);
1088 			if (sdebug_verbose)
1089 				cp = "power on reset";
1090 			break;
1091 		case SDEBUG_UA_POOCCUR:
1092 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1093 					POWER_ON_OCCURRED_ASCQ);
1094 			if (sdebug_verbose)
1095 				cp = "power on occurred";
1096 			break;
1097 		case SDEBUG_UA_BUS_RESET:
1098 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1099 					BUS_RESET_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "bus reset";
1102 			break;
1103 		case SDEBUG_UA_MODE_CHANGED:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1105 					MODE_CHANGED_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "mode parameters changed";
1108 			break;
1109 		case SDEBUG_UA_CAPACITY_CHANGED:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1111 					CAPACITY_CHANGED_ASCQ);
1112 			if (sdebug_verbose)
1113 				cp = "capacity data changed";
1114 			break;
1115 		case SDEBUG_UA_MICROCODE_CHANGED:
1116 			mk_sense_buffer(scp, UNIT_ATTENTION,
1117 					TARGET_CHANGED_ASC,
1118 					MICROCODE_CHANGED_ASCQ);
1119 			if (sdebug_verbose)
1120 				cp = "microcode has been changed";
1121 			break;
1122 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1123 			mk_sense_buffer(scp, UNIT_ATTENTION,
1124 					TARGET_CHANGED_ASC,
1125 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1126 			if (sdebug_verbose)
1127 				cp = "microcode has been changed without reset";
1128 			break;
1129 		case SDEBUG_UA_LUNS_CHANGED:
1130 			/*
1131 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1132 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1133 			 * on the target, until a REPORT LUNS command is
1134 			 * received.  SPC-4 behavior is to report it only once.
1135 			 * NOTE:  sdebug_scsi_level does not use the same
1136 			 * values as struct scsi_device->scsi_level.
1137 			 */
1138 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1139 				clear_luns_changed_on_target(devip);
1140 			mk_sense_buffer(scp, UNIT_ATTENTION,
1141 					TARGET_CHANGED_ASC,
1142 					LUNS_CHANGED_ASCQ);
1143 			if (sdebug_verbose)
1144 				cp = "reported luns data has changed";
1145 			break;
1146 		default:
1147 			pr_warn("unexpected unit attention code=%d\n", k);
1148 			if (sdebug_verbose)
1149 				cp = "unknown";
1150 			break;
1151 		}
1152 		clear_bit(k, devip->uas_bm);
1153 		if (sdebug_verbose)
1154 			sdev_printk(KERN_INFO, scp->device,
1155 				   "%s reports: Unit attention: %s\n",
1156 				   my_name, cp);
1157 		return check_condition_result;
1158 	}
1159 	return 0;
1160 }
1161 
1162 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1163 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1164 				int arr_len)
1165 {
1166 	int act_len;
1167 	struct scsi_data_buffer *sdb = &scp->sdb;
1168 
1169 	if (!sdb->length)
1170 		return 0;
1171 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1172 		return DID_ERROR << 16;
1173 
1174 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1175 				      arr, arr_len);
1176 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1177 
1178 	return 0;
1179 }
1180 
1181 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1182  * (DID_ERROR << 16). Can write to an offset in the data-in buffer. On
1183  * multiple calls, writes need not be in ascending offset order. Assumes
1184  * resid is set to scsi_bufflen() prior to any calls.
1185  */
1186 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1187 				  int arr_len, unsigned int off_dst)
1188 {
1189 	unsigned int act_len, n;
1190 	struct scsi_data_buffer *sdb = &scp->sdb;
1191 	off_t skip = off_dst;
1192 
1193 	if (sdb->length <= off_dst)
1194 		return 0;
1195 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1196 		return DID_ERROR << 16;
1197 
1198 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1199 				       arr, arr_len, skip);
1200 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1201 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1202 		 scsi_get_resid(scp));
1203 	n = scsi_bufflen(scp) - (off_dst + act_len);
1204 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1205 	return 0;
1206 }
1207 
1208 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1209  * 'arr' or -1 if error.
1210  */
1211 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1212 			       int arr_len)
1213 {
1214 	if (!scsi_bufflen(scp))
1215 		return 0;
1216 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1217 		return -1;
1218 
1219 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1220 }
1221 
1222 
1223 static char sdebug_inq_vendor_id[9] = "Linux   ";
1224 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1225 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1226 /* Use some locally assigned NAAs for SAS addresses. */
1227 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1228 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1229 static const u64 naa3_comp_c = 0x3111111000000000ULL;
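/* Worked example: a logical unit with dev_id_num 0x123 is reported below
 * with NAA-3 designator naa3_comp_b + 0x123 = naa.3333333000000123.
 */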
1230 
1231 /* Device identification VPD page. Returns number of bytes placed in arr */
1232 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1233 			  int target_dev_id, int dev_id_num,
1234 			  const char *dev_id_str, int dev_id_str_len,
1235 			  const uuid_t *lu_name)
1236 {
1237 	int num, port_a;
1238 	char b[32];
1239 
1240 	port_a = target_dev_id + 1;
1241 	/* T10 vendor identifier field format (faked) */
1242 	arr[0] = 0x2;	/* ASCII */
1243 	arr[1] = 0x1;
1244 	arr[2] = 0x0;
1245 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1246 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1247 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1248 	num = 8 + 16 + dev_id_str_len;
1249 	arr[3] = num;
1250 	num += 4;
1251 	if (dev_id_num >= 0) {
1252 		if (sdebug_uuid_ctl) {
1253 			/* Locally assigned UUID */
1254 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1255 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1256 			arr[num++] = 0x0;
1257 			arr[num++] = 0x12;
1258 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1259 			arr[num++] = 0x0;
1260 			memcpy(arr + num, lu_name, 16);
1261 			num += 16;
1262 		} else {
1263 			/* NAA-3, Logical unit identifier (binary) */
1264 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1265 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1266 			arr[num++] = 0x0;
1267 			arr[num++] = 0x8;
1268 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1269 			num += 8;
1270 		}
1271 		/* Target relative port number */
1272 		arr[num++] = 0x61;	/* proto=sas, binary */
1273 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1274 		arr[num++] = 0x0;	/* reserved */
1275 		arr[num++] = 0x4;	/* length */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x0;
1279 		arr[num++] = 0x1;	/* relative port A */
1280 	}
1281 	/* NAA-3, Target port identifier */
1282 	arr[num++] = 0x61;	/* proto=sas, binary */
1283 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1284 	arr[num++] = 0x0;
1285 	arr[num++] = 0x8;
1286 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1287 	num += 8;
1288 	/* NAA-3, Target port group identifier */
1289 	arr[num++] = 0x61;	/* proto=sas, binary */
1290 	arr[num++] = 0x95;	/* piv=1, target port group id */
1291 	arr[num++] = 0x0;
1292 	arr[num++] = 0x4;
1293 	arr[num++] = 0;
1294 	arr[num++] = 0;
1295 	put_unaligned_be16(port_group_id, arr + num);
1296 	num += 2;
1297 	/* NAA-3, Target device identifier */
1298 	arr[num++] = 0x61;	/* proto=sas, binary */
1299 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1300 	arr[num++] = 0x0;
1301 	arr[num++] = 0x8;
1302 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1303 	num += 8;
1304 	/* SCSI name string: Target device identifier */
1305 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1306 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1307 	arr[num++] = 0x0;
1308 	arr[num++] = 24;
1309 	memcpy(arr + num, "naa.32222220", 12);
1310 	num += 12;
1311 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1312 	memcpy(arr + num, b, 8);
1313 	num += 8;
1314 	memset(arr + num, 0, 4);
1315 	num += 4;
1316 	return num;
1317 }
1318 
1319 static unsigned char vpd84_data[] = {
1320 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1321     0x22,0x22,0x22,0x0,0xbb,0x1,
1322     0x22,0x22,0x22,0x0,0xbb,0x2,
1323 };
1324 
1325 /*  Software interface identification VPD page */
1326 static int inquiry_vpd_84(unsigned char *arr)
1327 {
1328 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1329 	return sizeof(vpd84_data);
1330 }
1331 
1332 /* Management network addresses VPD page */
1333 static int inquiry_vpd_85(unsigned char *arr)
1334 {
1335 	int num = 0;
1336 	const char *na1 = "https://www.kernel.org/config";
1337 	const char *na2 = "http://www.kernel.org/log";
1338 	int plen, olen;
1339 
1340 	arr[num++] = 0x1;	/* lu, storage config */
1341 	arr[num++] = 0x0;	/* reserved */
1342 	arr[num++] = 0x0;
1343 	olen = strlen(na1);
1344 	plen = olen + 1;
1345 	if (plen % 4)
1346 		plen = ((plen / 4) + 1) * 4;
1347 	arr[num++] = plen;	/* length, null terminated, padded */
1348 	memcpy(arr + num, na1, olen);
1349 	memset(arr + num + olen, 0, plen - olen);
1350 	num += plen;
1351 
1352 	arr[num++] = 0x4;	/* lu, logging */
1353 	arr[num++] = 0x0;	/* reserved */
1354 	arr[num++] = 0x0;
1355 	olen = strlen(na2);
1356 	plen = olen + 1;
1357 	if (plen % 4)
1358 		plen = ((plen / 4) + 1) * 4;
1359 	arr[num++] = plen;	/* length, null terminated, padded */
1360 	memcpy(arr + num, na2, olen);
1361 	memset(arr + num + olen, 0, plen - olen);
1362 	num += plen;
1363 
1364 	return num;
1365 }
1366 
1367 /* SCSI ports VPD page */
1368 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1369 {
1370 	int num = 0;
1371 	int port_a, port_b;
1372 
1373 	port_a = target_dev_id + 1;
1374 	port_b = port_a + 1;
1375 	arr[num++] = 0x0;	/* reserved */
1376 	arr[num++] = 0x0;	/* reserved */
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1379 	memset(arr + num, 0, 6);
1380 	num += 6;
1381 	arr[num++] = 0x0;
1382 	arr[num++] = 12;	/* length tp descriptor */
1383 	/* NAA-3 target port identifier (A) */
1384 	arr[num++] = 0x61;	/* proto=sas, binary */
1385 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1386 	arr[num++] = 0x0;	/* reserved */
1387 	arr[num++] = 0x8;	/* length */
1388 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1389 	num += 8;
1390 	arr[num++] = 0x0;	/* reserved */
1391 	arr[num++] = 0x0;	/* reserved */
1392 	arr[num++] = 0x0;
1393 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1394 	memset(arr + num, 0, 6);
1395 	num += 6;
1396 	arr[num++] = 0x0;
1397 	arr[num++] = 12;	/* length tp descriptor */
1398 	/* NAA-3 target port identifier (B) */
1399 	arr[num++] = 0x61;	/* proto=sas, binary */
1400 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1401 	arr[num++] = 0x0;	/* reserved */
1402 	arr[num++] = 0x8;	/* length */
1403 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1404 	num += 8;
1405 
1406 	return num;
1407 }
1408 
1409 
1410 static unsigned char vpd89_data[] = {
1411 /* from 4th byte */ 0,0,0,0,
1412 'l','i','n','u','x',' ',' ',' ',
1413 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1414 '1','2','3','4',
1415 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1416 0xec,0,0,0,
1417 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1418 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1420 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1421 0x53,0x41,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1423 0x20,0x20,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x10,0x80,
1426 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1427 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1428 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1430 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1431 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1432 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1437 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1438 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1439 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1452 };
1453 
1454 /* ATA Information VPD page */
1455 static int inquiry_vpd_89(unsigned char *arr)
1456 {
1457 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1458 	return sizeof(vpd89_data);
1459 }
1460 
1461 
1462 static unsigned char vpdb0_data[] = {
1463 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1464 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1465 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 };
1468 
1469 /* Block limits VPD page (SBC-3) */
1470 static int inquiry_vpd_b0(unsigned char *arr)
1471 {
1472 	unsigned int gran;
1473 
1474 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1475 
1476 	/* Optimal transfer length granularity */
1477 	if (sdebug_opt_xferlen_exp != 0 &&
1478 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1479 		gran = 1 << sdebug_opt_xferlen_exp;
1480 	else
1481 		gran = 1 << sdebug_physblk_exp;
1482 	put_unaligned_be16(gran, arr + 2);
1483 
1484 	/* Maximum Transfer Length */
1485 	if (sdebug_store_sectors > 0x400)
1486 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1487 
1488 	/* Optimal Transfer Length */
1489 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1490 
1491 	if (sdebug_lbpu) {
1492 		/* Maximum Unmap LBA Count */
1493 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1494 
1495 		/* Maximum Unmap Block Descriptor Count */
1496 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1497 	}
1498 
1499 	/* Unmap Granularity Alignment */
1500 	if (sdebug_unmap_alignment) {
1501 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1502 		arr[28] |= 0x80; /* UGAVALID */
1503 	}
1504 
1505 	/* Optimal Unmap Granularity */
1506 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1507 
1508 	/* Maximum WRITE SAME Length */
1509 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1510 
1511 	return 0x3c; /* mandatory page length when Logical Block Provisioning is supported */
1512 }
1513 
1514 /* Block device characteristics VPD page (SBC-3) */
1515 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1516 {
1517 	memset(arr, 0, 0x3c);
1518 	arr[0] = 0;
1519 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1520 	arr[2] = 0;
1521 	arr[3] = 5;	/* less than 1.8" */
1522 	if (devip->zmodel == BLK_ZONED_HA)
1523 		arr[4] = 1 << 4;	/* zoned field = 01b */
1524 
1525 	return 0x3c;
1526 }
1527 
1528 /* Logical block provisioning VPD page (SBC-4) */
1529 static int inquiry_vpd_b2(unsigned char *arr)
1530 {
1531 	memset(arr, 0, 0x4);
1532 	arr[0] = 0;			/* threshold exponent */
1533 	if (sdebug_lbpu)
1534 		arr[1] = 1 << 7;
1535 	if (sdebug_lbpws)
1536 		arr[1] |= 1 << 6;
1537 	if (sdebug_lbpws10)
1538 		arr[1] |= 1 << 5;
1539 	if (sdebug_lbprz && scsi_debug_lbp())
1540 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1541 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1542 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1543 	/* threshold_percentage=0 */
1544 	return 0x4;
1545 }
1546 
1547 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1548 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1549 {
1550 	memset(arr, 0, 0x3c);
1551 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1552 	/*
1553 	 * Set Optimal number of open sequential write preferred zones and
1554 	 * Optimal number of non-sequentially written sequential write
1555 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1556 	 * fields set to zero, apart from Max. number of open swrz_s field.
1557 	 */
1558 	put_unaligned_be32(0xffffffff, &arr[4]);
1559 	put_unaligned_be32(0xffffffff, &arr[8]);
1560 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1561 		put_unaligned_be32(devip->max_open, &arr[12]);
1562 	else
1563 		put_unaligned_be32(0xffffffff, &arr[12]);
1564 	if (devip->zcap < devip->zsize) {
1565 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1566 		put_unaligned_be64(devip->zsize, &arr[20]);
1567 	} else {
1568 		arr[19] = 0;
1569 	}
1570 	return 0x3c;
1571 }
1572 
1573 #define SDEBUG_LONG_INQ_SZ 96
1574 #define SDEBUG_MAX_INQ_ARR_SZ 584
1575 
1576 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1577 {
1578 	unsigned char pq_pdt;
1579 	unsigned char *arr;
1580 	unsigned char *cmd = scp->cmnd;
1581 	u32 alloc_len, n;
1582 	int ret;
1583 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1584 
1585 	alloc_len = get_unaligned_be16(cmd + 3);
1586 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1587 	if (!arr)
1588 		return DID_REQUEUE << 16;
1589 	is_disk = (sdebug_ptype == TYPE_DISK);
1590 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1591 	is_disk_zbc = (is_disk || is_zbc);
1592 	have_wlun = scsi_is_wlun(scp->device->lun);
1593 	if (have_wlun)
1594 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1595 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1596 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1597 	else
1598 		pq_pdt = (sdebug_ptype & 0x1f);
1599 	arr[0] = pq_pdt;
1600 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1601 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1602 		kfree(arr);
1603 		return check_condition_result;
1604 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1605 		int lu_id_num, port_group_id, target_dev_id;
1606 		u32 len;
1607 		char lu_id_str[6];
1608 		int host_no = devip->sdbg_host->shost->host_no;
1609 
1610 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1611 		    (devip->channel & 0x7f);
1612 		if (sdebug_vpd_use_hostno == 0)
1613 			host_no = 0;
1614 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1615 			    (devip->target * 1000) + devip->lun);
1616 		target_dev_id = ((host_no + 1) * 2000) +
1617 				 (devip->target * 1000) - 3;
1618 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1619 		if (0 == cmd[2]) { /* supported vital product data pages */
1620 			arr[1] = cmd[2];	/*sanity */
1621 			n = 4;
1622 			arr[n++] = 0x0;   /* this page */
1623 			arr[n++] = 0x80;  /* unit serial number */
1624 			arr[n++] = 0x83;  /* device identification */
1625 			arr[n++] = 0x84;  /* software interface ident. */
1626 			arr[n++] = 0x85;  /* management network addresses */
1627 			arr[n++] = 0x86;  /* extended inquiry */
1628 			arr[n++] = 0x87;  /* mode page policy */
1629 			arr[n++] = 0x88;  /* SCSI ports */
1630 			if (is_disk_zbc) {	  /* SBC or ZBC */
1631 				arr[n++] = 0x89;  /* ATA information */
1632 				arr[n++] = 0xb0;  /* Block limits */
1633 				arr[n++] = 0xb1;  /* Block characteristics */
1634 				if (is_disk)
1635 					arr[n++] = 0xb2;  /* LB Provisioning */
1636 				if (is_zbc)
1637 					arr[n++] = 0xb6;  /* ZB dev. char. */
1638 			}
1639 			arr[3] = n - 4;	  /* number of supported VPD pages */
1640 		} else if (0x80 == cmd[2]) { /* unit serial number */
1641 			arr[1] = cmd[2];	/*sanity */
1642 			arr[3] = len;
1643 			memcpy(&arr[4], lu_id_str, len);
1644 		} else if (0x83 == cmd[2]) { /* device identification */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1647 						target_dev_id, lu_id_num,
1648 						lu_id_str, len,
1649 						&devip->lu_name);
1650 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1651 			arr[1] = cmd[2];	/*sanity */
1652 			arr[3] = inquiry_vpd_84(&arr[4]);
1653 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1654 			arr[1] = cmd[2];	/*sanity */
1655 			arr[3] = inquiry_vpd_85(&arr[4]);
1656 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1657 			arr[1] = cmd[2];	/*sanity */
1658 			arr[3] = 0x3c;	/* number of following entries */
1659 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1660 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1661 			else if (have_dif_prot)
1662 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1663 			else
1664 				arr[4] = 0x0;   /* no protection stuff */
1665 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1666 		} else if (0x87 == cmd[2]) { /* mode page policy */
1667 			arr[1] = cmd[2];	/*sanity */
1668 			arr[3] = 0x8;	/* number of following entries */
1669 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1670 			arr[6] = 0x80;	/* mlus, shared */
1671 			arr[8] = 0x18;	 /* protocol specific lu */
1672 			arr[10] = 0x82;	 /* mlus, per initiator port */
1673 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1674 			arr[1] = cmd[2];	/*sanity */
1675 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1676 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1677 			arr[1] = cmd[2];        /*sanity */
1678 			n = inquiry_vpd_89(&arr[4]);
1679 			put_unaligned_be16(n, arr + 2);
1680 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			arr[3] = inquiry_vpd_b0(&arr[4]);
1683 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1684 			arr[1] = cmd[2];        /*sanity */
1685 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1686 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1687 			arr[1] = cmd[2];        /*sanity */
1688 			arr[3] = inquiry_vpd_b2(&arr[4]);
1689 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1690 			arr[1] = cmd[2];        /*sanity */
1691 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1692 		} else {
1693 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1694 			kfree(arr);
1695 			return check_condition_result;
1696 		}
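		/*
		 * Most VPD pages put their length in arr[3], but the ATA
		 * information page (0x89) uses a 16 bit length at arr[2..3];
		 * reading 16 bits here handles both since arr[2] stays zero
		 * for the short pages.
		 */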
1697 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1698 		ret = fill_from_dev_buffer(scp, arr,
1699 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1700 		kfree(arr);
1701 		return ret;
1702 	}
1703 	/* falls through to here for a standard INQUIRY */
1704 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1705 	arr[2] = sdebug_scsi_level;
1706 	arr[3] = 2;    /* response_data_format==2 */
1707 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1708 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1709 	if (sdebug_vpd_use_hostno == 0)
1710 		arr[5] |= 0x10; /* claim: implicit TPGS */
1711 	arr[6] = 0x10; /* claim: MultiP */
1712 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1713 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1714 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1715 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1716 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1717 	/* Use Vendor Specific area to place driver date in ASCII hex */
1718 	memcpy(&arr[36], sdebug_version_date, 8);
1719 	/* version descriptors (2 bytes each) follow */
1720 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1721 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1722 	n = 62;
1723 	if (is_disk) {		/* SBC-4 no version claimed */
1724 		put_unaligned_be16(0x600, arr + n);
1725 		n += 2;
1726 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1727 		put_unaligned_be16(0x525, arr + n);
1728 		n += 2;
1729 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1730 		put_unaligned_be16(0x624, arr + n);
1731 		n += 2;
1732 	}
1733 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1734 	ret = fill_from_dev_buffer(scp, arr,
1735 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1736 	kfree(arr);
1737 	return ret;
1738 }
1739 
1740 /* See resp_iec_m_pg() for how this data is manipulated */
1741 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1742 				   0, 0, 0x0, 0x0};
1743 
1744 static int resp_requests(struct scsi_cmnd *scp,
1745 			 struct sdebug_dev_info *devip)
1746 {
1747 	unsigned char *cmd = scp->cmnd;
1748 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1749 	bool dsense = !!(cmd[1] & 1);
1750 	u32 alloc_len = cmd[4];
1751 	u32 len = 18;
1752 	int stopped_state = atomic_read(&devip->stopped);
1753 
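	/*
	 * The DESC bit (cmd[1] & 1) selects descriptor format sense data
	 * (response code 0x72, 8 bytes built here) rather than fixed format
	 * (response code 0x70, 18 bytes).
	 */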
1754 	memset(arr, 0, sizeof(arr));
1755 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1756 		if (dsense) {
1757 			arr[0] = 0x72;
1758 			arr[1] = NOT_READY;
1759 			arr[2] = LOGICAL_UNIT_NOT_READY;
1760 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1761 			len = 8;
1762 		} else {
1763 			arr[0] = 0x70;
1764 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1765 			arr[7] = 0xa;			/* 18 byte sense buffer */
1766 			arr[12] = LOGICAL_UNIT_NOT_READY;
1767 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1768 		}
1769 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1770 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1771 		if (dsense) {
1772 			arr[0] = 0x72;
1773 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1774 			arr[2] = THRESHOLD_EXCEEDED;
1775 			arr[3] = 0xff;		/* Failure prediction(false) */
1776 			len = 8;
1777 		} else {
1778 			arr[0] = 0x70;
1779 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1780 			arr[7] = 0xa;		/* 18 byte sense buffer */
1781 			arr[12] = THRESHOLD_EXCEEDED;
1782 			arr[13] = 0xff;		/* Failure prediction(false) */
1783 		}
1784 	} else {	/* nothing to report */
1785 		if (dsense) {
1786 			len = 8;
1787 			memset(arr, 0, len);
1788 			arr[0] = 0x72;
1789 		} else {
1790 			memset(arr, 0, len);
1791 			arr[0] = 0x70;
1792 			arr[7] = 0xa;
1793 		}
1794 	}
1795 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1796 }
1797 
1798 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1799 {
1800 	unsigned char *cmd = scp->cmnd;
1801 	int power_cond, want_stop, stopped_state;
1802 	bool changing;
1803 
1804 	power_cond = (cmd[4] & 0xf0) >> 4;
1805 	if (power_cond) {
1806 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1807 		return check_condition_result;
1808 	}
1809 	want_stop = !(cmd[4] & 1);
1810 	stopped_state = atomic_read(&devip->stopped);
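	/*
	 * stopped_state == 2 means the device is still becoming ready: it
	 * stays in that state until tur_ms_to_ready milliseconds have
	 * elapsed since the device was created.
	 */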
1811 	if (stopped_state == 2) {
1812 		ktime_t now_ts = ktime_get_boottime();
1813 
1814 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1815 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1816 
1817 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1818 				/* tur_ms_to_ready timer has expired */
1819 				atomic_set(&devip->stopped, 0);
1820 				stopped_state = 0;
1821 			}
1822 		}
1823 		if (stopped_state == 2) {
1824 			if (want_stop) {
1825 				stopped_state = 1;	/* dummy up success */
1826 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1827 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1828 				return check_condition_result;
1829 			}
1830 		}
1831 	}
1832 	changing = (stopped_state != want_stop);
1833 	if (changing)
1834 		atomic_xchg(&devip->stopped, want_stop);
1835 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1836 		return SDEG_RES_IMMED_MASK;
1837 	else
1838 		return 0;
1839 }
1840 
1841 static sector_t get_sdebug_capacity(void)
1842 {
1843 	static const unsigned int gibibyte = 1073741824;
1844 
1845 	if (sdebug_virtual_gb > 0)
1846 		return (sector_t)sdebug_virtual_gb *
1847 			(gibibyte / sdebug_sector_size);
1848 	else
1849 		return sdebug_store_sectors;
1850 }
1851 
1852 #define SDEBUG_READCAP_ARR_SZ 8
1853 static int resp_readcap(struct scsi_cmnd *scp,
1854 			struct sdebug_dev_info *devip)
1855 {
1856 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1857 	unsigned int capac;
1858 
1859 	/* following just in case virtual_gb changed */
1860 	sdebug_capacity = get_sdebug_capacity();
1861 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
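	/*
	 * Per SBC, a returned LBA of 0xffffffff tells the initiator that
	 * the capacity is too large for READ CAPACITY(10) and that READ
	 * CAPACITY(16) should be used instead.
	 */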
1862 	if (sdebug_capacity < 0xffffffff) {
1863 		capac = (unsigned int)sdebug_capacity - 1;
1864 		put_unaligned_be32(capac, arr + 0);
1865 	} else
1866 		put_unaligned_be32(0xffffffff, arr + 0);
1867 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1868 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1869 }
1870 
1871 #define SDEBUG_READCAP16_ARR_SZ 32
1872 static int resp_readcap16(struct scsi_cmnd *scp,
1873 			  struct sdebug_dev_info *devip)
1874 {
1875 	unsigned char *cmd = scp->cmnd;
1876 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1877 	u32 alloc_len;
1878 
1879 	alloc_len = get_unaligned_be32(cmd + 10);
1880 	/* following just in case virtual_gb changed */
1881 	sdebug_capacity = get_sdebug_capacity();
1882 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1883 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1884 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1885 	arr[13] = sdebug_physblk_exp & 0xf;
1886 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1887 
1888 	if (scsi_debug_lbp()) {
1889 		arr[14] |= 0x80; /* LBPME */
1890 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1891 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1892 		 * in the wider field maps to 0 in this field.
1893 		 */
1894 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1895 			arr[14] |= 0x40;
1896 	}
1897 
1898 	/*
1899 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1900 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1901 	 */
1902 	if (devip->zmodel == BLK_ZONED_HM)
1903 		arr[12] |= 1 << 4;
1904 
1905 	arr[15] = sdebug_lowest_aligned & 0xff;
1906 
1907 	if (have_dif_prot) {
1908 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1909 		arr[12] |= 1; /* PROT_EN */
1910 	}
1911 
1912 	return fill_from_dev_buffer(scp, arr,
1913 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1914 }
1915 
1916 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1917 
1918 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1919 			      struct sdebug_dev_info *devip)
1920 {
1921 	unsigned char *cmd = scp->cmnd;
1922 	unsigned char *arr;
1923 	int host_no = devip->sdbg_host->shost->host_no;
1924 	int port_group_a, port_group_b, port_a, port_b;
1925 	u32 alen, n, rlen;
1926 	int ret;
1927 
1928 	alen = get_unaligned_be32(cmd + 6);
1929 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1930 	if (!arr)
1931 		return DID_REQUEUE << 16;
1932 	/*
1933 	 * EVPD page 0x88 states we have two ports, one real
1934 	 * and one fake port with no device connected.
1935 	 * So we create two port groups with one port each
1936 	 * and set the group with port B to unavailable.
1937 	 */
1938 	port_a = 0x1; /* relative port A */
1939 	port_b = 0x2; /* relative port B */
1940 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1941 			(devip->channel & 0x7f);
1942 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1943 			(devip->channel & 0x7f) + 0x80;
1944 
1945 	/*
1946 	 * The asymmetric access state is cycled according to the host number.
1947 	 */
1948 	n = 4;
1949 	if (sdebug_vpd_use_hostno == 0) {
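		/* 0 -> active/optimized, 1 -> active/non-optimized, 2 -> standby */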
1950 		arr[n++] = host_no % 3; /* Asymm access state */
1951 		arr[n++] = 0x0F; /* claim: all states are supported */
1952 	} else {
1953 		arr[n++] = 0x0; /* Active/Optimized path */
1954 		arr[n++] = 0x01; /* only support active/optimized paths */
1955 	}
1956 	put_unaligned_be16(port_group_a, arr + n);
1957 	n += 2;
1958 	arr[n++] = 0;    /* Reserved */
1959 	arr[n++] = 0;    /* Status code */
1960 	arr[n++] = 0;    /* Vendor unique */
1961 	arr[n++] = 0x1;  /* One port per group */
1962 	arr[n++] = 0;    /* Reserved */
1963 	arr[n++] = 0;    /* Reserved */
1964 	put_unaligned_be16(port_a, arr + n);
1965 	n += 2;
1966 	arr[n++] = 3;    /* Port unavailable */
1967 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1968 	put_unaligned_be16(port_group_b, arr + n);
1969 	n += 2;
1970 	arr[n++] = 0;    /* Reserved */
1971 	arr[n++] = 0;    /* Status code */
1972 	arr[n++] = 0;    /* Vendor unique */
1973 	arr[n++] = 0x1;  /* One port per group */
1974 	arr[n++] = 0;    /* Reserved */
1975 	arr[n++] = 0;    /* Reserved */
1976 	put_unaligned_be16(port_b, arr + n);
1977 	n += 2;
1978 
1979 	rlen = n - 4;
1980 	put_unaligned_be32(rlen, arr + 0);
1981 
1982 	/*
1983 	 * Return the smallest value of either
1984 	 * - The allocated length
1985 	 * - The constructed command length
1986 	 * - The maximum array size
1987 	 */
1988 	rlen = min(alen, n);
1989 	ret = fill_from_dev_buffer(scp, arr,
1990 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1991 	kfree(arr);
1992 	return ret;
1993 }
1994 
1995 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1996 			     struct sdebug_dev_info *devip)
1997 {
1998 	bool rctd;
1999 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2000 	u16 req_sa, u;
2001 	u32 alloc_len, a_len;
2002 	int k, offset, len, errsts, count, bump, na;
2003 	const struct opcode_info_t *oip;
2004 	const struct opcode_info_t *r_oip;
2005 	u8 *arr;
2006 	u8 *cmd = scp->cmnd;
2007 
2008 	rctd = !!(cmd[2] & 0x80);
2009 	reporting_opts = cmd[2] & 0x7;
2010 	req_opcode = cmd[3];
2011 	req_sa = get_unaligned_be16(cmd + 4);
2012 	alloc_len = get_unaligned_be32(cmd + 6);
2013 	if (alloc_len < 4 || alloc_len > 0xffff) {
2014 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2015 		return check_condition_result;
2016 	}
2017 	if (alloc_len > 8192)
2018 		a_len = 8192;
2019 	else
2020 		a_len = alloc_len;
2021 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2022 	if (NULL == arr) {
2023 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2024 				INSUFF_RES_ASCQ);
2025 		return check_condition_result;
2026 	}
2027 	switch (reporting_opts) {
2028 	case 0:	/* all commands */
2029 		/* count number of commands */
2030 		for (count = 0, oip = opcode_info_arr;
2031 		     oip->num_attached != 0xff; ++oip) {
2032 			if (F_INV_OP & oip->flags)
2033 				continue;
2034 			count += (oip->num_attached + 1);
2035 		}
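		/*
		 * Each command descriptor is 8 bytes; when RCTD is set a
		 * 12 byte command timeouts descriptor is appended to each,
		 * hence 20 bytes per command.
		 */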
2036 		bump = rctd ? 20 : 8;
2037 		put_unaligned_be32(count * bump, arr);
2038 		for (offset = 4, oip = opcode_info_arr;
2039 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2040 			if (F_INV_OP & oip->flags)
2041 				continue;
2042 			na = oip->num_attached;
2043 			arr[offset] = oip->opcode;
2044 			put_unaligned_be16(oip->sa, arr + offset + 2);
2045 			if (rctd)
2046 				arr[offset + 5] |= 0x2;
2047 			if (FF_SA & oip->flags)
2048 				arr[offset + 5] |= 0x1;
2049 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2050 			if (rctd)
2051 				put_unaligned_be16(0xa, arr + offset + 8);
2052 			r_oip = oip;
2053 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2054 				if (F_INV_OP & oip->flags)
2055 					continue;
2056 				offset += bump;
2057 				arr[offset] = oip->opcode;
2058 				put_unaligned_be16(oip->sa, arr + offset + 2);
2059 				if (rctd)
2060 					arr[offset + 5] |= 0x2;
2061 				if (FF_SA & oip->flags)
2062 					arr[offset + 5] |= 0x1;
2063 				put_unaligned_be16(oip->len_mask[0],
2064 						   arr + offset + 6);
2065 				if (rctd)
2066 					put_unaligned_be16(0xa,
2067 							   arr + offset + 8);
2068 			}
2069 			oip = r_oip;
2070 			offset += bump;
2071 		}
2072 		break;
2073 	case 1:	/* one command: opcode only */
2074 	case 2:	/* one command: opcode plus service action */
2075 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2076 		sdeb_i = opcode_ind_arr[req_opcode];
2077 		oip = &opcode_info_arr[sdeb_i];
2078 		if (F_INV_OP & oip->flags) {
2079 			supp = 1;
2080 			offset = 4;
2081 		} else {
2082 			if (1 == reporting_opts) {
2083 				if (FF_SA & oip->flags) {
2084 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2085 							     2, 2);
2086 					kfree(arr);
2087 					return check_condition_result;
2088 				}
2089 				req_sa = 0;
2090 			} else if (2 == reporting_opts &&
2091 				   0 == (FF_SA & oip->flags)) {
2092 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2093 				kfree(arr);
2094 				return check_condition_result;
2095 			}
2096 			if (0 == (FF_SA & oip->flags) &&
2097 			    req_opcode == oip->opcode)
2098 				supp = 3;
2099 			else if (0 == (FF_SA & oip->flags)) {
2100 				na = oip->num_attached;
2101 				for (k = 0, oip = oip->arrp; k < na;
2102 				     ++k, ++oip) {
2103 					if (req_opcode == oip->opcode)
2104 						break;
2105 				}
2106 				supp = (k >= na) ? 1 : 3;
2107 			} else if (req_sa != oip->sa) {
2108 				na = oip->num_attached;
2109 				for (k = 0, oip = oip->arrp; k < na;
2110 				     ++k, ++oip) {
2111 					if (req_sa == oip->sa)
2112 						break;
2113 				}
2114 				supp = (k >= na) ? 1 : 3;
2115 			} else
2116 				supp = 3;
2117 			if (3 == supp) {
2118 				u = oip->len_mask[0];
2119 				put_unaligned_be16(u, arr + 2);
2120 				arr[4] = oip->opcode;
2121 				for (k = 1; k < u; ++k)
2122 					arr[4 + k] = (k < 16) ?
2123 						 oip->len_mask[k] : 0xff;
2124 				offset = 4 + u;
2125 			} else
2126 				offset = 4;
2127 		}
2128 		arr[1] = (rctd ? 0x80 : 0) | supp;
2129 		if (rctd) {
2130 			put_unaligned_be16(0xa, arr + offset);
2131 			offset += 12;
2132 		}
2133 		break;
2134 	default:
2135 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2136 		kfree(arr);
2137 		return check_condition_result;
2138 	}
2139 	offset = (offset < a_len) ? offset : a_len;
2140 	len = (offset < alloc_len) ? offset : alloc_len;
2141 	errsts = fill_from_dev_buffer(scp, arr, len);
2142 	kfree(arr);
2143 	return errsts;
2144 }
2145 
2146 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2147 			  struct sdebug_dev_info *devip)
2148 {
2149 	bool repd;
2150 	u32 alloc_len, len;
2151 	u8 arr[16];
2152 	u8 *cmd = scp->cmnd;
2153 
2154 	memset(arr, 0, sizeof(arr));
2155 	repd = !!(cmd[2] & 0x80);
2156 	alloc_len = get_unaligned_be32(cmd + 6);
2157 	if (alloc_len < 4) {
2158 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2159 		return check_condition_result;
2160 	}
2161 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2162 	arr[1] = 0x1;		/* ITNRS */
2163 	if (repd) {
2164 		arr[3] = 0xc;
2165 		len = 16;
2166 	} else
2167 		len = 4;
2168 
2169 	len = (len < alloc_len) ? len : alloc_len;
2170 	return fill_from_dev_buffer(scp, arr, len);
2171 }
2172 
2173 /* <<Following mode page info copied from ST318451LW>> */
2174 
2175 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2176 {	/* Read-Write Error Recovery page for mode_sense */
2177 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2178 					5, 0, 0xff, 0xff};
2179 
2180 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2181 	if (1 == pcontrol)
2182 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2183 	return sizeof(err_recov_pg);
2184 }
2185 
2186 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2187 { 	/* Disconnect-Reconnect page for mode_sense */
2188 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2189 					 0, 0, 0, 0, 0, 0, 0, 0};
2190 
2191 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2192 	if (1 == pcontrol)
2193 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2194 	return sizeof(disconnect_pg);
2195 }
2196 
2197 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2198 {       /* Format device page for mode_sense */
2199 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2200 				     0, 0, 0, 0, 0, 0, 0, 0,
2201 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2202 
2203 	memcpy(p, format_pg, sizeof(format_pg));
2204 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2205 	put_unaligned_be16(sdebug_sector_size, p + 12);
2206 	if (sdebug_removable)
2207 		p[20] |= 0x20; /* should agree with INQUIRY */
2208 	if (1 == pcontrol)
2209 		memset(p + 2, 0, sizeof(format_pg) - 2);
2210 	return sizeof(format_pg);
2211 }
2212 
2213 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2214 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2215 				     0, 0, 0, 0};
2216 
2217 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2218 { 	/* Caching page for mode_sense */
2219 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2220 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2221 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2222 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2223 
2224 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2225 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2226 	memcpy(p, caching_pg, sizeof(caching_pg));
2227 	if (1 == pcontrol)
2228 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2229 	else if (2 == pcontrol)
2230 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2231 	return sizeof(caching_pg);
2232 }
2233 
2234 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2235 				    0, 0, 0x2, 0x4b};
2236 
2237 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2238 { 	/* Control mode page for mode_sense */
2239 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2240 					0, 0, 0, 0};
2241 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2242 				     0, 0, 0x2, 0x4b};
2243 
2244 	if (sdebug_dsense)
2245 		ctrl_m_pg[2] |= 0x4;
2246 	else
2247 		ctrl_m_pg[2] &= ~0x4;
2248 
2249 	if (sdebug_ato)
2250 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2251 
2252 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2253 	if (1 == pcontrol)
2254 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2255 	else if (2 == pcontrol)
2256 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2257 	return sizeof(ctrl_m_pg);
2258 }
2259 
2260 
2261 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2262 {	/* Informational Exceptions control mode page for mode_sense */
2263 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2264 				       0, 0, 0x0, 0x0};
2265 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2266 				      0, 0, 0x0, 0x0};
2267 
2268 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2269 	if (1 == pcontrol)
2270 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2271 	else if (2 == pcontrol)
2272 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2273 	return sizeof(iec_m_pg);
2274 }
2275 
2276 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2277 {	/* SAS SSP mode page - short format for mode_sense */
2278 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2279 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2280 
2281 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2282 	if (1 == pcontrol)
2283 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2284 	return sizeof(sas_sf_m_pg);
2285 }
2286 
2287 
2288 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2289 			      int target_dev_id)
2290 {	/* SAS phy control and discover mode page for mode_sense */
2291 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2292 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2293 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2294 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2295 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2296 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2297 		    0, 0, 0, 0, 0, 0, 0, 0,
2298 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2299 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2300 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2301 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2302 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2303 		    0, 0, 0, 0, 0, 0, 0, 0,
2304 		};
2305 	int port_a, port_b;
2306 
2307 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2308 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2309 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2310 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2311 	port_a = target_dev_id + 1;
2312 	port_b = port_a + 1;
2313 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2314 	put_unaligned_be32(port_a, p + 20);
2315 	put_unaligned_be32(port_b, p + 48 + 20);
2316 	if (1 == pcontrol)
2317 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2318 	return sizeof(sas_pcd_m_pg);
2319 }
2320 
2321 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2322 {	/* SAS SSP shared protocol specific port mode subpage */
2323 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2324 		    0, 0, 0, 0, 0, 0, 0, 0,
2325 		};
2326 
2327 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2328 	if (1 == pcontrol)
2329 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2330 	return sizeof(sas_sha_m_pg);
2331 }
2332 
2333 #define SDEBUG_MAX_MSENSE_SZ 256
2334 
2335 static int resp_mode_sense(struct scsi_cmnd *scp,
2336 			   struct sdebug_dev_info *devip)
2337 {
2338 	int pcontrol, pcode, subpcode, bd_len;
2339 	unsigned char dev_spec;
2340 	u32 alloc_len, offset, len;
2341 	int target_dev_id;
2342 	int target = scp->device->id;
2343 	unsigned char *ap;
2344 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2345 	unsigned char *cmd = scp->cmnd;
2346 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2347 
2348 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2349 	pcontrol = (cmd[2] & 0xc0) >> 6;
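	/* PC field: 0 -> current, 1 -> changeable, 2 -> default, 3 -> saved */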
2350 	pcode = cmd[2] & 0x3f;
2351 	subpcode = cmd[3];
2352 	msense_6 = (MODE_SENSE == cmd[0]);
2353 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2354 	is_disk = (sdebug_ptype == TYPE_DISK);
2355 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2356 	if ((is_disk || is_zbc) && !dbd)
2357 		bd_len = llbaa ? 16 : 8;
2358 	else
2359 		bd_len = 0;
2360 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2361 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2362 	if (0x3 == pcontrol) {  /* Saving values not supported */
2363 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2364 		return check_condition_result;
2365 	}
2366 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2367 			(devip->target * 1000) - 3;
2368 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2369 	if (is_disk || is_zbc) {
2370 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2371 		if (sdebug_wp)
2372 			dev_spec |= 0x80;
2373 	} else
2374 		dev_spec = 0x0;
2375 	if (msense_6) {
2376 		arr[2] = dev_spec;
2377 		arr[3] = bd_len;
2378 		offset = 4;
2379 	} else {
2380 		arr[3] = dev_spec;
2381 		if (16 == bd_len)
2382 			arr[4] = 0x1;	/* set LONGLBA bit */
2383 		arr[7] = bd_len;	/* assume 255 or less */
2384 		offset = 8;
2385 	}
2386 	ap = arr + offset;
2387 	if ((bd_len > 0) && (!sdebug_capacity))
2388 		sdebug_capacity = get_sdebug_capacity();
2389 
2390 	if (8 == bd_len) {
2391 		if (sdebug_capacity > 0xfffffffe)
2392 			put_unaligned_be32(0xffffffff, ap + 0);
2393 		else
2394 			put_unaligned_be32(sdebug_capacity, ap + 0);
2395 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2396 		offset += bd_len;
2397 		ap = arr + offset;
2398 	} else if (16 == bd_len) {
2399 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2400 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2401 		offset += bd_len;
2402 		ap = arr + offset;
2403 	}
2404 
2405 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2406 		/* TODO: Control Extension page */
2407 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2408 		return check_condition_result;
2409 	}
2410 	bad_pcode = false;
2411 
2412 	switch (pcode) {
2413 	case 0x1:	/* Read-Write error recovery page, direct access */
2414 		len = resp_err_recov_pg(ap, pcontrol, target);
2415 		offset += len;
2416 		break;
2417 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2418 		len = resp_disconnect_pg(ap, pcontrol, target);
2419 		offset += len;
2420 		break;
2421 	case 0x3:       /* Format device page, direct access */
2422 		if (is_disk) {
2423 			len = resp_format_pg(ap, pcontrol, target);
2424 			offset += len;
2425 		} else
2426 			bad_pcode = true;
2427 		break;
2428 	case 0x8:	/* Caching page, direct access */
2429 		if (is_disk || is_zbc) {
2430 			len = resp_caching_pg(ap, pcontrol, target);
2431 			offset += len;
2432 		} else
2433 			bad_pcode = true;
2434 		break;
2435 	case 0xa:	/* Control Mode page, all devices */
2436 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2437 		offset += len;
2438 		break;
2439 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2440 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2441 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2442 			return check_condition_result;
2443 		}
2444 		len = 0;
2445 		if ((0x0 == subpcode) || (0xff == subpcode))
2446 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2447 		if ((0x1 == subpcode) || (0xff == subpcode))
2448 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2449 						  target_dev_id);
2450 		if ((0x2 == subpcode) || (0xff == subpcode))
2451 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2452 		offset += len;
2453 		break;
2454 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2455 		len = resp_iec_m_pg(ap, pcontrol, target);
2456 		offset += len;
2457 		break;
2458 	case 0x3f:	/* Read all Mode pages */
2459 		if ((0 == subpcode) || (0xff == subpcode)) {
2460 			len = resp_err_recov_pg(ap, pcontrol, target);
2461 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2462 			if (is_disk) {
2463 				len += resp_format_pg(ap + len, pcontrol,
2464 						      target);
2465 				len += resp_caching_pg(ap + len, pcontrol,
2466 						       target);
2467 			} else if (is_zbc) {
2468 				len += resp_caching_pg(ap + len, pcontrol,
2469 						       target);
2470 			}
2471 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2472 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2473 			if (0xff == subpcode) {
2474 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2475 						  target, target_dev_id);
2476 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2477 			}
2478 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2479 			offset += len;
2480 		} else {
2481 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2482 			return check_condition_result;
2483 		}
2484 		break;
2485 	default:
2486 		bad_pcode = true;
2487 		break;
2488 	}
2489 	if (bad_pcode) {
2490 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2491 		return check_condition_result;
2492 	}
2493 	if (msense_6)
2494 		arr[0] = offset - 1;
2495 	else
2496 		put_unaligned_be16((offset - 2), arr + 0);
2497 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2498 }
2499 
2500 #define SDEBUG_MAX_MSELECT_SZ 512
2501 
2502 static int resp_mode_select(struct scsi_cmnd *scp,
2503 			    struct sdebug_dev_info *devip)
2504 {
2505 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2506 	int param_len, res, mpage;
2507 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2508 	unsigned char *cmd = scp->cmnd;
2509 	int mselect6 = (MODE_SELECT == cmd[0]);
2510 
2511 	memset(arr, 0, sizeof(arr));
2512 	pf = cmd[1] & 0x10;
2513 	sp = cmd[1] & 0x1;
2514 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2515 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2516 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2517 		return check_condition_result;
2518 	}
2519 	res = fetch_to_dev_buffer(scp, arr, param_len);
2520 	if (-1 == res)
2521 		return DID_ERROR << 16;
2522 	else if (sdebug_verbose && (res < param_len))
2523 		sdev_printk(KERN_INFO, scp->device,
2524 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2525 			    __func__, param_len, res);
2526 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2527 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2528 	off = bd_len + (mselect6 ? 4 : 8);
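	/* off: start of the first mode page, after the 4 or 8 byte parameter
	 * header and any block descriptors */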
2529 	if (md_len > 2 || off >= res) {
2530 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2531 		return check_condition_result;
2532 	}
2533 	mpage = arr[off] & 0x3f;
2534 	ps = !!(arr[off] & 0x80);
2535 	if (ps) {
2536 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2537 		return check_condition_result;
2538 	}
2539 	spf = !!(arr[off] & 0x40);
2540 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2541 		       (arr[off + 1] + 2);
2542 	if ((pg_len + off) > param_len) {
2543 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2544 				PARAMETER_LIST_LENGTH_ERR, 0);
2545 		return check_condition_result;
2546 	}
2547 	switch (mpage) {
2548 	case 0x8:      /* Caching Mode page */
2549 		if (caching_pg[1] == arr[off + 1]) {
2550 			memcpy(caching_pg + 2, arr + off + 2,
2551 			       sizeof(caching_pg) - 2);
2552 			goto set_mode_changed_ua;
2553 		}
2554 		break;
2555 	case 0xa:      /* Control Mode page */
2556 		if (ctrl_m_pg[1] == arr[off + 1]) {
2557 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2558 			       sizeof(ctrl_m_pg) - 2);
2559 			if (ctrl_m_pg[4] & 0x8)
2560 				sdebug_wp = true;
2561 			else
2562 				sdebug_wp = false;
2563 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2564 			goto set_mode_changed_ua;
2565 		}
2566 		break;
2567 	case 0x1c:      /* Informational Exceptions Mode page */
2568 		if (iec_m_pg[1] == arr[off + 1]) {
2569 			memcpy(iec_m_pg + 2, arr + off + 2,
2570 			       sizeof(iec_m_pg) - 2);
2571 			goto set_mode_changed_ua;
2572 		}
2573 		break;
2574 	default:
2575 		break;
2576 	}
2577 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2578 	return check_condition_result;
2579 set_mode_changed_ua:
2580 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2581 	return 0;
2582 }
2583 
2584 static int resp_temp_l_pg(unsigned char *arr)
2585 {
2586 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2587 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2588 		};
2589 
2590 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2591 	return sizeof(temp_l_pg);
2592 }
2593 
2594 static int resp_ie_l_pg(unsigned char *arr)
2595 {
2596 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2597 		};
2598 
2599 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2600 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2601 		arr[4] = THRESHOLD_EXCEEDED;
2602 		arr[5] = 0xff;
2603 	}
2604 	return sizeof(ie_l_pg);
2605 }
2606 
2607 static int resp_env_rep_l_spg(unsigned char *arr)
2608 {
2609 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2610 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2611 					 0x1, 0x0, 0x23, 0x8,
2612 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2613 		};
2614 
2615 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2616 	return sizeof(env_rep_l_spg);
2617 }
2618 
2619 #define SDEBUG_MAX_LSENSE_SZ 512
2620 
2621 static int resp_log_sense(struct scsi_cmnd *scp,
2622 			  struct sdebug_dev_info *devip)
2623 {
2624 	int ppc, sp, pcode, subpcode;
2625 	u32 alloc_len, len, n;
2626 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2627 	unsigned char *cmd = scp->cmnd;
2628 
2629 	memset(arr, 0, sizeof(arr));
2630 	ppc = cmd[1] & 0x2;
2631 	sp = cmd[1] & 0x1;
2632 	if (ppc || sp) {
2633 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2634 		return check_condition_result;
2635 	}
2636 	pcode = cmd[2] & 0x3f;
2637 	subpcode = cmd[3] & 0xff;
2638 	alloc_len = get_unaligned_be16(cmd + 7);
2639 	arr[0] = pcode;
2640 	if (0 == subpcode) {
2641 		switch (pcode) {
2642 		case 0x0:	/* Supported log pages log page */
2643 			n = 4;
2644 			arr[n++] = 0x0;		/* this page */
2645 			arr[n++] = 0xd;		/* Temperature */
2646 			arr[n++] = 0x2f;	/* Informational exceptions */
2647 			arr[3] = n - 4;
2648 			break;
2649 		case 0xd:	/* Temperature log page */
2650 			arr[3] = resp_temp_l_pg(arr + 4);
2651 			break;
2652 		case 0x2f:	/* Informational exceptions log page */
2653 			arr[3] = resp_ie_l_pg(arr + 4);
2654 			break;
2655 		default:
2656 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2657 			return check_condition_result;
2658 		}
2659 	} else if (0xff == subpcode) {
2660 		arr[0] |= 0x40;
2661 		arr[1] = subpcode;
2662 		switch (pcode) {
2663 		case 0x0:	/* Supported log pages and subpages log page */
2664 			n = 4;
2665 			arr[n++] = 0x0;
2666 			arr[n++] = 0x0;		/* 0,0 page */
2667 			arr[n++] = 0x0;
2668 			arr[n++] = 0xff;	/* this page */
2669 			arr[n++] = 0xd;
2670 			arr[n++] = 0x0;		/* Temperature */
2671 			arr[n++] = 0xd;
2672 			arr[n++] = 0x1;		/* Environment reporting */
2673 			arr[n++] = 0xd;
2674 			arr[n++] = 0xff;	/* all 0xd subpages */
2675 			arr[n++] = 0x2f;
2676 			arr[n++] = 0x0;	/* Informational exceptions */
2677 			arr[n++] = 0x2f;
2678 			arr[n++] = 0xff;	/* all 0x2f subpages */
2679 			arr[3] = n - 4;
2680 			break;
2681 		case 0xd:	/* Temperature subpages */
2682 			n = 4;
2683 			arr[n++] = 0xd;
2684 			arr[n++] = 0x0;		/* Temperature */
2685 			arr[n++] = 0xd;
2686 			arr[n++] = 0x1;		/* Environment reporting */
2687 			arr[n++] = 0xd;
2688 			arr[n++] = 0xff;	/* these subpages */
2689 			arr[3] = n - 4;
2690 			break;
2691 		case 0x2f:	/* Informational exceptions subpages */
2692 			n = 4;
2693 			arr[n++] = 0x2f;
2694 			arr[n++] = 0x0;		/* Informational exceptions */
2695 			arr[n++] = 0x2f;
2696 			arr[n++] = 0xff;	/* these subpages */
2697 			arr[3] = n - 4;
2698 			break;
2699 		default:
2700 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2701 			return check_condition_result;
2702 		}
2703 	} else if (subpcode > 0) {
2704 		arr[0] |= 0x40;
2705 		arr[1] = subpcode;
2706 		if (pcode == 0xd && subpcode == 1)
2707 			arr[3] = resp_env_rep_l_spg(arr + 4);
2708 		else {
2709 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2710 			return check_condition_result;
2711 		}
2712 	} else {
2713 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2714 		return check_condition_result;
2715 	}
2716 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2717 	return fill_from_dev_buffer(scp, arr,
2718 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2719 }
2720 
2721 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2722 {
2723 	return devip->nr_zones != 0;
2724 }
2725 
2726 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2727 					unsigned long long lba)
2728 {
2729 	u32 zno = lba >> devip->zsize_shift;
2730 	struct sdeb_zone_state *zsp;
2731 
2732 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2733 		return &devip->zstate[zno];
2734 
2735 	/*
2736 	 * If the zone capacity is less than the zone size, adjust for gap
2737 	 * zones.
2738 	 */
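	/*
	 * A sequential zone and its trailing gap zone share one LBA-space
	 * zone number; e.g. with a single conventional zone, LBA-space
	 * zone 1 maps to zstate[1] (the seq zone) or zstate[2] (its gap).
	 */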
2739 	zno = 2 * zno - devip->nr_conv_zones;
2740 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2741 	zsp = &devip->zstate[zno];
2742 	if (lba >= zsp->z_start + zsp->z_size)
2743 		zsp++;
2744 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2745 	return zsp;
2746 }
2747 
2748 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2749 {
2750 	return zsp->z_type == ZBC_ZTYPE_CNV;
2751 }
2752 
2753 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2754 {
2755 	return zsp->z_type == ZBC_ZTYPE_GAP;
2756 }
2757 
2758 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2759 {
2760 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2761 }
2762 
2763 static void zbc_close_zone(struct sdebug_dev_info *devip,
2764 			   struct sdeb_zone_state *zsp)
2765 {
2766 	enum sdebug_z_cond zc;
2767 
2768 	if (!zbc_zone_is_seq(zsp))
2769 		return;
2770 
2771 	zc = zsp->z_cond;
2772 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2773 		return;
2774 
2775 	if (zc == ZC2_IMPLICIT_OPEN)
2776 		devip->nr_imp_open--;
2777 	else
2778 		devip->nr_exp_open--;
2779 
2780 	if (zsp->z_wp == zsp->z_start) {
2781 		zsp->z_cond = ZC1_EMPTY;
2782 	} else {
2783 		zsp->z_cond = ZC4_CLOSED;
2784 		devip->nr_closed++;
2785 	}
2786 }
2787 
2788 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2789 {
2790 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2791 	unsigned int i;
2792 
2793 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2794 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2795 			zbc_close_zone(devip, zsp);
2796 			return;
2797 		}
2798 	}
2799 }
2800 
2801 static void zbc_open_zone(struct sdebug_dev_info *devip,
2802 			  struct sdeb_zone_state *zsp, bool explicit)
2803 {
2804 	enum sdebug_z_cond zc;
2805 
2806 	if (!zbc_zone_is_seq(zsp))
2807 		return;
2808 
2809 	zc = zsp->z_cond;
2810 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2811 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2812 		return;
2813 
2814 	/* Close an implicit open zone if necessary */
2815 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2816 		zbc_close_zone(devip, zsp);
2817 	else if (devip->max_open &&
2818 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2819 		zbc_close_imp_open_zone(devip);
2820 
2821 	if (zsp->z_cond == ZC4_CLOSED)
2822 		devip->nr_closed--;
2823 	if (explicit) {
2824 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2825 		devip->nr_exp_open++;
2826 	} else {
2827 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2828 		devip->nr_imp_open++;
2829 	}
2830 }
2831 
2832 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2833 				     struct sdeb_zone_state *zsp)
2834 {
2835 	switch (zsp->z_cond) {
2836 	case ZC2_IMPLICIT_OPEN:
2837 		devip->nr_imp_open--;
2838 		break;
2839 	case ZC3_EXPLICIT_OPEN:
2840 		devip->nr_exp_open--;
2841 		break;
2842 	default:
2843 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2844 			  zsp->z_start, zsp->z_cond);
2845 		break;
2846 	}
2847 	zsp->z_cond = ZC5_FULL;
2848 }
2849 
2850 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2851 		       unsigned long long lba, unsigned int num)
2852 {
2853 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2854 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2855 
2856 	if (!zbc_zone_is_seq(zsp))
2857 		return;
2858 
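	/*
	 * Sequential-write-required zones simply advance the WP by the
	 * write size; the loop below handles write-preferred zones, where
	 * writes may land anywhere and may span into the following zone.
	 */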
2859 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2860 		zsp->z_wp += num;
2861 		if (zsp->z_wp >= zend)
2862 			zbc_set_zone_full(devip, zsp);
2863 		return;
2864 	}
2865 
2866 	while (num) {
2867 		if (lba != zsp->z_wp)
2868 			zsp->z_non_seq_resource = true;
2869 
2870 		end = lba + num;
2871 		if (end >= zend) {
2872 			n = zend - lba;
2873 			zsp->z_wp = zend;
2874 		} else if (end > zsp->z_wp) {
2875 			n = num;
2876 			zsp->z_wp = end;
2877 		} else {
2878 			n = num;
2879 		}
2880 		if (zsp->z_wp >= zend)
2881 			zbc_set_zone_full(devip, zsp);
2882 
2883 		num -= n;
2884 		lba += n;
2885 		if (num) {
2886 			zsp++;
2887 			zend = zsp->z_start + zsp->z_size;
2888 		}
2889 	}
2890 }
2891 
2892 static int check_zbc_access_params(struct scsi_cmnd *scp,
2893 			unsigned long long lba, unsigned int num, bool write)
2894 {
2895 	struct scsi_device *sdp = scp->device;
2896 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2897 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2898 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2899 
2900 	if (!write) {
2901 		if (devip->zmodel == BLK_ZONED_HA)
2902 			return 0;
2903 		/* For host-managed, reads cannot cross zone type boundaries */
2904 		if (zsp->z_type != zsp_end->z_type) {
2905 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2906 					LBA_OUT_OF_RANGE,
2907 					READ_INVDATA_ASCQ);
2908 			return check_condition_result;
2909 		}
2910 		return 0;
2911 	}
2912 
2913 	/* Writing into a gap zone is not allowed */
2914 	if (zbc_zone_is_gap(zsp)) {
2915 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2916 				ATTEMPT_ACCESS_GAP);
2917 		return check_condition_result;
2918 	}
2919 
2920 	/* No restrictions for writes within conventional zones */
2921 	if (zbc_zone_is_conv(zsp)) {
2922 		if (!zbc_zone_is_conv(zsp_end)) {
2923 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2924 					LBA_OUT_OF_RANGE,
2925 					WRITE_BOUNDARY_ASCQ);
2926 			return check_condition_result;
2927 		}
2928 		return 0;
2929 	}
2930 
2931 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2932 		/* Writes cannot cross sequential zone boundaries */
2933 		if (zsp_end != zsp) {
2934 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2935 					LBA_OUT_OF_RANGE,
2936 					WRITE_BOUNDARY_ASCQ);
2937 			return check_condition_result;
2938 		}
2939 		/* Cannot write full zones */
2940 		if (zsp->z_cond == ZC5_FULL) {
2941 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2942 					INVALID_FIELD_IN_CDB, 0);
2943 			return check_condition_result;
2944 		}
2945 		/* Writes must be aligned to the zone WP */
2946 		if (lba != zsp->z_wp) {
2947 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2948 					LBA_OUT_OF_RANGE,
2949 					UNALIGNED_WRITE_ASCQ);
2950 			return check_condition_result;
2951 		}
2952 	}
2953 
2954 	/* Handle implicit open of closed and empty zones */
2955 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2956 		if (devip->max_open &&
2957 		    devip->nr_exp_open >= devip->max_open) {
2958 			mk_sense_buffer(scp, DATA_PROTECT,
2959 					INSUFF_RES_ASC,
2960 					INSUFF_ZONE_ASCQ);
2961 			return check_condition_result;
2962 		}
2963 		zbc_open_zone(devip, zsp, false);
2964 	}
2965 
2966 	return 0;
2967 }
2968 
2969 static inline int check_device_access_params(struct scsi_cmnd *scp,
2970 					unsigned long long lba,
2971 					unsigned int num, bool write)
2972 {
2973 	struct scsi_device *sdp = scp->device;
2974 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2975 
2976 	if (lba + num > sdebug_capacity) {
2977 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2978 		return check_condition_result;
2979 	}
2980 	/* transfer length excessive (tie in to block limits VPD page) */
2981 	if (num > sdebug_store_sectors) {
2982 		/* needs work to find which cdb byte 'num' comes from */
2983 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2984 		return check_condition_result;
2985 	}
2986 	if (write && unlikely(sdebug_wp)) {
2987 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2988 		return check_condition_result;
2989 	}
2990 	if (sdebug_dev_is_zoned(devip))
2991 		return check_zbc_access_params(scp, lba, num, write);
2992 
2993 	return 0;
2994 }
2995 
2996 /*
2997  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2998  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2999  * that access any of the "stores" in struct sdeb_store_info should call this
3000  * function with bug_if_fake_rw set to true.
3001  */
3002 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3003 						bool bug_if_fake_rw)
3004 {
3005 	if (sdebug_fake_rw) {
3006 		BUG_ON(bug_if_fake_rw);	/* See note above */
3007 		return NULL;
3008 	}
3009 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3010 }
3011 
3012 /* Returns number of bytes copied or -1 if error. */
3013 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3014 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3015 {
3016 	int ret;
3017 	u64 block, rest = 0;
3018 	enum dma_data_direction dir;
3019 	struct scsi_data_buffer *sdb = &scp->sdb;
3020 	u8 *fsp;
3021 
3022 	if (do_write) {
3023 		dir = DMA_TO_DEVICE;
3024 		write_since_sync = true;
3025 	} else {
3026 		dir = DMA_FROM_DEVICE;
3027 	}
3028 
3029 	if (!sdb->length || !sip)
3030 		return 0;
3031 	if (scp->sc_data_direction != dir)
3032 		return -1;
3033 	fsp = sip->storep;
3034 
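	/*
	 * do_div() divides lba by sdebug_store_sectors in place and yields
	 * the remainder, so 'block' is the sector offset within the store;
	 * accesses past the end of the store wrap to its start ('rest').
	 */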
3035 	block = do_div(lba, sdebug_store_sectors);
3036 	if (block + num > sdebug_store_sectors)
3037 		rest = block + num - sdebug_store_sectors;
3038 
3039 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3040 		   fsp + (block * sdebug_sector_size),
3041 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3042 	if (ret != (num - rest) * sdebug_sector_size)
3043 		return ret;
3044 
3045 	if (rest) {
3046 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3047 			    fsp, rest * sdebug_sector_size,
3048 			    sg_skip + ((num - rest) * sdebug_sector_size),
3049 			    do_write);
3050 	}
3051 
3052 	return ret;
3053 }
3054 
3055 /* Returns number of bytes copied or -1 if error. */
3056 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3057 {
3058 	struct scsi_data_buffer *sdb = &scp->sdb;
3059 
3060 	if (!sdb->length)
3061 		return 0;
3062 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3063 		return -1;
3064 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3065 			      num * sdebug_sector_size, 0, true);
3066 }
3067 
3068 /* If the first num sectors at sip->storep+lba compare equal to arr, copy
3069  * the second num sectors of arr (the new data) into sip->storep+lba and
3070  * return true. If the comparison fails, return false. */
3071 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3072 			      const u8 *arr, bool compare_only)
3073 {
3074 	bool res;
3075 	u64 block, rest = 0;
3076 	u32 store_blks = sdebug_store_sectors;
3077 	u32 lb_size = sdebug_sector_size;
3078 	u8 *fsp = sip->storep;
3079 
3080 	block = do_div(lba, store_blks);
3081 	if (block + num > store_blks)
3082 		rest = block + num - store_blks;
3083 
3084 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3085 	if (!res)
3086 		return res;
3087 	if (rest)
3088 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3089 			      rest * lb_size);
3090 	if (!res)
3091 		return res;
3092 	if (compare_only)
3093 		return true;
3094 	arr += num * lb_size;
3095 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3096 	if (rest)
3097 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3098 	return res;
3099 }
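
/*
 * Illustrative sketch (not part of the driver): the data-out layout that
 * comp_write_worker() expects. A COMPARE AND WRITE of num blocks carries
 * 2 * num blocks: the verify data first, then the write data (see
 * resp_comp_write() below, which fetches dnum = 2 * num blocks). The
 * helper name is hypothetical.
 */
static inline void sdeb_example_caw_layout(u8 *arr, const u8 *verify_data,
					   const u8 *write_data, u32 num)
{
	u32 half = num * sdebug_sector_size;

	memcpy(arr, verify_data, half);       /* compared against the store */
	memcpy(arr + half, write_data, half); /* written only on a match */
}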
3100 
3101 static __be16 dif_compute_csum(const void *buf, int len)
3102 {
3103 	__be16 csum;
3104 
3105 	if (sdebug_guard)
3106 		csum = (__force __be16)ip_compute_csum(buf, len);
3107 	else
3108 		csum = cpu_to_be16(crc_t10dif(buf, len));
3109 
3110 	return csum;
3111 }
3112 
3113 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3114 		      sector_t sector, u32 ei_lba)
3115 {
3116 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3117 
3118 	if (sdt->guard_tag != csum) {
3119 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3120 			(unsigned long)sector,
3121 			be16_to_cpu(sdt->guard_tag),
3122 			be16_to_cpu(csum));
3123 		return 0x01;
3124 	}
3125 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3126 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3127 		pr_err("REF check failed on sector %lu\n",
3128 			(unsigned long)sector);
3129 		return 0x03;
3130 	}
3131 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3132 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3133 		pr_err("REF check failed on sector %lu\n",
3134 			(unsigned long)sector);
3135 		return 0x03;
3136 	}
3137 	return 0;
3138 }
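
/*
 * Illustrative sketch (not part of the driver): a protection tuple that
 * dif_verify() above would accept for Type 1 protection: a guard covering
 * one logical block, a reference tag holding the low 32 bits of the LBA,
 * and an app tag other than 0xffff (prot_verify_read() below skips tuples
 * whose app tag is 0xffff). The helper name is hypothetical.
 */
static inline void sdeb_example_mk_type1_pi(struct t10_pi_tuple *sdt,
					    const void *data, sector_t sector)
{
	sdt->guard_tag = dif_compute_csum(data, sdebug_sector_size);
	sdt->app_tag = cpu_to_be16(0);
	sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
}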
3139 
3140 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3141 			  unsigned int sectors, bool read)
3142 {
3143 	size_t resid;
3144 	void *paddr;
3145 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3146 						scp->device->hostdata, true);
3147 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3148 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3149 	struct sg_mapping_iter miter;
3150 
3151 	/* Bytes of protection data to copy into sgl */
3152 	resid = sectors * sizeof(*dif_storep);
3153 
3154 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3155 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3156 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3157 
3158 	while (sg_miter_next(&miter) && resid > 0) {
3159 		size_t len = min_t(size_t, miter.length, resid);
3160 		void *start = dif_store(sip, sector);
3161 		size_t rest = 0;
3162 
3163 		if (dif_store_end < start + len)
3164 			rest = start + len - dif_store_end;
3165 
3166 		paddr = miter.addr;
3167 
3168 		if (read)
3169 			memcpy(paddr, start, len - rest);
3170 		else
3171 			memcpy(start, paddr, len - rest);
3172 
3173 		if (rest) {
3174 			if (read)
3175 				memcpy(paddr + len - rest, dif_storep, rest);
3176 			else
3177 				memcpy(dif_storep, paddr + len - rest, rest);
3178 		}
3179 
3180 		sector += len / sizeof(*dif_storep);
3181 		resid -= len;
3182 	}
3183 	sg_miter_stop(&miter);
3184 }
3185 
3186 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3187 			    unsigned int sectors, u32 ei_lba)
3188 {
3189 	int ret = 0;
3190 	unsigned int i;
3191 	sector_t sector;
3192 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3193 						scp->device->hostdata, true);
3194 	struct t10_pi_tuple *sdt;
3195 
3196 	for (i = 0; i < sectors; i++, ei_lba++) {
3197 		sector = start_sec + i;
3198 		sdt = dif_store(sip, sector);
3199 
3200 		if (sdt->app_tag == cpu_to_be16(0xffff))
3201 			continue;
3202 
3203 		/*
3204 		 * Because scsi_debug acts as both initiator and
3205 		 * target we proceed to verify the PI even if
3206 		 * RDPROTECT=3. This is done so the "initiator" knows
3207 		 * which type of error to return. Otherwise we would
3208 		 * have to iterate over the PI twice.
3209 		 */
3210 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3211 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3212 					 sector, ei_lba);
3213 			if (ret) {
3214 				dif_errors++;
3215 				break;
3216 			}
3217 		}
3218 	}
3219 
3220 	dif_copy_prot(scp, start_sec, sectors, true);
3221 	dix_reads++;
3222 
3223 	return ret;
3224 }
3225 
3226 static inline void
3227 sdeb_read_lock(struct sdeb_store_info *sip)
3228 {
3229 	if (sdebug_no_rwlock) {
3230 		if (sip)
3231 			__acquire(&sip->macc_lck);
3232 		else
3233 			__acquire(&sdeb_fake_rw_lck);
3234 	} else {
3235 		if (sip)
3236 			read_lock(&sip->macc_lck);
3237 		else
3238 			read_lock(&sdeb_fake_rw_lck);
3239 	}
3240 }
3241 
3242 static inline void
3243 sdeb_read_unlock(struct sdeb_store_info *sip)
3244 {
3245 	if (sdebug_no_rwlock) {
3246 		if (sip)
3247 			__release(&sip->macc_lck);
3248 		else
3249 			__release(&sdeb_fake_rw_lck);
3250 	} else {
3251 		if (sip)
3252 			read_unlock(&sip->macc_lck);
3253 		else
3254 			read_unlock(&sdeb_fake_rw_lck);
3255 	}
3256 }
3257 
3258 static inline void
3259 sdeb_write_lock(struct sdeb_store_info *sip)
3260 {
3261 	if (sdebug_no_rwlock) {
3262 		if (sip)
3263 			__acquire(&sip->macc_lck);
3264 		else
3265 			__acquire(&sdeb_fake_rw_lck);
3266 	} else {
3267 		if (sip)
3268 			write_lock(&sip->macc_lck);
3269 		else
3270 			write_lock(&sdeb_fake_rw_lck);
3271 	}
3272 }
3273 
3274 static inline void
3275 sdeb_write_unlock(struct sdeb_store_info *sip)
3276 {
3277 	if (sdebug_no_rwlock) {
3278 		if (sip)
3279 			__release(&sip->macc_lck);
3280 		else
3281 			__release(&sdeb_fake_rw_lck);
3282 	} else {
3283 		if (sip)
3284 			write_unlock(&sip->macc_lck);
3285 		else
3286 			write_unlock(&sdeb_fake_rw_lck);
3287 	}
3288 }
3289 
3290 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3291 {
3292 	bool check_prot;
3293 	u32 num;
3294 	u32 ei_lba;
3295 	int ret;
3296 	u64 lba;
3297 	struct sdeb_store_info *sip = devip2sip(devip, true);
3298 	u8 *cmd = scp->cmnd;
3299 
3300 	switch (cmd[0]) {
3301 	case READ_16:
3302 		ei_lba = 0;
3303 		lba = get_unaligned_be64(cmd + 2);
3304 		num = get_unaligned_be32(cmd + 10);
3305 		check_prot = true;
3306 		break;
3307 	case READ_10:
3308 		ei_lba = 0;
3309 		lba = get_unaligned_be32(cmd + 2);
3310 		num = get_unaligned_be16(cmd + 7);
3311 		check_prot = true;
3312 		break;
3313 	case READ_6:
3314 		ei_lba = 0;
3315 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3316 		      (u32)(cmd[1] & 0x1f) << 16;
3317 		num = (0 == cmd[4]) ? 256 : cmd[4];
3318 		check_prot = true;
3319 		break;
3320 	case READ_12:
3321 		ei_lba = 0;
3322 		lba = get_unaligned_be32(cmd + 2);
3323 		num = get_unaligned_be32(cmd + 6);
3324 		check_prot = true;
3325 		break;
3326 	case XDWRITEREAD_10:
3327 		ei_lba = 0;
3328 		lba = get_unaligned_be32(cmd + 2);
3329 		num = get_unaligned_be16(cmd + 7);
3330 		check_prot = false;
3331 		break;
3332 	default:	/* assume READ(32) */
3333 		lba = get_unaligned_be64(cmd + 12);
3334 		ei_lba = get_unaligned_be32(cmd + 20);
3335 		num = get_unaligned_be32(cmd + 28);
3336 		check_prot = false;
3337 		break;
3338 	}
3339 	if (unlikely(have_dif_prot && check_prot)) {
3340 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3341 		    (cmd[1] & 0xe0)) {
3342 			mk_sense_invalid_opcode(scp);
3343 			return check_condition_result;
3344 		}
3345 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3346 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3347 		    (cmd[1] & 0xe0) == 0)
3348 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3349 				    "to DIF device\n");
3350 	}
3351 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3352 		     atomic_read(&sdeb_inject_pending))) {
3353 		num /= 2;
3354 		atomic_set(&sdeb_inject_pending, 0);
3355 	}
3356 
3357 	ret = check_device_access_params(scp, lba, num, false);
3358 	if (ret)
3359 		return ret;
3360 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3361 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3362 		     ((lba + num) > sdebug_medium_error_start))) {
3363 		/* claim unrecoverable read error */
3364 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3365 		/* set info field and valid bit for fixed descriptor */
3366 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3367 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3368 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3369 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3370 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3371 		}
3372 		scsi_set_resid(scp, scsi_bufflen(scp));
3373 		return check_condition_result;
3374 	}
3375 
3376 	sdeb_read_lock(sip);
3377 
3378 	/* DIX + T10 DIF */
3379 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3380 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3381 		case 1: /* Guard tag error */
3382 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3383 				sdeb_read_unlock(sip);
3384 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3385 				return check_condition_result;
3386 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3387 				sdeb_read_unlock(sip);
3388 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3389 				return illegal_condition_result;
3390 			}
3391 			break;
3392 		case 3: /* Reference tag error */
3393 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3394 				sdeb_read_unlock(sip);
3395 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3396 				return check_condition_result;
3397 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3398 				sdeb_read_unlock(sip);
3399 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3400 				return illegal_condition_result;
3401 			}
3402 			break;
3403 		}
3404 	}
3405 
3406 	ret = do_device_access(sip, scp, 0, lba, num, false);
3407 	sdeb_read_unlock(sip);
3408 	if (unlikely(ret == -1))
3409 		return DID_ERROR << 16;
3410 
3411 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3412 
3413 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3414 		     atomic_read(&sdeb_inject_pending))) {
3415 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3416 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3417 			atomic_set(&sdeb_inject_pending, 0);
3418 			return check_condition_result;
3419 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3420 			/* Logical block guard check failed */
3421 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3422 			atomic_set(&sdeb_inject_pending, 0);
3423 			return illegal_condition_result;
3424 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3425 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3426 			atomic_set(&sdeb_inject_pending, 0);
3427 			return illegal_condition_result;
3428 		}
3429 	}
3430 	return 0;
3431 }
3432 
3433 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3434 			     unsigned int sectors, u32 ei_lba)
3435 {
3436 	int ret;
3437 	struct t10_pi_tuple *sdt;
3438 	void *daddr;
3439 	sector_t sector = start_sec;
3440 	int ppage_offset;
3441 	int dpage_offset;
3442 	struct sg_mapping_iter diter;
3443 	struct sg_mapping_iter piter;
3444 
3445 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3446 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3447 
3448 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3449 			scsi_prot_sg_count(SCpnt),
3450 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3451 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3452 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3453 
3454 	/* For each protection page */
3455 	while (sg_miter_next(&piter)) {
3456 		dpage_offset = 0;
3457 		if (WARN_ON(!sg_miter_next(&diter))) {
3458 			ret = 0x01;
3459 			goto out;
3460 		}
3461 
3462 		for (ppage_offset = 0; ppage_offset < piter.length;
3463 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3464 			/* If we're at the end of the current
3465 			 * data page advance to the next one
3466 			 */
3467 			if (dpage_offset >= diter.length) {
3468 				if (WARN_ON(!sg_miter_next(&diter))) {
3469 					ret = 0x01;
3470 					goto out;
3471 				}
3472 				dpage_offset = 0;
3473 			}
3474 
3475 			sdt = piter.addr + ppage_offset;
3476 			daddr = diter.addr + dpage_offset;
3477 
3478 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3479 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3480 				if (ret)
3481 					goto out;
3482 			}
3483 
3484 			sector++;
3485 			ei_lba++;
3486 			dpage_offset += sdebug_sector_size;
3487 		}
3488 		diter.consumed = dpage_offset;
3489 		sg_miter_stop(&diter);
3490 	}
3491 	sg_miter_stop(&piter);
3492 
3493 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3494 	dix_writes++;
3495 
3496 	return 0;
3497 
3498 out:
3499 	dif_errors++;
3500 	sg_miter_stop(&diter);
3501 	sg_miter_stop(&piter);
3502 	return ret;
3503 }
3504 
3505 static unsigned long lba_to_map_index(sector_t lba)
3506 {
3507 	if (sdebug_unmap_alignment)
3508 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3509 	sector_div(lba, sdebug_unmap_granularity);
3510 	return lba;
3511 }
3512 
3513 static sector_t map_index_to_lba(unsigned long index)
3514 {
3515 	sector_t lba = index * sdebug_unmap_granularity;
3516 
3517 	if (sdebug_unmap_alignment)
3518 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3519 	return lba;
3520 }
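
/*
 * Illustrative sketch (not part of the driver): the two helpers above are
 * near-inverses. With sdebug_unmap_granularity == 8 and
 * sdebug_unmap_alignment == 2, LBAs 2..9 share map index 1 and
 * map_index_to_lba(1) == 2, so the helper below returns 2 for lba = 5.
 * The helper name is hypothetical.
 */
static inline sector_t sdeb_example_grain_start(sector_t lba)
{
	/* First LBA of the (full) unmap grain containing lba */
	return map_index_to_lba(lba_to_map_index(lba));
}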
3521 
3522 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3523 			      unsigned int *num)
3524 {
3525 	sector_t end;
3526 	unsigned int mapped;
3527 	unsigned long index;
3528 	unsigned long next;
3529 
3530 	index = lba_to_map_index(lba);
3531 	mapped = test_bit(index, sip->map_storep);
3532 
3533 	if (mapped)
3534 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3535 	else
3536 		next = find_next_bit(sip->map_storep, map_size, index);
3537 
3538 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3539 	*num = end - lba;
3540 	return mapped;
3541 }
3542 
3543 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3544 		       unsigned int len)
3545 {
3546 	sector_t end = lba + len;
3547 
3548 	while (lba < end) {
3549 		unsigned long index = lba_to_map_index(lba);
3550 
3551 		if (index < map_size)
3552 			set_bit(index, sip->map_storep);
3553 
3554 		lba = map_index_to_lba(index + 1);
3555 	}
3556 }
3557 
3558 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3559 			 unsigned int len)
3560 {
3561 	sector_t end = lba + len;
3562 	u8 *fsp = sip->storep;
3563 
3564 	while (lba < end) {
3565 		unsigned long index = lba_to_map_index(lba);
3566 
3567 		if (lba == map_index_to_lba(index) &&
3568 		    lba + sdebug_unmap_granularity <= end &&
3569 		    index < map_size) {
3570 			clear_bit(index, sip->map_storep);
3571 			if (sdebug_lbprz) {  /* LBPRZ=1: zero fill; LBPRZ=2: 0xff fill */
3572 				memset(fsp + lba * sdebug_sector_size,
3573 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3574 				       sdebug_sector_size *
3575 				       sdebug_unmap_granularity);
3576 			}
3577 			if (sip->dif_storep) {
3578 				memset(sip->dif_storep + lba, 0xff,
3579 				       sizeof(*sip->dif_storep) *
3580 				       sdebug_unmap_granularity);
3581 			}
3582 		}
3583 		lba = map_index_to_lba(index + 1);
3584 	}
3585 }
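
/*
 * Illustrative sketch (not part of the driver): the fill byte a read of an
 * unmapped block yields under the memset above (LBPRZ=1 reads back zeros,
 * LBPRZ=2 reads back 0xff bytes). The helper name is hypothetical.
 */
static inline u8 sdeb_example_unmapped_fill_byte(void)
{
	return (sdebug_lbprz & 1) ? 0 : 0xff;
}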
3586 
3587 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3588 {
3589 	bool check_prot;
3590 	u32 num;
3591 	u32 ei_lba;
3592 	int ret;
3593 	u64 lba;
3594 	struct sdeb_store_info *sip = devip2sip(devip, true);
3595 	u8 *cmd = scp->cmnd;
3596 
3597 	switch (cmd[0]) {
3598 	case WRITE_16:
3599 		ei_lba = 0;
3600 		lba = get_unaligned_be64(cmd + 2);
3601 		num = get_unaligned_be32(cmd + 10);
3602 		check_prot = true;
3603 		break;
3604 	case WRITE_10:
3605 		ei_lba = 0;
3606 		lba = get_unaligned_be32(cmd + 2);
3607 		num = get_unaligned_be16(cmd + 7);
3608 		check_prot = true;
3609 		break;
3610 	case WRITE_6:
3611 		ei_lba = 0;
3612 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3613 		      (u32)(cmd[1] & 0x1f) << 16;
3614 		num = (0 == cmd[4]) ? 256 : cmd[4];
3615 		check_prot = true;
3616 		break;
3617 	case WRITE_12:
3618 		ei_lba = 0;
3619 		lba = get_unaligned_be32(cmd + 2);
3620 		num = get_unaligned_be32(cmd + 6);
3621 		check_prot = true;
3622 		break;
3623 	case 0x53:	/* XDWRITEREAD(10) */
3624 		ei_lba = 0;
3625 		lba = get_unaligned_be32(cmd + 2);
3626 		num = get_unaligned_be16(cmd + 7);
3627 		check_prot = false;
3628 		break;
3629 	default:	/* assume WRITE(32) */
3630 		lba = get_unaligned_be64(cmd + 12);
3631 		ei_lba = get_unaligned_be32(cmd + 20);
3632 		num = get_unaligned_be32(cmd + 28);
3633 		check_prot = false;
3634 		break;
3635 	}
3636 	if (unlikely(have_dif_prot && check_prot)) {
3637 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3638 		    (cmd[1] & 0xe0)) {
3639 			mk_sense_invalid_opcode(scp);
3640 			return check_condition_result;
3641 		}
3642 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3643 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3644 		    (cmd[1] & 0xe0) == 0)
3645 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3646 				    "to DIF device\n");
3647 	}
3648 
3649 	sdeb_write_lock(sip);
3650 	ret = check_device_access_params(scp, lba, num, true);
3651 	if (ret) {
3652 		sdeb_write_unlock(sip);
3653 		return ret;
3654 	}
3655 
3656 	/* DIX + T10 DIF */
3657 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3658 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3659 		case 1: /* Guard tag error */
3660 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3661 				sdeb_write_unlock(sip);
3662 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3663 				return illegal_condition_result;
3664 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3665 				sdeb_write_unlock(sip);
3666 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3667 				return check_condition_result;
3668 			}
3669 			break;
3670 		case 3: /* Reference tag error */
3671 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3672 				sdeb_write_unlock(sip);
3673 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3674 				return illegal_condition_result;
3675 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3676 				sdeb_write_unlock(sip);
3677 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3678 				return check_condition_result;
3679 			}
3680 			break;
3681 		}
3682 	}
3683 
3684 	ret = do_device_access(sip, scp, 0, lba, num, true);
3685 	if (unlikely(scsi_debug_lbp()))
3686 		map_region(sip, lba, num);
3687 	/* If ZBC zone then bump its write pointer */
3688 	if (sdebug_dev_is_zoned(devip))
3689 		zbc_inc_wp(devip, lba, num);
3690 	sdeb_write_unlock(sip);
3691 	if (unlikely(-1 == ret))
3692 		return DID_ERROR << 16;
3693 	else if (unlikely(sdebug_verbose &&
3694 			  (ret < (num * sdebug_sector_size))))
3695 		sdev_printk(KERN_INFO, scp->device,
3696 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3697 			    my_name, num * sdebug_sector_size, ret);
3698 
3699 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3700 		     atomic_read(&sdeb_inject_pending))) {
3701 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3702 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3703 			atomic_set(&sdeb_inject_pending, 0);
3704 			return check_condition_result;
3705 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3706 			/* Logical block guard check failed */
3707 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3708 			atomic_set(&sdeb_inject_pending, 0);
3709 			return illegal_condition_result;
3710 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3711 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3712 			atomic_set(&sdeb_inject_pending, 0);
3713 			return illegal_condition_result;
3714 		}
3715 	}
3716 	return 0;
3717 }
3718 
3719 /*
3720  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3721  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3722  */
3723 static int resp_write_scat(struct scsi_cmnd *scp,
3724 			   struct sdebug_dev_info *devip)
3725 {
3726 	u8 *cmd = scp->cmnd;
3727 	u8 *lrdp = NULL;
3728 	u8 *up;
3729 	struct sdeb_store_info *sip = devip2sip(devip, true);
3730 	u8 wrprotect;
3731 	u16 lbdof, num_lrd, k;
3732 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3733 	u32 lb_size = sdebug_sector_size;
3734 	u32 ei_lba;
3735 	u64 lba;
3736 	int ret, res;
3737 	bool is_16;
3738 	static const u32 lrd_size = 32; /* also the parameter list header size */
3739 
3740 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3741 		is_16 = false;
3742 		wrprotect = (cmd[10] >> 5) & 0x7;
3743 		lbdof = get_unaligned_be16(cmd + 12);
3744 		num_lrd = get_unaligned_be16(cmd + 16);
3745 		bt_len = get_unaligned_be32(cmd + 28);
3746 	} else {        /* that leaves WRITE SCATTERED(16) */
3747 		is_16 = true;
3748 		wrprotect = (cmd[2] >> 5) & 0x7;
3749 		lbdof = get_unaligned_be16(cmd + 4);
3750 		num_lrd = get_unaligned_be16(cmd + 8);
3751 		bt_len = get_unaligned_be32(cmd + 10);
3752 		if (unlikely(have_dif_prot)) {
3753 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3754 			    wrprotect) {
3755 				mk_sense_invalid_opcode(scp);
3756 				return illegal_condition_result;
3757 			}
3758 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3759 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3760 			     wrprotect == 0)
3761 				sdev_printk(KERN_ERR, scp->device,
3762 					    "Unprotected WR to DIF device\n");
3763 		}
3764 	}
3765 	if ((num_lrd == 0) || (bt_len == 0))
3766 		return 0;       /* T10 says these do-nothings are not errors */
3767 	if (lbdof == 0) {
3768 		if (sdebug_verbose)
3769 			sdev_printk(KERN_INFO, scp->device,
3770 				"%s: %s: LB Data Offset field bad\n",
3771 				my_name, __func__);
3772 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3773 		return illegal_condition_result;
3774 	}
3775 	lbdof_blen = lbdof * lb_size;
3776 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3777 		if (sdebug_verbose)
3778 			sdev_printk(KERN_INFO, scp->device,
3779 				"%s: %s: LBA range descriptors don't fit\n",
3780 				my_name, __func__);
3781 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3782 		return illegal_condition_result;
3783 	}
3784 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3785 	if (lrdp == NULL)
3786 		return SCSI_MLQUEUE_HOST_BUSY;
3787 	if (sdebug_verbose)
3788 		sdev_printk(KERN_INFO, scp->device,
3789 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3790 			my_name, __func__, lbdof_blen);
3791 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3792 	if (res == -1) {
3793 		ret = DID_ERROR << 16;
3794 		goto err_out;
3795 	}
3796 
3797 	sdeb_write_lock(sip);
3798 	sg_off = lbdof_blen;
3799 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3800 	cum_lb = 0;
3801 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3802 		lba = get_unaligned_be64(up + 0);
3803 		num = get_unaligned_be32(up + 8);
3804 		if (sdebug_verbose)
3805 			sdev_printk(KERN_INFO, scp->device,
3806 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3807 				my_name, __func__, k, lba, num, sg_off);
3808 		if (num == 0)
3809 			continue;
3810 		ret = check_device_access_params(scp, lba, num, true);
3811 		if (ret)
3812 			goto err_out_unlock;
3813 		num_by = num * lb_size;
3814 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3815 
3816 		if ((cum_lb + num) > bt_len) {
3817 			if (sdebug_verbose)
3818 				sdev_printk(KERN_INFO, scp->device,
3819 				    "%s: %s: sum of blocks > data provided\n",
3820 				    my_name, __func__);
3821 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3822 					0);
3823 			ret = illegal_condition_result;
3824 			goto err_out_unlock;
3825 		}
3826 
3827 		/* DIX + T10 DIF */
3828 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3829 			int prot_ret = prot_verify_write(scp, lba, num,
3830 							 ei_lba);
3831 
3832 			if (prot_ret) {
3833 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3834 						prot_ret);
3835 				ret = illegal_condition_result;
3836 				goto err_out_unlock;
3837 			}
3838 		}
3839 
3840 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3841 		/* If ZBC zone then bump its write pointer */
3842 		if (sdebug_dev_is_zoned(devip))
3843 			zbc_inc_wp(devip, lba, num);
3844 		if (unlikely(scsi_debug_lbp()))
3845 			map_region(sip, lba, num);
3846 		if (unlikely(-1 == ret)) {
3847 			ret = DID_ERROR << 16;
3848 			goto err_out_unlock;
3849 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3850 			sdev_printk(KERN_INFO, scp->device,
3851 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3852 			    my_name, num_by, ret);
3853 
3854 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3855 			     atomic_read(&sdeb_inject_pending))) {
3856 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3857 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3858 				atomic_set(&sdeb_inject_pending, 0);
3859 				ret = check_condition_result;
3860 				goto err_out_unlock;
3861 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3862 				/* Logical block guard check failed */
3863 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3864 				atomic_set(&sdeb_inject_pending, 0);
3865 				ret = illegal_condition_result;
3866 				goto err_out_unlock;
3867 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3868 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3869 				atomic_set(&sdeb_inject_pending, 0);
3870 				ret = illegal_condition_result;
3871 				goto err_out_unlock;
3872 			}
3873 		}
3874 		sg_off += num_by;
3875 		cum_lb += num;
3876 	}
3877 	ret = 0;
3878 err_out_unlock:
3879 	sdeb_write_unlock(sip);
3880 err_out:
3881 	kfree(lrdp);
3882 	return ret;
3883 }
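
/*
 * Illustrative sketch (not part of the driver): build the k'th LBA range
 * descriptor (LRD) in the layout resp_write_scat() parses above: a 32 byte
 * parameter list header, then 32 byte descriptors with a be64 LBA at
 * offset 0 and a be32 block count at offset 8. The helper name is
 * hypothetical.
 */
static inline void sdeb_example_fill_lrd(u8 *lrdp, u16 k, u64 lba, u32 num)
{
	u8 *up = lrdp + 32 + (u32)k * 32; /* skip header, index descriptor k */

	put_unaligned_be64(lba, up + 0);
	put_unaligned_be32(num, up + 8);
}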
3884 
3885 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3886 			   u32 ei_lba, bool unmap, bool ndob)
3887 {
3888 	struct scsi_device *sdp = scp->device;
3889 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3890 	unsigned long long i;
3891 	u64 block, lbaa;
3892 	u32 lb_size = sdebug_sector_size;
3893 	int ret;
3894 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3895 						scp->device->hostdata, true);
3896 	u8 *fs1p;
3897 	u8 *fsp;
3898 
3899 	sdeb_write_lock(sip);
3900 
3901 	ret = check_device_access_params(scp, lba, num, true);
3902 	if (ret) {
3903 		sdeb_write_unlock(sip);
3904 		return ret;
3905 	}
3906 
3907 	if (unmap && scsi_debug_lbp()) {
3908 		unmap_region(sip, lba, num);
3909 		goto out;
3910 	}
3911 	lbaa = lba;
3912 	block = do_div(lbaa, sdebug_store_sectors);
3913 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3914 	fsp = sip->storep;
3915 	fs1p = fsp + (block * lb_size);
3916 	if (ndob) {
3917 		memset(fs1p, 0, lb_size);
3918 		ret = 0;
3919 	} else
3920 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3921 
3922 	if (-1 == ret) {
3923 		sdeb_write_unlock(sip);
3924 		return DID_ERROR << 16;
3925 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3926 		sdev_printk(KERN_INFO, scp->device,
3927 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3928 			    my_name, "write same", lb_size, ret);
3929 
3930 	/* Copy first sector to remaining blocks */
3931 	for (i = 1 ; i < num ; i++) {
3932 		lbaa = lba + i;
3933 		block = do_div(lbaa, sdebug_store_sectors);
3934 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3935 	}
3936 	if (scsi_debug_lbp())
3937 		map_region(sip, lba, num);
3938 	/* If ZBC zone then bump its write pointer */
3939 	if (sdebug_dev_is_zoned(devip))
3940 		zbc_inc_wp(devip, lba, num);
3941 out:
3942 	sdeb_write_unlock(sip);
3943 
3944 	return 0;
3945 }
3946 
3947 static int resp_write_same_10(struct scsi_cmnd *scp,
3948 			      struct sdebug_dev_info *devip)
3949 {
3950 	u8 *cmd = scp->cmnd;
3951 	u32 lba;
3952 	u16 num;
3953 	u32 ei_lba = 0;
3954 	bool unmap = false;
3955 
3956 	if (cmd[1] & 0x8) {
3957 		if (sdebug_lbpws10 == 0) {
3958 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3959 			return check_condition_result;
3960 		} else
3961 			unmap = true;
3962 	}
3963 	lba = get_unaligned_be32(cmd + 2);
3964 	num = get_unaligned_be16(cmd + 7);
3965 	if (num > sdebug_write_same_length) {
3966 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3967 		return check_condition_result;
3968 	}
3969 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3970 }
3971 
3972 static int resp_write_same_16(struct scsi_cmnd *scp,
3973 			      struct sdebug_dev_info *devip)
3974 {
3975 	u8 *cmd = scp->cmnd;
3976 	u64 lba;
3977 	u32 num;
3978 	u32 ei_lba = 0;
3979 	bool unmap = false;
3980 	bool ndob = false;
3981 
3982 	if (cmd[1] & 0x8) {	/* UNMAP */
3983 		if (sdebug_lbpws == 0) {
3984 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3985 			return check_condition_result;
3986 		} else
3987 			unmap = true;
3988 	}
3989 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3990 		ndob = true;
3991 	lba = get_unaligned_be64(cmd + 2);
3992 	num = get_unaligned_be32(cmd + 10);
3993 	if (num > sdebug_write_same_length) {
3994 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3995 		return check_condition_result;
3996 	}
3997 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3998 }
3999 
4000 /* Note the mode field is in the same position as the (lower) service action
4001  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4002  * each mode of this command be reported separately; left for a future change. */
4003 static int resp_write_buffer(struct scsi_cmnd *scp,
4004 			     struct sdebug_dev_info *devip)
4005 {
4006 	u8 *cmd = scp->cmnd;
4007 	struct scsi_device *sdp = scp->device;
4008 	struct sdebug_dev_info *dp;
4009 	u8 mode;
4010 
4011 	mode = cmd[1] & 0x1f;
4012 	switch (mode) {
4013 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4014 		/* set UAs on this device only */
4015 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4016 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4017 		break;
4018 	case 0x5:	/* download MC, save and ACT */
4019 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4020 		break;
4021 	case 0x6:	/* download MC with offsets and ACT */
4022 		/* set UAs on most devices (LUs) in this target */
4023 		list_for_each_entry(dp,
4024 				    &devip->sdbg_host->dev_info_list,
4025 				    dev_list)
4026 			if (dp->target == sdp->id) {
4027 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4028 				if (devip != dp)
4029 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4030 						dp->uas_bm);
4031 			}
4032 		break;
4033 	case 0x7:	/* download MC with offsets, save, and ACT */
4034 		/* set UA on all devices (LUs) in this target */
4035 		list_for_each_entry(dp,
4036 				    &devip->sdbg_host->dev_info_list,
4037 				    dev_list)
4038 			if (dp->target == sdp->id)
4039 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4040 					dp->uas_bm);
4041 		break;
4042 	default:
4043 		/* do nothing for this command for other mode values */
4044 		break;
4045 	}
4046 	return 0;
4047 }
4048 
4049 static int resp_comp_write(struct scsi_cmnd *scp,
4050 			   struct sdebug_dev_info *devip)
4051 {
4052 	u8 *cmd = scp->cmnd;
4053 	u8 *arr;
4054 	struct sdeb_store_info *sip = devip2sip(devip, true);
4055 	u64 lba;
4056 	u32 dnum;
4057 	u32 lb_size = sdebug_sector_size;
4058 	u8 num;
4059 	int ret;
4060 	int retval = 0;
4061 
4062 	lba = get_unaligned_be64(cmd + 2);
4063 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4064 	if (0 == num)
4065 		return 0;	/* degenerate case, not an error */
4066 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4067 	    (cmd[1] & 0xe0)) {
4068 		mk_sense_invalid_opcode(scp);
4069 		return check_condition_result;
4070 	}
4071 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4072 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4073 	    (cmd[1] & 0xe0) == 0)
4074 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4075 			    "to DIF device\n");
4076 	ret = check_device_access_params(scp, lba, num, false);
4077 	if (ret)
4078 		return ret;
4079 	dnum = 2 * num;
4080 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4081 	if (NULL == arr) {
4082 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4083 				INSUFF_RES_ASCQ);
4084 		return check_condition_result;
4085 	}
4086 
4087 	sdeb_write_lock(sip);
4088 
4089 	ret = do_dout_fetch(scp, dnum, arr);
4090 	if (ret == -1) {
4091 		retval = DID_ERROR << 16;
4092 		goto cleanup;
4093 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4094 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4095 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4096 			    dnum * lb_size, ret);
4097 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4098 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4099 		retval = check_condition_result;
4100 		goto cleanup;
4101 	}
4102 	if (scsi_debug_lbp())
4103 		map_region(sip, lba, num);
4104 cleanup:
4105 	sdeb_write_unlock(sip);
4106 	kfree(arr);
4107 	return retval;
4108 }
4109 
4110 struct unmap_block_desc {
4111 	__be64	lba;
4112 	__be32	blocks;
4113 	__be32	__reserved;
4114 };
4115 
4116 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4117 {
4118 	unsigned char *buf;
4119 	struct unmap_block_desc *desc;
4120 	struct sdeb_store_info *sip = devip2sip(devip, true);
4121 	unsigned int i, payload_len, descriptors;
4122 	int ret;
4123 
4124 	if (!scsi_debug_lbp())
4125 		return 0;	/* fib and say it's done */
4126 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4127 	BUG_ON(scsi_bufflen(scp) != payload_len);
4128 
4129 	descriptors = (payload_len - 8) / 16;
4130 	if (descriptors > sdebug_unmap_max_desc) {
4131 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4132 		return check_condition_result;
4133 	}
4134 
4135 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4136 	if (!buf) {
4137 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4138 				INSUFF_RES_ASCQ);
4139 		return check_condition_result;
4140 	}
4141 
4142 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4143 
4144 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4145 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4146 
4147 	desc = (void *)&buf[8];
4148 
4149 	sdeb_write_lock(sip);
4150 
4151 	for (i = 0 ; i < descriptors ; i++) {
4152 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4153 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4154 
4155 		ret = check_device_access_params(scp, lba, num, true);
4156 		if (ret)
4157 			goto out;
4158 
4159 		unmap_region(sip, lba, num);
4160 	}
4161 
4162 	ret = 0;
4163 
4164 out:
4165 	sdeb_write_unlock(sip);
4166 	kfree(buf);
4167 
4168 	return ret;
4169 }
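
/*
 * Illustrative sketch (not part of the driver): build the UNMAP parameter
 * list that resp_unmap() above expects: an 8 byte header (unmap data
 * length at offset 0, block descriptor data length at offset 2), then the
 * 16 byte descriptors. buf must hold 8 + 16 * descs bytes. The helper
 * name is hypothetical.
 */
static inline void sdeb_example_fill_unmap(u8 *buf, unsigned int descs,
					   const u64 *lbas, const u32 *blocks)
{
	struct unmap_block_desc *desc = (void *)&buf[8];
	unsigned int i;

	put_unaligned_be16(8 + descs * 16 - 2, &buf[0]);
	put_unaligned_be16(descs * 16, &buf[2]);
	for (i = 0; i < descs; i++) {
		desc[i].lba = cpu_to_be64(lbas[i]);
		desc[i].blocks = cpu_to_be32(blocks[i]);
		desc[i].__reserved = 0;
	}
}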
4170 
4171 #define SDEBUG_GET_LBA_STATUS_LEN 32
4172 
4173 static int resp_get_lba_status(struct scsi_cmnd *scp,
4174 			       struct sdebug_dev_info *devip)
4175 {
4176 	u8 *cmd = scp->cmnd;
4177 	u64 lba;
4178 	u32 alloc_len, mapped, num;
4179 	int ret;
4180 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4181 
4182 	lba = get_unaligned_be64(cmd + 2);
4183 	alloc_len = get_unaligned_be32(cmd + 10);
4184 
4185 	if (alloc_len < 24)
4186 		return 0;
4187 
4188 	ret = check_device_access_params(scp, lba, 1, false);
4189 	if (ret)
4190 		return ret;
4191 
4192 	if (scsi_debug_lbp()) {
4193 		struct sdeb_store_info *sip = devip2sip(devip, true);
4194 
4195 		mapped = map_state(sip, lba, &num);
4196 	} else {
4197 		mapped = 1;
4198 		/* following just in case virtual_gb changed */
4199 		sdebug_capacity = get_sdebug_capacity();
4200 		if (sdebug_capacity - lba <= 0xffffffff)
4201 			num = sdebug_capacity - lba;
4202 		else
4203 			num = 0xffffffff;
4204 	}
4205 
4206 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4207 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4208 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4209 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4210 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4211 
4212 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4213 }
4214 
4215 static int resp_sync_cache(struct scsi_cmnd *scp,
4216 			   struct sdebug_dev_info *devip)
4217 {
4218 	int res = 0;
4219 	u64 lba;
4220 	u32 num_blocks;
4221 	u8 *cmd = scp->cmnd;
4222 
4223 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4224 		lba = get_unaligned_be32(cmd + 2);
4225 		num_blocks = get_unaligned_be16(cmd + 7);
4226 	} else {				/* SYNCHRONIZE_CACHE(16) */
4227 		lba = get_unaligned_be64(cmd + 2);
4228 		num_blocks = get_unaligned_be32(cmd + 10);
4229 	}
4230 	if (lba + num_blocks > sdebug_capacity) {
4231 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4232 		return check_condition_result;
4233 	}
4234 	if (!write_since_sync || (cmd[1] & 0x2))
4235 		res = SDEG_RES_IMMED_MASK;
4236 	else		/* delay if write_since_sync and IMMED clear */
4237 		write_since_sync = false;
4238 	return res;
4239 }
4240 
4241 /*
4242  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4243  * CONDITION MET if the specified blocks have fitted (or will fit) in the
4244  * cache, and a GOOD status otherwise. Model a disk with a big cache and
4245  * always yield CONDITION MET. Actually tries to bring the range in main
4246  * memory into the cache associated with the CPU(s).
4247  */
4248 static int resp_pre_fetch(struct scsi_cmnd *scp,
4249 			  struct sdebug_dev_info *devip)
4250 {
4251 	int res = 0;
4252 	u64 lba;
4253 	u64 block, rest = 0;
4254 	u32 nblks;
4255 	u8 *cmd = scp->cmnd;
4256 	struct sdeb_store_info *sip = devip2sip(devip, true);
4257 	u8 *fsp = sip->storep;
4258 
4259 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4260 		lba = get_unaligned_be32(cmd + 2);
4261 		nblks = get_unaligned_be16(cmd + 7);
4262 	} else {			/* PRE-FETCH(16) */
4263 		lba = get_unaligned_be64(cmd + 2);
4264 		nblks = get_unaligned_be32(cmd + 10);
4265 	}
4266 	if (lba + nblks > sdebug_capacity) {
4267 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4268 		return check_condition_result;
4269 	}
4270 	if (!fsp)
4271 		goto fini;
4272 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4273 	block = do_div(lba, sdebug_store_sectors);
4274 	if (block + nblks > sdebug_store_sectors)
4275 		rest = block + nblks - sdebug_store_sectors;
4276 
4277 	/* Try to bring the PRE-FETCH range into CPU's cache */
4278 	sdeb_read_lock(sip);
4279 	prefetch_range(fsp + (sdebug_sector_size * block),
4280 		       (nblks - rest) * sdebug_sector_size);
4281 	if (rest)
4282 		prefetch_range(fsp, rest * sdebug_sector_size);
4283 	sdeb_read_unlock(sip);
4284 fini:
4285 	if (cmd[1] & 0x2)
4286 		res = SDEG_RES_IMMED_MASK;
4287 	return res | condition_met_result;
4288 }
4289 
4290 #define RL_BUCKET_ELEMS 8
4291 
4292 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4293  * (W-LUN), the normal Linux scanning logic does not associate it with a
4294  * device (e.g. /dev/sg7). The following magic will make that association:
4295  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4296  * where <n> is a host number. If there are multiple targets in a host then
4297  * the above will associate a W-LUN to each target. To only get a W-LUN
4298  * for target 2, then use "echo '- 2 49409' > scan" .
4299  */
4300 static int resp_report_luns(struct scsi_cmnd *scp,
4301 			    struct sdebug_dev_info *devip)
4302 {
4303 	unsigned char *cmd = scp->cmnd;
4304 	unsigned int alloc_len;
4305 	unsigned char select_report;
4306 	u64 lun;
4307 	struct scsi_lun *lun_p;
4308 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4309 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4310 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4311 	unsigned int tlun_cnt;	/* total LUN count */
4312 	unsigned int rlen;	/* response length (in bytes) */
4313 	int k, j, n, res;
4314 	unsigned int off_rsp = 0;
4315 	const int sz_lun = sizeof(struct scsi_lun);
4316 
4317 	clear_luns_changed_on_target(devip);
4318 
4319 	select_report = cmd[2];
4320 	alloc_len = get_unaligned_be32(cmd + 6);
4321 
4322 	if (alloc_len < 4) {
4323 		pr_err("alloc len too small %d\n", alloc_len);
4324 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4325 		return check_condition_result;
4326 	}
4327 
4328 	switch (select_report) {
4329 	case 0:		/* all LUNs apart from W-LUNs */
4330 		lun_cnt = sdebug_max_luns;
4331 		wlun_cnt = 0;
4332 		break;
4333 	case 1:		/* only W-LUNs */
4334 		lun_cnt = 0;
4335 		wlun_cnt = 1;
4336 		break;
4337 	case 2:		/* all LUNs */
4338 		lun_cnt = sdebug_max_luns;
4339 		wlun_cnt = 1;
4340 		break;
4341 	case 0x10:	/* only administrative LUs */
4342 	case 0x11:	/* see SPC-5 */
4343 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4344 	default:
4345 		pr_debug("select report invalid %d\n", select_report);
4346 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4347 		return check_condition_result;
4348 	}
4349 
4350 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4351 		--lun_cnt;
4352 
4353 	tlun_cnt = lun_cnt + wlun_cnt;
4354 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4355 	scsi_set_resid(scp, scsi_bufflen(scp));
4356 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4357 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4358 
4359 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4360 	lun = sdebug_no_lun_0 ? 1 : 0;
4361 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4362 		memset(arr, 0, sizeof(arr));
4363 		lun_p = (struct scsi_lun *)&arr[0];
4364 		if (k == 0) {
4365 			put_unaligned_be32(rlen, &arr[0]);
4366 			++lun_p;
4367 			j = 1;
4368 		}
4369 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4370 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4371 				break;
4372 			int_to_scsilun(lun++, lun_p);
4373 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4374 				lun_p->scsi_lun[0] |= 0x40;
4375 		}
4376 		if (j < RL_BUCKET_ELEMS)
4377 			break;
4378 		n = j * sz_lun;
4379 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4380 		if (res)
4381 			return res;
4382 		off_rsp += n;
4383 	}
4384 	if (wlun_cnt) {
4385 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4386 		++j;
4387 	}
4388 	if (j > 0)
4389 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4390 	return res;
4391 }
4392 
4393 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4394 {
4395 	bool is_bytchk3 = false;
4396 	u8 bytchk;
4397 	int ret, j;
4398 	u32 vnum, a_num, off;
4399 	const u32 lb_size = sdebug_sector_size;
4400 	u64 lba;
4401 	u8 *arr;
4402 	u8 *cmd = scp->cmnd;
4403 	struct sdeb_store_info *sip = devip2sip(devip, true);
4404 
4405 	bytchk = (cmd[1] >> 1) & 0x3;
4406 	if (bytchk == 0) {
4407 		return 0;	/* always claim internal verify okay */
4408 	} else if (bytchk == 2) {
4409 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4410 		return check_condition_result;
4411 	} else if (bytchk == 3) {
4412 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4413 	}
4414 	switch (cmd[0]) {
4415 	case VERIFY_16:
4416 		lba = get_unaligned_be64(cmd + 2);
4417 		vnum = get_unaligned_be32(cmd + 10);
4418 		break;
4419 	case VERIFY:		/* is VERIFY(10) */
4420 		lba = get_unaligned_be32(cmd + 2);
4421 		vnum = get_unaligned_be16(cmd + 7);
4422 		break;
4423 	default:
4424 		mk_sense_invalid_opcode(scp);
4425 		return check_condition_result;
4426 	}
4427 	if (vnum == 0)
4428 		return 0;	/* not an error */
4429 	a_num = is_bytchk3 ? 1 : vnum;
4430 	/* Treat following check like one for read (i.e. no write) access */
4431 	ret = check_device_access_params(scp, lba, a_num, false);
4432 	if (ret)
4433 		return ret;
4434 
4435 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4436 	if (!arr) {
4437 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4438 				INSUFF_RES_ASCQ);
4439 		return check_condition_result;
4440 	}
4441 	/* Not changing store, so only need read access */
4442 	sdeb_read_lock(sip);
4443 
4444 	ret = do_dout_fetch(scp, a_num, arr);
4445 	if (ret == -1) {
4446 		ret = DID_ERROR << 16;
4447 		goto cleanup;
4448 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4449 		sdev_printk(KERN_INFO, scp->device,
4450 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4451 			    my_name, __func__, a_num * lb_size, ret);
4452 	}
4453 	if (is_bytchk3) {
4454 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4455 			memcpy(arr + off, arr, lb_size);
4456 	}
4457 	ret = 0;
4458 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4459 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4460 		ret = check_condition_result;
4461 		goto cleanup;
4462 	}
4463 cleanup:
4464 	sdeb_read_unlock(sip);
4465 	kfree(arr);
4466 	return ret;
4467 }
4468 
4469 #define RZONES_DESC_HD 64
4470 
4471 /* Report zones depending on start LBA and reporting options */
4472 static int resp_report_zones(struct scsi_cmnd *scp,
4473 			     struct sdebug_dev_info *devip)
4474 {
4475 	unsigned int rep_max_zones, nrz = 0;
4476 	int ret = 0;
4477 	u32 alloc_len, rep_opts, rep_len;
4478 	bool partial;
4479 	u64 lba, zs_lba;
4480 	u8 *arr = NULL, *desc;
4481 	u8 *cmd = scp->cmnd;
4482 	struct sdeb_zone_state *zsp = NULL;
4483 	struct sdeb_store_info *sip = devip2sip(devip, false);
4484 
4485 	if (!sdebug_dev_is_zoned(devip)) {
4486 		mk_sense_invalid_opcode(scp);
4487 		return check_condition_result;
4488 	}
4489 	zs_lba = get_unaligned_be64(cmd + 2);
4490 	alloc_len = get_unaligned_be32(cmd + 10);
4491 	if (alloc_len == 0)
4492 		return 0;	/* not an error */
4493 	rep_opts = cmd[14] & 0x3f;
4494 	partial = cmd[14] & 0x80;
4495 
4496 	if (zs_lba >= sdebug_capacity) {
4497 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4498 		return check_condition_result;
4499 	}
4500 
4501 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4502 
4503 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4504 	if (!arr) {
4505 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4506 				INSUFF_RES_ASCQ);
4507 		return check_condition_result;
4508 	}
4509 
4510 	sdeb_read_lock(sip);
4511 
4512 	desc = arr + 64;
4513 	for (lba = zs_lba; lba < sdebug_capacity;
4514 	     lba = zsp->z_start + zsp->z_size) {
4515 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4516 			break;
4517 		zsp = zbc_zone(devip, lba);
4518 		switch (rep_opts) {
4519 		case 0x00:
4520 			/* All zones */
4521 			break;
4522 		case 0x01:
4523 			/* Empty zones */
4524 			if (zsp->z_cond != ZC1_EMPTY)
4525 				continue;
4526 			break;
4527 		case 0x02:
4528 			/* Implicit open zones */
4529 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4530 				continue;
4531 			break;
4532 		case 0x03:
4533 			/* Explicit open zones */
4534 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4535 				continue;
4536 			break;
4537 		case 0x04:
4538 			/* Closed zones */
4539 			if (zsp->z_cond != ZC4_CLOSED)
4540 				continue;
4541 			break;
4542 		case 0x05:
4543 			/* Full zones */
4544 			if (zsp->z_cond != ZC5_FULL)
4545 				continue;
4546 			break;
4547 		case 0x06:
4548 		case 0x07:
4549 		case 0x10:
4550 			/*
4551 			 * Read-only, offline, reset WP recommended are
4552 			 * not emulated: no zones to report.
4553 			 */
4554 			continue;
4555 		case 0x11:
4556 			/* non-seq-resource set */
4557 			if (!zsp->z_non_seq_resource)
4558 				continue;
4559 			break;
4560 		case 0x3e:
4561 			/* All zones except gap zones. */
4562 			if (zbc_zone_is_gap(zsp))
4563 				continue;
4564 			break;
4565 		case 0x3f:
4566 			/* Not write pointer (conventional) zones */
4567 			if (zbc_zone_is_seq(zsp))
4568 				continue;
4569 			break;
4570 		default:
4571 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4572 					INVALID_FIELD_IN_CDB, 0);
4573 			ret = check_condition_result;
4574 			goto fini;
4575 		}
4576 
4577 		if (nrz < rep_max_zones) {
4578 			/* Fill zone descriptor */
4579 			desc[0] = zsp->z_type;
4580 			desc[1] = zsp->z_cond << 4;
4581 			if (zsp->z_non_seq_resource)
4582 				desc[1] |= 1 << 1;
4583 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4584 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4585 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4586 			desc += 64;
4587 		}
4588 
4589 		if (partial && nrz >= rep_max_zones)
4590 			break;
4591 
4592 		nrz++;
4593 	}
4594 
4595 	/* Report header */
4596 	/* Zone list length. */
4597 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4598 	/* Maximum LBA */
4599 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4600 	/* Zone starting LBA granularity. */
4601 	if (devip->zcap < devip->zsize)
4602 		put_unaligned_be64(devip->zsize, arr + 16);
4603 
4604 	rep_len = (unsigned long)desc - (unsigned long)arr;
4605 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4606 
4607 fini:
4608 	sdeb_read_unlock(sip);
4609 	kfree(arr);
4610 	return ret;
4611 }
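
/*
 * Illustrative sketch (not part of the driver): decode one 64 byte zone
 * descriptor as filled in by resp_report_zones() above (zone type in byte
 * 0, condition in the high nibble of byte 1, then be64 zone length, start
 * LBA and write pointer at offsets 8, 16 and 24). Names are hypothetical.
 */
static inline void sdeb_example_parse_zone_desc(const u8 *desc, u8 *z_type,
						u8 *z_cond, u64 *z_size,
						u64 *z_start, u64 *z_wp)
{
	*z_type = desc[0];
	*z_cond = desc[1] >> 4;
	*z_size = get_unaligned_be64(desc + 8);
	*z_start = get_unaligned_be64(desc + 16);
	*z_wp = get_unaligned_be64(desc + 24);
}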
4612 
4613 /* Logic transplanted from tcmu-runner, file_zbc.c */
4614 static void zbc_open_all(struct sdebug_dev_info *devip)
4615 {
4616 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4617 	unsigned int i;
4618 
4619 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4620 		if (zsp->z_cond == ZC4_CLOSED)
4621 			zbc_open_zone(devip, &devip->zstate[i], true);
4622 	}
4623 }
4624 
4625 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4626 {
4627 	int res = 0;
4628 	u64 z_id;
4629 	enum sdebug_z_cond zc;
4630 	u8 *cmd = scp->cmnd;
4631 	struct sdeb_zone_state *zsp;
4632 	bool all = cmd[14] & 0x01;
4633 	struct sdeb_store_info *sip = devip2sip(devip, false);
4634 
4635 	if (!sdebug_dev_is_zoned(devip)) {
4636 		mk_sense_invalid_opcode(scp);
4637 		return check_condition_result;
4638 	}
4639 
4640 	sdeb_write_lock(sip);
4641 
4642 	if (all) {
4643 		/* Check if all closed zones can be open */
4644 		if (devip->max_open &&
4645 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4646 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4647 					INSUFF_ZONE_ASCQ);
4648 			res = check_condition_result;
4649 			goto fini;
4650 		}
4651 		/* Open all closed zones */
4652 		zbc_open_all(devip);
4653 		goto fini;
4654 	}
4655 
4656 	/* Open the specified zone */
4657 	z_id = get_unaligned_be64(cmd + 2);
4658 	if (z_id >= sdebug_capacity) {
4659 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4660 		res = check_condition_result;
4661 		goto fini;
4662 	}
4663 
4664 	zsp = zbc_zone(devip, z_id);
4665 	if (z_id != zsp->z_start) {
4666 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4667 		res = check_condition_result;
4668 		goto fini;
4669 	}
4670 	if (zbc_zone_is_conv(zsp)) {
4671 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4672 		res = check_condition_result;
4673 		goto fini;
4674 	}
4675 
4676 	zc = zsp->z_cond;
4677 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4678 		goto fini;
4679 
4680 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4681 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4682 				INSUFF_ZONE_ASCQ);
4683 		res = check_condition_result;
4684 		goto fini;
4685 	}
4686 
4687 	zbc_open_zone(devip, zsp, true);
4688 fini:
4689 	sdeb_write_unlock(sip);
4690 	return res;
4691 }
4692 
4693 static void zbc_close_all(struct sdebug_dev_info *devip)
4694 {
4695 	unsigned int i;
4696 
4697 	for (i = 0; i < devip->nr_zones; i++)
4698 		zbc_close_zone(devip, &devip->zstate[i]);
4699 }
4700 
4701 static int resp_close_zone(struct scsi_cmnd *scp,
4702 			   struct sdebug_dev_info *devip)
4703 {
4704 	int res = 0;
4705 	u64 z_id;
4706 	u8 *cmd = scp->cmnd;
4707 	struct sdeb_zone_state *zsp;
4708 	bool all = cmd[14] & 0x01;
4709 	struct sdeb_store_info *sip = devip2sip(devip, false);
4710 
4711 	if (!sdebug_dev_is_zoned(devip)) {
4712 		mk_sense_invalid_opcode(scp);
4713 		return check_condition_result;
4714 	}
4715 
4716 	sdeb_write_lock(sip);
4717 
4718 	if (all) {
4719 		zbc_close_all(devip);
4720 		goto fini;
4721 	}
4722 
4723 	/* Close specified zone */
4724 	z_id = get_unaligned_be64(cmd + 2);
4725 	if (z_id >= sdebug_capacity) {
4726 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4727 		res = check_condition_result;
4728 		goto fini;
4729 	}
4730 
4731 	zsp = zbc_zone(devip, z_id);
4732 	if (z_id != zsp->z_start) {
4733 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4734 		res = check_condition_result;
4735 		goto fini;
4736 	}
4737 	if (zbc_zone_is_conv(zsp)) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 
4743 	zbc_close_zone(devip, zsp);
4744 fini:
4745 	sdeb_write_unlock(sip);
4746 	return res;
4747 }
4748 
4749 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4750 			    struct sdeb_zone_state *zsp, bool empty)
4751 {
4752 	enum sdebug_z_cond zc = zsp->z_cond;
4753 
4754 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4755 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4756 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4757 			zbc_close_zone(devip, zsp);
4758 		if (zsp->z_cond == ZC4_CLOSED)
4759 			devip->nr_closed--;
4760 		zsp->z_wp = zsp->z_start + zsp->z_size;
4761 		zsp->z_cond = ZC5_FULL;
4762 	}
4763 }
4764 
4765 static void zbc_finish_all(struct sdebug_dev_info *devip)
4766 {
4767 	unsigned int i;
4768 
4769 	for (i = 0; i < devip->nr_zones; i++)
4770 		zbc_finish_zone(devip, &devip->zstate[i], false);
4771 }
4772 
4773 static int resp_finish_zone(struct scsi_cmnd *scp,
4774 			    struct sdebug_dev_info *devip)
4775 {
4776 	struct sdeb_zone_state *zsp;
4777 	int res = 0;
4778 	u64 z_id;
4779 	u8 *cmd = scp->cmnd;
4780 	bool all = cmd[14] & 0x01;
4781 	struct sdeb_store_info *sip = devip2sip(devip, false);
4782 
4783 	if (!sdebug_dev_is_zoned(devip)) {
4784 		mk_sense_invalid_opcode(scp);
4785 		return check_condition_result;
4786 	}
4787 
4788 	sdeb_write_lock(sip);
4789 
4790 	if (all) {
4791 		zbc_finish_all(devip);
4792 		goto fini;
4793 	}
4794 
4795 	/* Finish the specified zone */
4796 	z_id = get_unaligned_be64(cmd + 2);
4797 	if (z_id >= sdebug_capacity) {
4798 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4799 		res = check_condition_result;
4800 		goto fini;
4801 	}
4802 
4803 	zsp = zbc_zone(devip, z_id);
4804 	if (z_id != zsp->z_start) {
4805 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4806 		res = check_condition_result;
4807 		goto fini;
4808 	}
4809 	if (zbc_zone_is_conv(zsp)) {
4810 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4811 		res = check_condition_result;
4812 		goto fini;
4813 	}
4814 
4815 	zbc_finish_zone(devip, zsp, true);
4816 fini:
4817 	sdeb_write_unlock(sip);
4818 	return res;
4819 }
4820 
4821 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4822 			 struct sdeb_zone_state *zsp)
4823 {
4824 	enum sdebug_z_cond zc;
4825 	struct sdeb_store_info *sip = devip2sip(devip, false);
4826 
4827 	if (!zbc_zone_is_seq(zsp))
4828 		return;
4829 
4830 	zc = zsp->z_cond;
4831 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4832 		zbc_close_zone(devip, zsp);
4833 
4834 	if (zsp->z_cond == ZC4_CLOSED)
4835 		devip->nr_closed--;
4836 
4837 	if (zsp->z_wp > zsp->z_start)
4838 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4839 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4840 
4841 	zsp->z_non_seq_resource = false;
4842 	zsp->z_wp = zsp->z_start;
4843 	zsp->z_cond = ZC1_EMPTY;
4844 }
4845 
4846 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4847 {
4848 	unsigned int i;
4849 
4850 	for (i = 0; i < devip->nr_zones; i++)
4851 		zbc_rwp_zone(devip, &devip->zstate[i]);
4852 }
4853 
4854 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4855 {
4856 	struct sdeb_zone_state *zsp;
4857 	int res = 0;
4858 	u64 z_id;
4859 	u8 *cmd = scp->cmnd;
4860 	bool all = cmd[14] & 0x01;
4861 	struct sdeb_store_info *sip = devip2sip(devip, false);
4862 
4863 	if (!sdebug_dev_is_zoned(devip)) {
4864 		mk_sense_invalid_opcode(scp);
4865 		return check_condition_result;
4866 	}
4867 
4868 	sdeb_write_lock(sip);
4869 
4870 	if (all) {
4871 		zbc_rwp_all(devip);
4872 		goto fini;
4873 	}
4874 
4875 	z_id = get_unaligned_be64(cmd + 2);
4876 	if (z_id >= sdebug_capacity) {
4877 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4878 		res = check_condition_result;
4879 		goto fini;
4880 	}
4881 
4882 	zsp = zbc_zone(devip, z_id);
4883 	if (z_id != zsp->z_start) {
4884 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4885 		res = check_condition_result;
4886 		goto fini;
4887 	}
4888 	if (zbc_zone_is_conv(zsp)) {
4889 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4890 		res = check_condition_result;
4891 		goto fini;
4892 	}
4893 
4894 	zbc_rwp_zone(devip, zsp);
4895 fini:
4896 	sdeb_write_unlock(sip);
4897 	return res;
4898 }
4899 
4900 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4901 {
4902 	u16 hwq;
4903 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4904 
4905 	hwq = blk_mq_unique_tag_to_hwq(tag);
4906 
4907 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4908 	if (WARN_ON_ONCE(hwq >= submit_queues))
4909 		hwq = 0;
4910 
4911 	return sdebug_q_arr + hwq;
4912 }
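
/*
 * Illustrative sketch (not part of the driver): blk_mq_unique_tag() packs
 * the hardware queue index into the upper 16 bits and the per-queue tag
 * into the lower 16 bits; get_queue() above relies on that encoding. The
 * helper name is hypothetical.
 */
static inline void sdeb_example_split_tag(u32 unique_tag, u16 *hwq, u16 *tag)
{
	*hwq = blk_mq_unique_tag_to_hwq(unique_tag); /* unique_tag >> 16 */
	*tag = blk_mq_unique_tag_to_tag(unique_tag); /* unique_tag & 0xffff */
}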
4913 
4914 static u32 get_tag(struct scsi_cmnd *cmnd)
4915 {
4916 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4917 }
4918 
4919 /* Queued (deferred) command completions converge here. */
4920 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4921 {
4922 	bool aborted = sd_dp->aborted;
4923 	int qc_idx;
4924 	int retiring = 0;
4925 	unsigned long iflags;
4926 	struct sdebug_queue *sqp;
4927 	struct sdebug_queued_cmd *sqcp;
4928 	struct scsi_cmnd *scp;
4929 
4930 	if (unlikely(aborted))
4931 		sd_dp->aborted = false;
4932 	qc_idx = sd_dp->qc_idx;
4933 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4934 	if (sdebug_statistics) {
4935 		atomic_inc(&sdebug_completions);
4936 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4937 			atomic_inc(&sdebug_miss_cpus);
4938 	}
4939 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4940 		pr_err("wild qc_idx=%d\n", qc_idx);
4941 		return;
4942 	}
4943 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4944 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4945 	sqcp = &sqp->qc_arr[qc_idx];
4946 	scp = sqcp->a_cmnd;
4947 	if (unlikely(scp == NULL)) {
4948 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4949 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4950 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4951 		return;
4952 	}
4953 
4954 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4955 		retiring = 1;
4956 
4957 	sqcp->a_cmnd = NULL;
4958 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4959 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4960 		pr_err("Unexpected completion\n");
4961 		return;
4962 	}
4963 
4964 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4965 		int k, retval;
4966 
4967 		retval = atomic_read(&retired_max_queue);
4968 		if (qc_idx >= retval) {
4969 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4970 			pr_err("index %d too large\n", retval);
4971 			return;
4972 		}
4973 		k = find_last_bit(sqp->in_use_bm, retval);
4974 		if ((k < sdebug_max_queue) || (k == retval))
4975 			atomic_set(&retired_max_queue, 0);
4976 		else
4977 			atomic_set(&retired_max_queue, k + 1);
4978 	}
4979 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4980 	if (unlikely(aborted)) {
4981 		if (sdebug_verbose)
4982 			pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4983 		blk_abort_request(scsi_cmd_to_rq(scp));
4984 		return;
4985 	}
4986 	scsi_done(scp); /* callback to mid level */
4987 }
4988 
4989 /* Called when the high-resolution timer fires. */
4990 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4991 {
4992 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4993 						  hrt);
4994 	sdebug_q_cmd_complete(sd_dp);
4995 	return HRTIMER_NORESTART;
4996 }
4997 
4998 /* Called when the work queue runs the deferred work item. */
4999 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5000 {
5001 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5002 						  ew.work);
5003 	sdebug_q_cmd_complete(sd_dp);
5004 }
5005 
5006 static bool got_shared_uuid;
5007 static uuid_t shared_uuid;
5008 
5009 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5010 {
5011 	struct sdeb_zone_state *zsp;
5012 	sector_t capacity = get_sdebug_capacity();
5013 	sector_t conv_capacity;
5014 	sector_t zstart = 0;
5015 	unsigned int i;
5016 
5017 	/*
5018 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5019 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5020 	 * use the specified zone size, checking that at least 2 zones can be
5021 	 * created for the device.
5022 	 */
5023 	if (!sdeb_zbc_zone_size_mb) {
5024 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5025 			>> ilog2(sdebug_sector_size);
5026 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5027 			devip->zsize >>= 1;
5028 		if (devip->zsize < 2) {
5029 			pr_err("Device capacity too small\n");
5030 			return -EINVAL;
5031 		}
5032 	} else {
5033 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5034 			pr_err("Zone size is not a power of 2\n");
5035 			return -EINVAL;
5036 		}
5037 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5038 			>> ilog2(sdebug_sector_size);
5039 		if (devip->zsize >= capacity) {
5040 			pr_err("Zone size too large for device capacity\n");
5041 			return -EINVAL;
5042 		}
5043 	}
5044 
5045 	devip->zsize_shift = ilog2(devip->zsize);
5046 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5047 
5048 	if (sdeb_zbc_zone_cap_mb == 0) {
5049 		devip->zcap = devip->zsize;
5050 	} else {
5051 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5052 			      ilog2(sdebug_sector_size);
5053 		if (devip->zcap > devip->zsize) {
5054 			pr_err("Zone capacity too large\n");
5055 			return -EINVAL;
5056 		}
5057 	}
5058 
5059 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5060 	if (conv_capacity >= capacity) {
5061 		pr_err("Number of conventional zones too large\n");
5062 		return -EINVAL;
5063 	}
5064 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5065 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5066 			      devip->zsize_shift;
5067 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5068 
5069 	/* Add gap zones if zone capacity is smaller than the zone size */
5070 	if (devip->zcap < devip->zsize)
5071 		devip->nr_zones += devip->nr_seq_zones;
5072 
5073 	if (devip->zmodel == BLK_ZONED_HM) {
5074 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5075 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5076 			devip->max_open = (devip->nr_zones - 1) / 2;
5077 		else
5078 			devip->max_open = sdeb_zbc_max_open;
5079 	}
5080 
5081 	devip->zstate = kcalloc(devip->nr_zones,
5082 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5083 	if (!devip->zstate)
5084 		return -ENOMEM;
5085 
5086 	for (i = 0; i < devip->nr_zones; i++) {
5087 		zsp = &devip->zstate[i];
5088 
5089 		zsp->z_start = zstart;
5090 
5091 		if (i < devip->nr_conv_zones) {
5092 			zsp->z_type = ZBC_ZTYPE_CNV;
5093 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5094 			zsp->z_wp = (sector_t)-1;
5095 			zsp->z_size =
5096 				min_t(u64, devip->zsize, capacity - zstart);
5097 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5098 			if (devip->zmodel == BLK_ZONED_HM)
5099 				zsp->z_type = ZBC_ZTYPE_SWR;
5100 			else
5101 				zsp->z_type = ZBC_ZTYPE_SWP;
5102 			zsp->z_cond = ZC1_EMPTY;
5103 			zsp->z_wp = zsp->z_start;
5104 			zsp->z_size =
5105 				min_t(u64, devip->zcap, capacity - zstart);
5106 		} else {
5107 			zsp->z_type = ZBC_ZTYPE_GAP;
5108 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5109 			zsp->z_wp = (sector_t)-1;
5110 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5111 					    capacity - zstart);
5112 		}
5113 
5114 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5115 		zstart += zsp->z_size;
5116 	}
5117 
5118 	return 0;
5119 }
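/*
 * Sizing sketch for the auto-sized case above (assuming the default
 * DEF_ZBC_ZONE_SIZE_MB of 128 and 512-byte sectors): the initial zone
 * size is (128 * SZ_1M) >> 9 = 262144 sectors. A 256 MiB store holds
 * 524288 sectors, which is less than 4 such zones, so zsize is halved
 * once to 131072 sectors, giving zsize_shift = 17 and nr_zones = 4.
 */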
5120 
5121 static struct sdebug_dev_info *sdebug_device_create(
5122 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5123 {
5124 	struct sdebug_dev_info *devip;
5125 
5126 	devip = kzalloc(sizeof(*devip), flags);
5127 	if (devip) {
5128 		if (sdebug_uuid_ctl == 1)
5129 			uuid_gen(&devip->lu_name);
5130 		else if (sdebug_uuid_ctl == 2) {
5131 			if (got_shared_uuid)
5132 				devip->lu_name = shared_uuid;
5133 			else {
5134 				uuid_gen(&shared_uuid);
5135 				got_shared_uuid = true;
5136 				devip->lu_name = shared_uuid;
5137 			}
5138 		}
5139 		devip->sdbg_host = sdbg_host;
5140 		if (sdeb_zbc_in_use) {
5141 			devip->zmodel = sdeb_zbc_model;
5142 			if (sdebug_device_create_zones(devip)) {
5143 				kfree(devip);
5144 				return NULL;
5145 			}
5146 		} else {
5147 			devip->zmodel = BLK_ZONED_NONE;
5148 		}
5149 		devip->create_ts = ktime_get_boottime();
5150 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5151 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5152 	}
5153 	return devip;
5154 }
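/*
 * With uuid_ctl=2 the first device created generates shared_uuid and
 * every subsequent LU copies it, so all LUs report the same LU name;
 * with uuid_ctl=1 each LU gets its own freshly generated UUID.
 */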
5155 
5156 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5157 {
5158 	struct sdebug_host_info *sdbg_host;
5159 	struct sdebug_dev_info *open_devip = NULL;
5160 	struct sdebug_dev_info *devip;
5161 
5162 	sdbg_host = shost_to_sdebug_host(sdev->host);
5163 
5164 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5165 		if ((devip->used) && (devip->channel == sdev->channel) &&
5166 		    (devip->target == sdev->id) &&
5167 		    (devip->lun == sdev->lun))
5168 			return devip;
5169 		else {
5170 			if ((!devip->used) && (!open_devip))
5171 				open_devip = devip;
5172 		}
5173 	}
5174 	if (!open_devip) { /* try and make a new one */
5175 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5176 		if (!open_devip) {
5177 			pr_err("out of memory at line %d\n", __LINE__);
5178 			return NULL;
5179 		}
5180 	}
5181 
5182 	open_devip->channel = sdev->channel;
5183 	open_devip->target = sdev->id;
5184 	open_devip->lun = sdev->lun;
5185 	open_devip->sdbg_host = sdbg_host;
5186 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5187 	open_devip->used = true;
5188 	return open_devip;
5189 }
5190 
5191 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5192 {
5193 	if (sdebug_verbose)
5194 		pr_info("slave_alloc <%u %u %u %llu>\n",
5195 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5196 	return 0;
5197 }
5198 
5199 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5200 {
5201 	struct sdebug_dev_info *devip =
5202 			(struct sdebug_dev_info *)sdp->hostdata;
5203 
5204 	if (sdebug_verbose)
5205 		pr_info("slave_configure <%u %u %u %llu>\n",
5206 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5207 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5208 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5209 	if (devip == NULL) {
5210 		devip = find_build_dev_info(sdp);
5211 		if (devip == NULL)
5212 			return 1;  /* no resources, will be marked offline */
5213 	}
5214 	sdp->hostdata = devip;
5215 	if (sdebug_no_uld)
5216 		sdp->no_uld_attach = 1;
5217 	config_cdb_len(sdp);
5218 	return 0;
5219 }
5220 
5221 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5222 {
5223 	struct sdebug_dev_info *devip =
5224 		(struct sdebug_dev_info *)sdp->hostdata;
5225 
5226 	if (sdebug_verbose)
5227 		pr_info("slave_destroy <%u %u %u %llu>\n",
5228 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5229 	if (devip) {
5230 		/* make this slot available for re-use */
5231 		devip->used = false;
5232 		sdp->hostdata = NULL;
5233 	}
5234 }
5235 
5236 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5237 			   enum sdeb_defer_type defer_t)
5238 {
5239 	if (!sd_dp)
5240 		return;
5241 	if (defer_t == SDEB_DEFER_HRT)
5242 		hrtimer_cancel(&sd_dp->hrt);
5243 	else if (defer_t == SDEB_DEFER_WQ)
5244 		cancel_work_sync(&sd_dp->ew.work);
5245 }
5246 
5247 /* If @cmnd is found, delete its timer or work queue and return true;
5248    else return false. */
5249 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5250 {
5251 	unsigned long iflags;
5252 	int j, k, qmax, r_qmax;
5253 	enum sdeb_defer_type l_defer_t;
5254 	struct sdebug_queue *sqp;
5255 	struct sdebug_queued_cmd *sqcp;
5256 	struct sdebug_defer *sd_dp;
5257 
5258 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5259 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5260 		qmax = sdebug_max_queue;
5261 		r_qmax = atomic_read(&retired_max_queue);
5262 		if (r_qmax > qmax)
5263 			qmax = r_qmax;
5264 		for (k = 0; k < qmax; ++k) {
5265 			if (test_bit(k, sqp->in_use_bm)) {
5266 				sqcp = &sqp->qc_arr[k];
5267 				if (cmnd != sqcp->a_cmnd)
5268 					continue;
5269 				/* found */
5270 				sqcp->a_cmnd = NULL;
5271 				sd_dp = sqcp->sd_dp;
5272 				if (sd_dp) {
5273 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5274 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5275 				} else
5276 					l_defer_t = SDEB_DEFER_NONE;
5277 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5278 				stop_qc_helper(sd_dp, l_defer_t);
5279 				clear_bit(k, sqp->in_use_bm);
5280 				return true;
5281 			}
5282 		}
5283 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5284 	}
5285 	return false;
5286 }
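/*
 * Note that qc_lock is dropped before stop_qc_helper() runs: both
 * hrtimer_cancel() and cancel_work_sync() wait for a completion handler
 * that itself takes qc_lock (sdebug_q_cmd_complete()), so calling them
 * with the lock held could deadlock.
 */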
5287 
5288 /* Deletes (stops) timers or work queues of all queued commands */
5289 static void stop_all_queued(void)
5290 {
5291 	unsigned long iflags;
5292 	int j, k;
5293 	enum sdeb_defer_type l_defer_t;
5294 	struct sdebug_queue *sqp;
5295 	struct sdebug_queued_cmd *sqcp;
5296 	struct sdebug_defer *sd_dp;
5297 
5298 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5299 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5300 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5301 			if (test_bit(k, sqp->in_use_bm)) {
5302 				sqcp = &sqp->qc_arr[k];
5303 				if (sqcp->a_cmnd == NULL)
5304 					continue;
5305 				sqcp->a_cmnd = NULL;
5306 				sd_dp = sqcp->sd_dp;
5307 				if (sd_dp) {
5308 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5309 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5310 				} else
5311 					l_defer_t = SDEB_DEFER_NONE;
5312 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5313 				stop_qc_helper(sd_dp, l_defer_t);
5314 				clear_bit(k, sqp->in_use_bm);
5315 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5316 			}
5317 		}
5318 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5319 	}
5320 }
5321 
5322 /* Free queued command memory on heap */
5323 static void free_all_queued(void)
5324 {
5325 	int j, k;
5326 	struct sdebug_queue *sqp;
5327 	struct sdebug_queued_cmd *sqcp;
5328 
5329 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5330 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5331 			sqcp = &sqp->qc_arr[k];
5332 			kfree(sqcp->sd_dp);
5333 			sqcp->sd_dp = NULL;
5334 		}
5335 	}
5336 }
5337 
5338 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5339 {
5340 	bool ok;
5341 
5342 	++num_aborts;
5343 
5344 	ok = stop_queued_cmnd(SCpnt);
5345 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5346 		sdev_printk(KERN_INFO, SCpnt->device,
5347 			    "%s: command%s found\n", __func__,
5348 			    ok ? "" : " not");
5349 
5350 	return SUCCESS;
5351 }
5352 
5353 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5354 {
5355 	struct scsi_device *sdp = SCpnt->device;
5356 	struct sdebug_dev_info *devip = sdp->hostdata;
5357 
5358 	++num_dev_resets;
5359 
5360 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5362 	if (devip)
5363 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5364 
5365 	return SUCCESS;
5366 }
5367 
5368 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5369 {
5370 	struct scsi_device *sdp = SCpnt->device;
5371 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5372 	struct sdebug_dev_info *devip;
5373 	int k = 0;
5374 
5375 	++num_target_resets;
5376 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5377 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5378 
5379 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5380 		if (devip->target == sdp->id) {
5381 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5382 			++k;
5383 		}
5384 	}
5385 
5386 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5387 		sdev_printk(KERN_INFO, sdp,
5388 			    "%s: %d device(s) found in target\n", __func__, k);
5389 
5390 	return SUCCESS;
5391 }
5392 
5393 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5394 {
5395 	struct scsi_device *sdp = SCpnt->device;
5396 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5397 	struct sdebug_dev_info *devip;
5398 	int k = 0;
5399 
5400 	++num_bus_resets;
5401 
5402 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5403 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5404 
5405 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5406 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5407 		++k;
5408 	}
5409 
5410 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5411 		sdev_printk(KERN_INFO, sdp,
5412 			    "%s: %d device(s) found in host\n", __func__, k);
5413 	return SUCCESS;
5414 }
5415 
5416 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5417 {
5418 	struct sdebug_host_info *sdbg_host;
5419 	struct sdebug_dev_info *devip;
5420 	int k = 0;
5421 
5422 	++num_host_resets;
5423 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5424 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5425 	mutex_lock(&sdebug_host_list_mutex);
5426 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5427 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5428 				    dev_list) {
5429 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5430 			++k;
5431 		}
5432 	}
5433 	mutex_unlock(&sdebug_host_list_mutex);
5434 	stop_all_queued();
5435 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5436 		sdev_printk(KERN_INFO, SCpnt->device,
5437 			    "%s: %d device(s) found\n", __func__, k);
5438 	return SUCCESS;
5439 }
5440 
5441 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5442 {
5443 	struct msdos_partition *pp;
5444 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5445 	int sectors_per_part, num_sectors, k;
5446 	int heads_by_sects, start_sec, end_sec;
5447 
5448 	/* assume partition table already zeroed */
5449 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5450 		return;
5451 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5452 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5453 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5454 	}
5455 	num_sectors = (int)get_sdebug_capacity();
5456 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5457 			   / sdebug_num_parts;
5458 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5459 	starts[0] = sdebug_sectors_per;
5460 	max_part_secs = sectors_per_part;
5461 	for (k = 1; k < sdebug_num_parts; ++k) {
5462 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5463 			    * heads_by_sects;
5464 		if (starts[k] - starts[k - 1] < max_part_secs)
5465 			max_part_secs = starts[k] - starts[k - 1];
5466 	}
5467 	starts[sdebug_num_parts] = num_sectors;
5468 	starts[sdebug_num_parts + 1] = 0;
5469 
5470 	ramp[510] = 0x55;	/* magic partition markings */
5471 	ramp[511] = 0xAA;
5472 	pp = (struct msdos_partition *)(ramp + 0x1be);
5473 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5474 		start_sec = starts[k];
5475 		end_sec = starts[k] + max_part_secs - 1;
5476 		pp->boot_ind = 0;
5477 
5478 		pp->cyl = start_sec / heads_by_sects;
5479 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5480 			   / sdebug_sectors_per;
5481 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5482 
5483 		pp->end_cyl = end_sec / heads_by_sects;
5484 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5485 			       / sdebug_sectors_per;
5486 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5487 
5488 		pp->start_sect = cpu_to_le32(start_sec);
5489 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5490 		pp->sys_ind = 0x83;	/* plain Linux partition */
5491 	}
5492 }
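/*
 * CHS mapping sketch with a hypothetical geometry of sdebug_heads = 8
 * and sdebug_sectors_per = 32 (so heads_by_sects = 256): a partition
 * starting at sector 512 is encoded as cyl = 512 / 256 = 2,
 * head = (512 - 2 * 256) / 32 = 0 and sector = (512 % 32) + 1 = 1.
 */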
5493 
5494 static void block_unblock_all_queues(bool block)
5495 {
5496 	struct sdebug_host_info *sdhp;
5497 
5498 	lockdep_assert_held(&sdebug_host_list_mutex);
5499 
5500 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5501 		struct Scsi_Host *shost = sdhp->shost;
5502 
5503 		if (block)
5504 			scsi_block_requests(shost);
5505 		else
5506 			scsi_unblock_requests(shost);
5507 	}
5508 }
5509 
5510 /* Adjust (by rounding down) sdebug_cmnd_count so that abs(every_nth)-1
5511  * commands are processed normally before the next trigger occurs.
5512  */
5513 static void tweak_cmnd_count(void)
5514 {
5515 	int count, modulo;
5516 
5517 	modulo = abs(sdebug_every_nth);
5518 	if (modulo < 2)
5519 		return;
5520 
5521 	mutex_lock(&sdebug_host_list_mutex);
5522 	block_unblock_all_queues(true);
5523 	count = atomic_read(&sdebug_cmnd_count);
5524 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5525 	block_unblock_all_queues(false);
5526 	mutex_unlock(&sdebug_host_list_mutex);
5527 }
5528 
5529 static void clear_queue_stats(void)
5530 {
5531 	atomic_set(&sdebug_cmnd_count, 0);
5532 	atomic_set(&sdebug_completions, 0);
5533 	atomic_set(&sdebug_miss_cpus, 0);
5534 	atomic_set(&sdebug_a_tsf, 0);
5535 }
5536 
5537 static bool inject_on_this_cmd(void)
5538 {
5539 	if (sdebug_every_nth == 0)
5540 		return false;
5541 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5542 }
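/*
 * e.g. with every_nth = 100, inject_on_this_cmd() returns true whenever
 * sdebug_cmnd_count is a multiple of 100; tweak_cmnd_count() above
 * rounds the count down (say from 250 to 200) so a full cycle of 99
 * normally processed commands precedes the next injection.
 */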
5543 
5544 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5545 
5546 /* Complete the processing of the thread that queued a SCSI command to this
5547  * driver. It either completes the command by calling scsi_done() or
5548  * schedules a hrtimer or work queue invocation and then returns 0. Returns
5549  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5550  */
5551 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5552 			 int scsi_result,
5553 			 int (*pfp)(struct scsi_cmnd *,
5554 				    struct sdebug_dev_info *),
5555 			 int delta_jiff, int ndelay)
5556 {
5557 	bool new_sd_dp;
5558 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5559 	int k;
5560 	unsigned long iflags;
5561 	u64 ns_from_boot = 0;
5562 	struct sdebug_queue *sqp;
5563 	struct sdebug_queued_cmd *sqcp;
5564 	struct scsi_device *sdp;
5565 	struct sdebug_defer *sd_dp;
5566 
5567 	if (unlikely(devip == NULL)) {
5568 		if (scsi_result == 0)
5569 			scsi_result = DID_NO_CONNECT << 16;
5570 		goto respond_in_thread;
5571 	}
5572 	sdp = cmnd->device;
5573 
5574 	if (delta_jiff == 0)
5575 		goto respond_in_thread;
5576 
5577 	sqp = get_queue(cmnd);
5578 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5579 
5580 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5581 		     (scsi_result == 0))) {
5582 		int num_in_q = scsi_device_busy(sdp);
5583 		int qdepth = cmnd->device->queue_depth;
5584 
5585 		if ((num_in_q == qdepth) &&
5586 		    (atomic_inc_return(&sdebug_a_tsf) >=
5587 		     abs(sdebug_every_nth))) {
5588 			atomic_set(&sdebug_a_tsf, 0);
5589 			scsi_result = device_qfull_result;
5590 
5591 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5592 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5593 					    __func__, num_in_q);
5594 		}
5595 	}
5596 
5597 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5598 	if (unlikely(k >= sdebug_max_queue)) {
5599 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5600 		if (scsi_result)
5601 			goto respond_in_thread;
5602 		scsi_result = device_qfull_result;
5603 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5604 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5605 				    __func__, sdebug_max_queue);
5606 		goto respond_in_thread;
5607 	}
5608 	set_bit(k, sqp->in_use_bm);
5609 	sqcp = &sqp->qc_arr[k];
5610 	sqcp->a_cmnd = cmnd;
5611 	cmnd->host_scribble = (unsigned char *)sqcp;
5612 	sd_dp = sqcp->sd_dp;
5613 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5614 
5615 	if (!sd_dp) {
5616 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5617 		if (!sd_dp) {
5618 			clear_bit(k, sqp->in_use_bm);
5619 			return SCSI_MLQUEUE_HOST_BUSY;
5620 		}
5621 		new_sd_dp = true;
5622 	} else {
5623 		new_sd_dp = false;
5624 	}
5625 
5626 	/* Set the hostwide tag */
5627 	if (sdebug_host_max_queue)
5628 		sd_dp->hc_idx = get_tag(cmnd);
5629 
5630 	if (polled)
5631 		ns_from_boot = ktime_get_boottime_ns();
5632 
5633 	/* one of the resp_*() response functions is called here */
5634 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5635 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5636 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5637 		delta_jiff = ndelay = 0;
5638 	}
5639 	if (cmnd->result == 0 && scsi_result != 0)
5640 		cmnd->result = scsi_result;
5641 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5642 		if (atomic_read(&sdeb_inject_pending)) {
5643 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5644 			atomic_set(&sdeb_inject_pending, 0);
5645 			cmnd->result = check_condition_result;
5646 		}
5647 	}
5648 
5649 	if (unlikely(sdebug_verbose && cmnd->result))
5650 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5651 			    __func__, cmnd->result);
5652 
5653 	if (delta_jiff > 0 || ndelay > 0) {
5654 		ktime_t kt;
5655 
5656 		if (delta_jiff > 0) {
5657 			u64 ns = jiffies_to_nsecs(delta_jiff);
5658 
5659 			if (sdebug_random && ns < U32_MAX) {
5660 				ns = get_random_u32_below((u32)ns);
5661 			} else if (sdebug_random) {
5662 				ns >>= 12;	/* scale to 4 usec precision */
5663 				if (ns < U32_MAX)	/* over 4 hours max */
5664 					ns = get_random_u32_below((u32)ns);
5665 				ns <<= 12;
5666 			}
5667 			kt = ns_to_ktime(ns);
5668 		} else {	/* ndelay has a 4.2 second max */
5669 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5670 					     (u32)ndelay;
5671 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5672 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5673 
5674 				if (kt <= d) {	/* elapsed duration >= kt */
5675 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5676 					sqcp->a_cmnd = NULL;
5677 					clear_bit(k, sqp->in_use_bm);
5678 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5679 					if (new_sd_dp)
5680 						kfree(sd_dp);
5681 					/* call scsi_done() from this thread */
5682 					scsi_done(cmnd);
5683 					return 0;
5684 				}
5685 				/* otherwise reduce kt by elapsed time */
5686 				kt -= d;
5687 			}
5688 		}
5689 		if (polled) {
5690 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5691 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5692 			if (!sd_dp->init_poll) {
5693 				sd_dp->init_poll = true;
5694 				sqcp->sd_dp = sd_dp;
5695 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5696 				sd_dp->qc_idx = k;
5697 			}
5698 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5699 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5700 		} else {
5701 			if (!sd_dp->init_hrt) {
5702 				sd_dp->init_hrt = true;
5703 				sqcp->sd_dp = sd_dp;
5704 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5705 					     HRTIMER_MODE_REL_PINNED);
5706 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5707 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5708 				sd_dp->qc_idx = k;
5709 			}
5710 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5711 			/* schedule the invocation of scsi_done() for a later time */
5712 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5713 		}
5714 		if (sdebug_statistics)
5715 			sd_dp->issuing_cpu = raw_smp_processor_id();
5716 	} else {	/* jdelay < 0, use work queue */
5717 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5718 			     atomic_read(&sdeb_inject_pending))) {
5719 			sd_dp->aborted = true;
5720 			atomic_set(&sdeb_inject_pending, 0);
5721 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5722 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5723 		}
5724 
5725 		if (polled) {
5726 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5727 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5728 			if (!sd_dp->init_poll) {
5729 				sd_dp->init_poll = true;
5730 				sqcp->sd_dp = sd_dp;
5731 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5732 				sd_dp->qc_idx = k;
5733 			}
5734 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5735 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5736 		} else {
5737 			if (!sd_dp->init_wq) {
5738 				sd_dp->init_wq = true;
5739 				sqcp->sd_dp = sd_dp;
5740 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5741 				sd_dp->qc_idx = k;
5742 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5743 			}
5744 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5745 			schedule_work(&sd_dp->ew.work);
5746 		}
5747 		if (sdebug_statistics)
5748 			sd_dp->issuing_cpu = raw_smp_processor_id();
5749 	}
5750 
5751 	return 0;
5752 
5753 respond_in_thread:	/* call back to mid-layer using invocation thread */
5754 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5755 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5756 	if (cmnd->result == 0 && scsi_result != 0)
5757 		cmnd->result = scsi_result;
5758 	scsi_done(cmnd);
5759 	return 0;
5760 }
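/*
 * To summarize the completion paths above: delta_jiff == 0 answers in
 * the caller's thread via scsi_done(); a positive delay arms a pinned
 * hrtimer (or, for REQ_POLLED requests, just stamps cmpl_ts and leaves
 * completion to the poll handler); a negative delay routes through the
 * system work queue. In every deferred case sdebug_q_cmd_complete()
 * finishes the command.
 */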
5761 
5762 /* Note: The following macros create attribute files in the
5763    /sys/module/scsi_debug/parameters directory. Unfortunately this
5764    driver is unaware of changes made through those files and cannot
5765    trigger auxiliary actions as it can when the corresponding attribute in the
5766    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5767  */
5768 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5769 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5770 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5771 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5772 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5773 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5774 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5775 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5776 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5777 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5778 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5779 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5780 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5781 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5782 module_param_string(inq_product, sdebug_inq_product_id,
5783 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5784 module_param_string(inq_rev, sdebug_inq_product_rev,
5785 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5786 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5787 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5788 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5789 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5790 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5791 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5792 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5793 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5794 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5795 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5796 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5797 		   S_IRUGO | S_IWUSR);
5798 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5799 		   S_IRUGO | S_IWUSR);
5800 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5801 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5802 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5803 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5804 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5805 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5806 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5807 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5808 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5809 module_param_named(per_host_store, sdebug_per_host_store, bool,
5810 		   S_IRUGO | S_IWUSR);
5811 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5812 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5813 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5814 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5815 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5816 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5817 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5818 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5819 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5820 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5821 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5822 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5823 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5824 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5825 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5826 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5827 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5828 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5829 		   S_IRUGO | S_IWUSR);
5830 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5831 module_param_named(write_same_length, sdebug_write_same_length, int,
5832 		   S_IRUGO | S_IWUSR);
5833 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5834 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5835 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5836 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5837 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5838 
5839 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5840 MODULE_DESCRIPTION("SCSI debug adapter driver");
5841 MODULE_LICENSE("GPL");
5842 MODULE_VERSION(SDEBUG_VERSION);
5843 
5844 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5845 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5846 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5847 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5848 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5849 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5850 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5851 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5852 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5853 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5854 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5855 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5856 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5857 MODULE_PARM_DESC(host_max_queue,
5858 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5859 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5860 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5861 		 SDEBUG_VERSION "\")");
5862 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5863 MODULE_PARM_DESC(lbprz,
5864 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5865 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5866 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5867 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5868 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5869 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5870 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5871 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5872 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5873 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5874 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5875 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5876 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5877 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5878 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5879 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5880 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5881 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5882 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5883 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5884 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5885 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5886 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5887 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5888 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5889 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5890 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5891 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5892 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5893 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5894 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5895 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5896 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5897 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5898 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5899 MODULE_PARM_DESC(uuid_ctl,
5900 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5901 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5902 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5903 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5904 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5905 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5906 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5907 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5908 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5909 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
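/*
 * Typical invocation (illustrative values only):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=4 \
 *		 zbc=managed zone_size_mb=16 zone_nr_conv=2
 *
 * which creates one host-managed ZBC target whose 256 MiB ram store is
 * split into 16 MiB zones, the first two of them conventional.
 */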
5910 
5911 #define SDEBUG_INFO_LEN 256
5912 static char sdebug_info[SDEBUG_INFO_LEN];
5913 
5914 static const char *scsi_debug_info(struct Scsi_Host *shp)
5915 {
5916 	int k;
5917 
5918 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5919 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5920 	if (k >= (SDEBUG_INFO_LEN - 1))
5921 		return sdebug_info;
5922 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5923 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5924 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5925 		  "statistics", (int)sdebug_statistics);
5926 	return sdebug_info;
5927 }
5928 
5929 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5930 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5931 				 int length)
5932 {
5933 	char arr[16];
5934 	int opts;
5935 	int minLen = length > 15 ? 15 : length;
5936 
5937 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5938 		return -EACCES;
5939 	memcpy(arr, buffer, minLen);
5940 	arr[minLen] = '\0';
5941 	if (1 != sscanf(arr, "%d", &opts))
5942 		return -EINVAL;
5943 	sdebug_opts = opts;
5944 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5945 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5946 	if (sdebug_every_nth != 0)
5947 		tweak_cmnd_count();
5948 	return length;
5949 }
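/*
 * e.g. 'echo 1 > /proc/scsi/scsi_debug/<host_id>' sets opts=1
 * (SDEBUG_OPT_NOISE), turning on verbose logging; 'echo 0 > ...'
 * silences it again.
 */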
5950 
5951 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5952  * same for each scsi_debug host (if more than one). Some of the counters
5953  * output are not atomic so may be inaccurate on a busy system. */
5954 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5955 {
5956 	int f, j, l;
5957 	struct sdebug_queue *sqp;
5958 	struct sdebug_host_info *sdhp;
5959 
5960 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5961 		   SDEBUG_VERSION, sdebug_version_date);
5962 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5963 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5964 		   sdebug_opts, sdebug_every_nth);
5965 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5966 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5967 		   sdebug_sector_size, "bytes");
5968 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5969 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5970 		   num_aborts);
5971 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5972 		   num_dev_resets, num_target_resets, num_bus_resets,
5973 		   num_host_resets);
5974 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5975 		   dix_reads, dix_writes, dif_errors);
5976 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5977 		   sdebug_statistics);
5978 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5979 		   atomic_read(&sdebug_cmnd_count),
5980 		   atomic_read(&sdebug_completions),
5981 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5982 		   atomic_read(&sdebug_a_tsf),
5983 		   atomic_read(&sdeb_mq_poll_count));
5984 
5985 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5986 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5987 		seq_printf(m, "  queue %d:\n", j);
5988 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5989 		if (f != sdebug_max_queue) {
5990 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5991 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5992 				   "first,last bits", f, l);
5993 		}
5994 	}
5995 
5996 	seq_printf(m, "this host_no=%d\n", host->host_no);
5997 	if (!xa_empty(per_store_ap)) {
5998 		bool niu;
5999 		int idx;
6000 		unsigned long l_idx;
6001 		struct sdeb_store_info *sip;
6002 
6003 		seq_puts(m, "\nhost list:\n");
6004 		j = 0;
6005 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6006 			idx = sdhp->si_idx;
6007 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6008 				   sdhp->shost->host_no, idx);
6009 			++j;
6010 		}
6011 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6012 			   sdeb_most_recent_idx);
6013 		j = 0;
6014 		xa_for_each(per_store_ap, l_idx, sip) {
6015 			niu = xa_get_mark(per_store_ap, l_idx,
6016 					  SDEB_XA_NOT_IN_USE);
6017 			idx = (int)l_idx;
6018 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6019 				   (niu ? "  not_in_use" : ""));
6020 			++j;
6021 		}
6022 	}
6023 	return 0;
6024 }
6025 
6026 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6027 {
6028 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6029 }
6030 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6031  * of delay is jiffies.
6032  */
6033 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6034 			   size_t count)
6035 {
6036 	int jdelay, res;
6037 
6038 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6039 		res = count;
6040 		if (sdebug_jdelay != jdelay) {
6041 			int j, k;
6042 			struct sdebug_queue *sqp;
6043 
6044 			mutex_lock(&sdebug_host_list_mutex);
6045 			block_unblock_all_queues(true);
6046 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6047 			     ++j, ++sqp) {
6048 				k = find_first_bit(sqp->in_use_bm,
6049 						   sdebug_max_queue);
6050 				if (k != sdebug_max_queue) {
6051 					res = -EBUSY;   /* queued commands */
6052 					break;
6053 				}
6054 			}
6055 			if (res > 0) {
6056 				sdebug_jdelay = jdelay;
6057 				sdebug_ndelay = 0;
6058 			}
6059 			block_unblock_all_queues(false);
6060 			mutex_unlock(&sdebug_host_list_mutex);
6061 		}
6062 		return res;
6063 	}
6064 	return -EINVAL;
6065 }
6066 static DRIVER_ATTR_RW(delay);
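/*
 * e.g. 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' makes
 * responses immediate, while 'echo -1' selects the tiny (work queue)
 * delay; either write fails with -EBUSY while commands are queued.
 */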
6067 
6068 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6069 {
6070 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6071 }
6072 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6073 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6074 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6075 			    size_t count)
6076 {
6077 	int ndelay, res;
6078 
6079 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6080 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6081 		res = count;
6082 		if (sdebug_ndelay != ndelay) {
6083 			int j, k;
6084 			struct sdebug_queue *sqp;
6085 
6086 			mutex_lock(&sdebug_host_list_mutex);
6087 			block_unblock_all_queues(true);
6088 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6089 			     ++j, ++sqp) {
6090 				k = find_first_bit(sqp->in_use_bm,
6091 						   sdebug_max_queue);
6092 				if (k != sdebug_max_queue) {
6093 					res = -EBUSY;   /* queued commands */
6094 					break;
6095 				}
6096 			}
6097 			if (res > 0) {
6098 				sdebug_ndelay = ndelay;
6099 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6100 							: DEF_JDELAY;
6101 			}
6102 			block_unblock_all_queues(false);
6103 			mutex_unlock(&sdebug_host_list_mutex);
6104 		}
6105 		return res;
6106 	}
6107 	return -EINVAL;
6108 }
6109 static DRIVER_ATTR_RW(ndelay);
6110 
6111 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6112 {
6113 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6114 }
6115 
6116 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6117 			  size_t count)
6118 {
6119 	int opts;
6120 	char work[20];
6121 
6122 	if (sscanf(buf, "%10s", work) == 1) {
6123 		if (strncasecmp(work, "0x", 2) == 0) {
6124 			if (kstrtoint(work + 2, 16, &opts) == 0)
6125 				goto opts_done;
6126 		} else {
6127 			if (kstrtoint(work, 10, &opts) == 0)
6128 				goto opts_done;
6129 		}
6130 	}
6131 	return -EINVAL;
6132 opts_done:
6133 	sdebug_opts = opts;
6134 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6135 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6136 	tweak_cmnd_count();
6137 	return count;
6138 }
6139 static DRIVER_ATTR_RW(opts);
6140 
6141 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6142 {
6143 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6144 }
6145 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6146 			   size_t count)
6147 {
6148 	int n;
6149 
6150 	/* Cannot change from or to TYPE_ZBC with sysfs */
6151 	if (sdebug_ptype == TYPE_ZBC)
6152 		return -EINVAL;
6153 
6154 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6155 		if (n == TYPE_ZBC)
6156 			return -EINVAL;
6157 		sdebug_ptype = n;
6158 		return count;
6159 	}
6160 	return -EINVAL;
6161 }
6162 static DRIVER_ATTR_RW(ptype);
6163 
6164 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6165 {
6166 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6167 }
6168 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6169 			    size_t count)
6170 {
6171 	int n;
6172 
6173 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6174 		sdebug_dsense = n;
6175 		return count;
6176 	}
6177 	return -EINVAL;
6178 }
6179 static DRIVER_ATTR_RW(dsense);
6180 
6181 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6182 {
6183 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6184 }
6185 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6186 			     size_t count)
6187 {
6188 	int n, idx;
6189 
6190 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6191 		bool want_store = (n == 0);
6192 		struct sdebug_host_info *sdhp;
6193 
6194 		n = (n > 0);
6195 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6196 		if (sdebug_fake_rw == n)
6197 			return count;	/* not transitioning so do nothing */
6198 
6199 		if (want_store) {	/* 1 --> 0 transition, set up store */
6200 			if (sdeb_first_idx < 0) {
6201 				idx = sdebug_add_store();
6202 				if (idx < 0)
6203 					return idx;
6204 			} else {
6205 				idx = sdeb_first_idx;
6206 				xa_clear_mark(per_store_ap, idx,
6207 					      SDEB_XA_NOT_IN_USE);
6208 			}
6209 			/* make all hosts use same store */
6210 			list_for_each_entry(sdhp, &sdebug_host_list,
6211 					    host_list) {
6212 				if (sdhp->si_idx != idx) {
6213 					xa_set_mark(per_store_ap, sdhp->si_idx,
6214 						    SDEB_XA_NOT_IN_USE);
6215 					sdhp->si_idx = idx;
6216 				}
6217 			}
6218 			sdeb_most_recent_idx = idx;
6219 		} else {	/* 0 --> 1 transition is trigger for shrink */
6220 			sdebug_erase_all_stores(true /* apart from first */);
6221 		}
6222 		sdebug_fake_rw = n;
6223 		return count;
6224 	}
6225 	return -EINVAL;
6226 }
6227 static DRIVER_ATTR_RW(fake_rw);
6228 
6229 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6230 {
6231 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6232 }
6233 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6234 			      size_t count)
6235 {
6236 	int n;
6237 
6238 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6239 		sdebug_no_lun_0 = n;
6240 		return count;
6241 	}
6242 	return -EINVAL;
6243 }
6244 static DRIVER_ATTR_RW(no_lun_0);
6245 
6246 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6247 {
6248 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6249 }
6250 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6251 			      size_t count)
6252 {
6253 	int n;
6254 
6255 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6256 		sdebug_num_tgts = n;
6257 		sdebug_max_tgts_luns();
6258 		return count;
6259 	}
6260 	return -EINVAL;
6261 }
6262 static DRIVER_ATTR_RW(num_tgts);
6263 
6264 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6265 {
6266 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6267 }
6268 static DRIVER_ATTR_RO(dev_size_mb);
6269 
6270 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6271 {
6272 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6273 }
6274 
6275 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6276 				    size_t count)
6277 {
6278 	bool v;
6279 
6280 	if (kstrtobool(buf, &v))
6281 		return -EINVAL;
6282 
6283 	sdebug_per_host_store = v;
6284 	return count;
6285 }
6286 static DRIVER_ATTR_RW(per_host_store);
6287 
6288 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6289 {
6290 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6291 }
6292 static DRIVER_ATTR_RO(num_parts);
6293 
6294 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6295 {
6296 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6297 }
6298 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6299 			       size_t count)
6300 {
6301 	int nth;
6302 	char work[20];
6303 
6304 	if (sscanf(buf, "%10s", work) == 1) {
6305 		if (strncasecmp(work, "0x", 2) == 0) {
6306 			if (kstrtoint(work + 2, 16, &nth) == 0)
6307 				goto every_nth_done;
6308 		} else {
6309 			if (kstrtoint(work, 10, &nth) == 0)
6310 				goto every_nth_done;
6311 		}
6312 	}
6313 	return -EINVAL;
6314 
6315 every_nth_done:
6316 	sdebug_every_nth = nth;
6317 	if (nth && !sdebug_statistics) {
6318 		pr_info("every_nth needs statistics=1, set it\n");
6319 		sdebug_statistics = true;
6320 	}
6321 	tweak_cmnd_count();
6322 	return count;
6323 }
6324 static DRIVER_ATTR_RW(every_nth);
6325 
6326 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6327 {
6328 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6329 }
6330 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6331 				size_t count)
6332 {
6333 	int n;
6334 	bool changed;
6335 
6336 	if (kstrtoint(buf, 0, &n))
6337 		return -EINVAL;
6338 	if (n >= 0) {
6339 		if (n > (int)SAM_LUN_AM_FLAT) {
6340 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6341 			return -EINVAL;
6342 		}
6343 		changed = ((int)sdebug_lun_am != n);
6344 		sdebug_lun_am = n;
6345 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6346 			struct sdebug_host_info *sdhp;
6347 			struct sdebug_dev_info *dp;
6348 
6349 			mutex_lock(&sdebug_host_list_mutex);
6350 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6351 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6352 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6353 				}
6354 			}
6355 			mutex_unlock(&sdebug_host_list_mutex);
6356 		}
6357 		return count;
6358 	}
6359 	return -EINVAL;
6360 }
6361 static DRIVER_ATTR_RW(lun_format);
6362 
6363 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6364 {
6365 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6366 }
6367 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6368 			      size_t count)
6369 {
6370 	int n;
6371 	bool changed;
6372 
6373 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6374 		if (n > 256) {
6375 			pr_warn("max_luns can be no more than 256\n");
6376 			return -EINVAL;
6377 		}
6378 		changed = (sdebug_max_luns != n);
6379 		sdebug_max_luns = n;
6380 		sdebug_max_tgts_luns();
6381 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6382 			struct sdebug_host_info *sdhp;
6383 			struct sdebug_dev_info *dp;
6384 
6385 			mutex_lock(&sdebug_host_list_mutex);
6386 			list_for_each_entry(sdhp, &sdebug_host_list,
6387 					    host_list) {
6388 				list_for_each_entry(dp, &sdhp->dev_info_list,
6389 						    dev_list) {
6390 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6391 						dp->uas_bm);
6392 				}
6393 			}
6394 			mutex_unlock(&sdebug_host_list_mutex);
6395 		}
6396 		return count;
6397 	}
6398 	return -EINVAL;
6399 }
6400 static DRIVER_ATTR_RW(max_luns);
6401 
6402 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6403 {
6404 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6405 }
6406 /* N.B. max_queue can be changed while there are queued commands. In-flight
6407  * commands beyond the new max_queue will be completed. */
6408 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6409 			       size_t count)
6410 {
6411 	int j, n, k, a;
6412 	struct sdebug_queue *sqp;
6413 
6414 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6415 	    (n <= SDEBUG_CANQUEUE) &&
6416 	    (sdebug_host_max_queue == 0)) {
6417 		mutex_lock(&sdebug_host_list_mutex);
6418 		block_unblock_all_queues(true);
6419 		k = 0;
6420 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6421 		     ++j, ++sqp) {
6422 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6423 			if (a > k)
6424 				k = a;
6425 		}
6426 		sdebug_max_queue = n;
6427 		if (k == SDEBUG_CANQUEUE)
6428 			atomic_set(&retired_max_queue, 0);
6429 		else if (k >= n)
6430 			atomic_set(&retired_max_queue, k + 1);
6431 		else
6432 			atomic_set(&retired_max_queue, 0);
6433 		block_unblock_all_queues(false);
6434 		mutex_unlock(&sdebug_host_list_mutex);
6435 		return count;
6436 	}
6437 	return -EINVAL;
6438 }
6439 static DRIVER_ATTR_RW(max_queue);
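/*
 * e.g. shrinking max_queue from 64 to 16 while slot 30 is still in use
 * sets retired_max_queue to 31, so completions keep scanning up to that
 * index until the in-flight excess drains, after which it drops to 0.
 */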
6440 
6441 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6442 {
6443 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6444 }
6445 
6446 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6447 {
6448 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6449 }
6450 
6451 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6452 {
6453 	bool v;
6454 
6455 	if (kstrtobool(buf, &v))
6456 		return -EINVAL;
6457 
6458 	sdebug_no_rwlock = v;
6459 	return count;
6460 }
6461 static DRIVER_ATTR_RW(no_rwlock);
6462 
6463 /*
6464  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6465  * in range [0, sdebug_host_max_queue), we can't change it.
6466  */
6467 static DRIVER_ATTR_RO(host_max_queue);
6468 
6469 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6470 {
6471 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6472 }
6473 static DRIVER_ATTR_RO(no_uld);
6474 
6475 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6476 {
6477 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6478 }
6479 static DRIVER_ATTR_RO(scsi_level);
6480 
6481 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6482 {
6483 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6484 }
6485 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6486 				size_t count)
6487 {
6488 	int n;
6489 	bool changed;
6490 
6491 	/* Ignore capacity change for ZBC drives for now */
6492 	if (sdeb_zbc_in_use)
6493 		return -EOPNOTSUPP;
6494 
6495 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6496 		changed = (sdebug_virtual_gb != n);
6497 		sdebug_virtual_gb = n;
6498 		sdebug_capacity = get_sdebug_capacity();
6499 		if (changed) {
6500 			struct sdebug_host_info *sdhp;
6501 			struct sdebug_dev_info *dp;
6502 
6503 			mutex_lock(&sdebug_host_list_mutex);
6504 			list_for_each_entry(sdhp, &sdebug_host_list,
6505 					    host_list) {
6506 				list_for_each_entry(dp, &sdhp->dev_info_list,
6507 						    dev_list) {
6508 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6509 						dp->uas_bm);
6510 				}
6511 			}
6512 			mutex_unlock(&sdebug_host_list_mutex);
6513 		}
6514 		return count;
6515 	}
6516 	return -EINVAL;
6517 }
6518 static DRIVER_ATTR_RW(virtual_gb);
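
/*
 * Illustrative usage (hypothetical value): a write recomputes the simulated
 * capacity and, on an actual change, sets SDEBUG_UA_CAPACITY_CHANGED on
 * every sdebug device; writes are rejected while a ZBC model is in use.
 *
 *     echo 8 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */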
6519 
6520 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6521 {
6522 	/* Show the absolute number of currently active hosts. */
6523 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6524 }
6525 
6526 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6527 			      size_t count)
6528 {
6529 	bool found;
6530 	unsigned long idx;
6531 	struct sdeb_store_info *sip;
6532 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6533 	int delta_hosts;
6534 
6535 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6536 		return -EINVAL;
6537 	if (delta_hosts > 0) {
6538 		do {
6539 			found = false;
6540 			if (want_phs) {
6541 				xa_for_each_marked(per_store_ap, idx, sip,
6542 						   SDEB_XA_NOT_IN_USE) {
6543 					sdeb_most_recent_idx = (int)idx;
6544 					found = true;
6545 					break;
6546 				}
6547 				if (found)	/* re-use case */
6548 					sdebug_add_host_helper((int)idx);
6549 				else
6550 					sdebug_do_add_host(true);
6551 			} else {
6552 				sdebug_do_add_host(false);
6553 			}
6554 		} while (--delta_hosts);
6555 	} else if (delta_hosts < 0) {
6556 		do {
6557 			sdebug_do_remove_host(false);
6558 		} while (++delta_hosts);
6559 	}
6560 	return count;
6561 }
6562 static DRIVER_ATTR_RW(add_host);
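
/*
 * Illustrative usage (hypothetical values): a positive number adds that many
 * hosts, re-using stores marked SDEB_XA_NOT_IN_USE when per_host_store is
 * active with fake_rw=0; a negative number removes hosts one at a time.
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */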
6563 
6564 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6565 {
6566 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6567 }
6568 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6569 				    size_t count)
6570 {
6571 	int n;
6572 
6573 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6574 		sdebug_vpd_use_hostno = n;
6575 		return count;
6576 	}
6577 	return -EINVAL;
6578 }
6579 static DRIVER_ATTR_RW(vpd_use_hostno);
6580 
6581 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6582 {
6583 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6584 }
6585 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6586 				size_t count)
6587 {
6588 	int n;
6589 
6590 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6591 		if (n > 0)
6592 			sdebug_statistics = true;
6593 		else {
6594 			clear_queue_stats();
6595 			sdebug_statistics = false;
6596 		}
6597 		return count;
6598 	}
6599 	return -EINVAL;
6600 }
6601 static DRIVER_ATTR_RW(statistics);
6602 
6603 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6604 {
6605 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6606 }
6607 static DRIVER_ATTR_RO(sector_size);
6608 
6609 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6610 {
6611 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6612 }
6613 static DRIVER_ATTR_RO(submit_queues);
6614 
6615 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6616 {
6617 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6618 }
6619 static DRIVER_ATTR_RO(dix);
6620 
6621 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6622 {
6623 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6624 }
6625 static DRIVER_ATTR_RO(dif);
6626 
6627 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6628 {
6629 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6630 }
6631 static DRIVER_ATTR_RO(guard);
6632 
6633 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6634 {
6635 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6636 }
6637 static DRIVER_ATTR_RO(ato);
6638 
6639 static ssize_t map_show(struct device_driver *ddp, char *buf)
6640 {
6641 	ssize_t count = 0;
6642 
6643 	if (!scsi_debug_lbp())
6644 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6645 				 sdebug_store_sectors);
6646 
6647 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6648 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6649 
6650 		if (sip)
6651 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6652 					  (int)map_size, sip->map_storep);
6653 	}
6654 	buf[count++] = '\n';
6655 	buf[count] = '\0';
6656 
6657 	return count;
6658 }
6659 static DRIVER_ATTR_RO(map);
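
/*
 * Example output (hypothetical): with logical block provisioning active,
 * reading 'map' yields a %pbl-style range list of mapped provisioning
 * blocks from store 0, e.g. "0-1,64-127"; without LBP the whole range
 * "0-<store_sectors>" is reported.
 */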
6660 
6661 static ssize_t random_show(struct device_driver *ddp, char *buf)
6662 {
6663 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6664 }
6665 
6666 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6667 			    size_t count)
6668 {
6669 	bool v;
6670 
6671 	if (kstrtobool(buf, &v))
6672 		return -EINVAL;
6673 
6674 	sdebug_random = v;
6675 	return count;
6676 }
6677 static DRIVER_ATTR_RW(random);
6678 
6679 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6680 {
6681 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6682 }
6683 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6684 			       size_t count)
6685 {
6686 	int n;
6687 
6688 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6689 		sdebug_removable = (n > 0);
6690 		return count;
6691 	}
6692 	return -EINVAL;
6693 }
6694 static DRIVER_ATTR_RW(removable);
6695 
6696 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6697 {
6698 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6699 }
6700 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6701 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6702 			       size_t count)
6703 {
6704 	int n;
6705 
6706 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6707 		sdebug_host_lock = (n > 0);
6708 		return count;
6709 	}
6710 	return -EINVAL;
6711 }
6712 static DRIVER_ATTR_RW(host_lock);
6713 
6714 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6715 {
6716 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6717 }
6718 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6719 			    size_t count)
6720 {
6721 	int n;
6722 
6723 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6724 		sdebug_strict = (n > 0);
6725 		return count;
6726 	}
6727 	return -EINVAL;
6728 }
6729 static DRIVER_ATTR_RW(strict);
6730 
6731 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6732 {
6733 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6734 }
6735 static DRIVER_ATTR_RO(uuid_ctl);
6736 
6737 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6738 {
6739 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6740 }
6741 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6742 			     size_t count)
6743 {
6744 	int ret, n;
6745 
6746 	ret = kstrtoint(buf, 0, &n);
6747 	if (ret)
6748 		return ret;
6749 	sdebug_cdb_len = n;
6750 	all_config_cdb_len();
6751 	return count;
6752 }
6753 static DRIVER_ATTR_RW(cdb_len);
6754 
6755 static const char * const zbc_model_strs_a[] = {
6756 	[BLK_ZONED_NONE] = "none",
6757 	[BLK_ZONED_HA]   = "host-aware",
6758 	[BLK_ZONED_HM]   = "host-managed",
6759 };
6760 
6761 static const char * const zbc_model_strs_b[] = {
6762 	[BLK_ZONED_NONE] = "no",
6763 	[BLK_ZONED_HA]   = "aware",
6764 	[BLK_ZONED_HM]   = "managed",
6765 };
6766 
6767 static const char * const zbc_model_strs_c[] = {
6768 	[BLK_ZONED_NONE] = "0",
6769 	[BLK_ZONED_HA]   = "1",
6770 	[BLK_ZONED_HM]   = "2",
6771 };
6772 
6773 static int sdeb_zbc_model_str(const char *cp)
6774 {
6775 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6776 
6777 	if (res < 0) {
6778 		res = sysfs_match_string(zbc_model_strs_b, cp);
6779 		if (res < 0) {
6780 			res = sysfs_match_string(zbc_model_strs_c, cp);
6781 			if (res < 0)
6782 				return -EINVAL;
6783 		}
6784 	}
6785 	return res;
6786 }
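
/*
 * Example (follows from the three tables above): the zbc= parameter accepts
 * any of the spellings for a model, so "host-managed", "managed" and "2"
 * all select BLK_ZONED_HM, while "none", "no" and "0" select BLK_ZONED_NONE.
 */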
6787 
6788 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6789 {
6790 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6791 			 zbc_model_strs_a[sdeb_zbc_model]);
6792 }
6793 static DRIVER_ATTR_RO(zbc);
6794 
6795 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6796 {
6797 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6798 }
6799 static DRIVER_ATTR_RO(tur_ms_to_ready);
6800 
6801 /* Note: The following array creates attribute files in the
6802  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6803  * files (over those found in the /sys/module/scsi_debug/parameters
6804  * directory) is that auxiliary actions can be triggered when an attribute
6805  * is changed. For example, see add_host_store() above.
6806  */
6807 
6808 static struct attribute *sdebug_drv_attrs[] = {
6809 	&driver_attr_delay.attr,
6810 	&driver_attr_opts.attr,
6811 	&driver_attr_ptype.attr,
6812 	&driver_attr_dsense.attr,
6813 	&driver_attr_fake_rw.attr,
6814 	&driver_attr_host_max_queue.attr,
6815 	&driver_attr_no_lun_0.attr,
6816 	&driver_attr_num_tgts.attr,
6817 	&driver_attr_dev_size_mb.attr,
6818 	&driver_attr_num_parts.attr,
6819 	&driver_attr_every_nth.attr,
6820 	&driver_attr_lun_format.attr,
6821 	&driver_attr_max_luns.attr,
6822 	&driver_attr_max_queue.attr,
6823 	&driver_attr_no_rwlock.attr,
6824 	&driver_attr_no_uld.attr,
6825 	&driver_attr_scsi_level.attr,
6826 	&driver_attr_virtual_gb.attr,
6827 	&driver_attr_add_host.attr,
6828 	&driver_attr_per_host_store.attr,
6829 	&driver_attr_vpd_use_hostno.attr,
6830 	&driver_attr_sector_size.attr,
6831 	&driver_attr_statistics.attr,
6832 	&driver_attr_submit_queues.attr,
6833 	&driver_attr_dix.attr,
6834 	&driver_attr_dif.attr,
6835 	&driver_attr_guard.attr,
6836 	&driver_attr_ato.attr,
6837 	&driver_attr_map.attr,
6838 	&driver_attr_random.attr,
6839 	&driver_attr_removable.attr,
6840 	&driver_attr_host_lock.attr,
6841 	&driver_attr_ndelay.attr,
6842 	&driver_attr_strict.attr,
6843 	&driver_attr_uuid_ctl.attr,
6844 	&driver_attr_cdb_len.attr,
6845 	&driver_attr_tur_ms_to_ready.attr,
6846 	&driver_attr_zbc.attr,
6847 	NULL,
6848 };
6849 ATTRIBUTE_GROUPS(sdebug_drv);
6850 
6851 static struct device *pseudo_primary;
6852 
6853 static int __init scsi_debug_init(void)
6854 {
6855 	bool want_store = (sdebug_fake_rw == 0);
6856 	unsigned long sz;
6857 	int k, ret, hosts_to_add;
6858 	int idx = -1;
6859 
6860 	ramdisk_lck_a[0] = &atomic_rw;
6861 	ramdisk_lck_a[1] = &atomic_rw2;
6862 	atomic_set(&retired_max_queue, 0);
6863 
6864 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6865 		pr_warn("ndelay must be less than 1 second, ignored\n");
6866 		sdebug_ndelay = 0;
6867 	} else if (sdebug_ndelay > 0)
6868 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6869 
6870 	switch (sdebug_sector_size) {
6871 	case  512:
6872 	case 1024:
6873 	case 2048:
6874 	case 4096:
6875 		break;
6876 	default:
6877 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6878 		return -EINVAL;
6879 	}
6880 
6881 	switch (sdebug_dif) {
6882 	case T10_PI_TYPE0_PROTECTION:
6883 		break;
6884 	case T10_PI_TYPE1_PROTECTION:
6885 	case T10_PI_TYPE2_PROTECTION:
6886 	case T10_PI_TYPE3_PROTECTION:
6887 		have_dif_prot = true;
6888 		break;
6889 
6890 	default:
6891 		pr_err("dif must be 0, 1, 2 or 3\n");
6892 		return -EINVAL;
6893 	}
6894 
6895 	if (sdebug_num_tgts < 0) {
6896 		pr_err("num_tgts must be >= 0\n");
6897 		return -EINVAL;
6898 	}
6899 
6900 	if (sdebug_guard > 1) {
6901 		pr_err("guard must be 0 or 1\n");
6902 		return -EINVAL;
6903 	}
6904 
6905 	if (sdebug_ato > 1) {
6906 		pr_err("ato must be 0 or 1\n");
6907 		return -EINVAL;
6908 	}
6909 
6910 	if (sdebug_physblk_exp > 15) {
6911 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6912 		return -EINVAL;
6913 	}
6914 
6915 	sdebug_lun_am = sdebug_lun_am_i;
6916 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6917 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6918 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6919 	}
6920 
6921 	if (sdebug_max_luns > 256) {
6922 		if (sdebug_max_luns > 16384) {
6923 			pr_warn("max_luns can be no more than 16384, using default\n");
6924 			sdebug_max_luns = DEF_MAX_LUNS;
6925 		}
6926 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6927 	}
6928 
6929 	if (sdebug_lowest_aligned > 0x3fff) {
6930 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6931 		return -EINVAL;
6932 	}
6933 
6934 	if (submit_queues < 1) {
6935 		pr_err("submit_queues must be 1 or more\n");
6936 		return -EINVAL;
6937 	}
6938 
6939 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6940 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6941 		return -EINVAL;
6942 	}
6943 
6944 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6945 	    (sdebug_host_max_queue < 0)) {
6946 		pr_err("host_max_queue must be in range [0, %d]\n",
6947 		       SDEBUG_CANQUEUE);
6948 		return -EINVAL;
6949 	}
6950 
6951 	if (sdebug_host_max_queue &&
6952 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6953 		sdebug_max_queue = sdebug_host_max_queue;
6954 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6955 			sdebug_max_queue);
6956 	}
6957 
6958 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6959 			       GFP_KERNEL);
6960 	if (sdebug_q_arr == NULL)
6961 		return -ENOMEM;
6962 	for (k = 0; k < submit_queues; ++k)
6963 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6964 
6965 	/*
6966 	 * Check for a host-managed zoned block device specified with
6967 	 * ptype=0x14 or zbc=XXX.
6968 	 */
6969 	if (sdebug_ptype == TYPE_ZBC) {
6970 		sdeb_zbc_model = BLK_ZONED_HM;
6971 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6972 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6973 		if (k < 0) {
6974 			ret = k;
6975 			goto free_q_arr;
6976 		}
6977 		sdeb_zbc_model = k;
6978 		switch (sdeb_zbc_model) {
6979 		case BLK_ZONED_NONE:
6980 		case BLK_ZONED_HA:
6981 			sdebug_ptype = TYPE_DISK;
6982 			break;
6983 		case BLK_ZONED_HM:
6984 			sdebug_ptype = TYPE_ZBC;
6985 			break;
6986 		default:
6987 			pr_err("Invalid ZBC model\n");
6988 			ret = -EINVAL;
6989 			goto free_q_arr;
6990 		}
6991 	}
6992 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6993 		sdeb_zbc_in_use = true;
6994 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6995 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6996 	}
6997 
6998 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6999 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7000 	if (sdebug_dev_size_mb < 1)
7001 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7002 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7003 	sdebug_store_sectors = sz / sdebug_sector_size;
7004 	sdebug_capacity = get_sdebug_capacity();
7005 
7006 	/* play around with geometry, don't waste too much on track 0 */
7007 	sdebug_heads = 8;
7008 	sdebug_sectors_per = 32;
7009 	if (sdebug_dev_size_mb >= 256)
7010 		sdebug_heads = 64;
7011 	else if (sdebug_dev_size_mb >= 16)
7012 		sdebug_heads = 32;
7013 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7014 			       (sdebug_sectors_per * sdebug_heads);
7015 	if (sdebug_cylinders_per >= 1024) {
7016 		/* other LLDs do this; implies >= 1GB ram disk ... */
7017 		sdebug_heads = 255;
7018 		sdebug_sectors_per = 63;
7019 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7020 			       (sdebug_sectors_per * sdebug_heads);
7021 	}
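	/*
	 * Worked example (illustrative numbers only): dev_size_mb=1024 with
	 * 512 byte sectors gives 2097152 sectors; heads=64 and sectors_per=32
	 * would yield 1024 cylinders, so the branch above switches to
	 * heads=255 and sectors_per=63, giving 2097152 / 16065 = 130
	 * cylinders.
	 */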
7022 	if (scsi_debug_lbp()) {
7023 		sdebug_unmap_max_blocks =
7024 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7025 
7026 		sdebug_unmap_max_desc =
7027 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7028 
7029 		sdebug_unmap_granularity =
7030 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7031 
7032 		if (sdebug_unmap_alignment &&
7033 		    sdebug_unmap_granularity <=
7034 		    sdebug_unmap_alignment) {
7035 			pr_err("unmap_granularity <= unmap_alignment\n");
7036 			ret = -EINVAL;
7037 			goto free_q_arr;
7038 		}
7039 	}
7040 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7041 	if (want_store) {
7042 		idx = sdebug_add_store();
7043 		if (idx < 0) {
7044 			ret = idx;
7045 			goto free_q_arr;
7046 		}
7047 	}
7048 
7049 	pseudo_primary = root_device_register("pseudo_0");
7050 	if (IS_ERR(pseudo_primary)) {
7051 		pr_warn("root_device_register() error\n");
7052 		ret = PTR_ERR(pseudo_primary);
7053 		goto free_vm;
7054 	}
7055 	ret = bus_register(&pseudo_lld_bus);
7056 	if (ret < 0) {
7057 		pr_warn("bus_register error: %d\n", ret);
7058 		goto dev_unreg;
7059 	}
7060 	ret = driver_register(&sdebug_driverfs_driver);
7061 	if (ret < 0) {
7062 		pr_warn("driver_register error: %d\n", ret);
7063 		goto bus_unreg;
7064 	}
7065 
7066 	hosts_to_add = sdebug_add_host;
7067 	sdebug_add_host = 0;
7068 
7069 	for (k = 0; k < hosts_to_add; k++) {
7070 		if (want_store && k == 0) {
7071 			ret = sdebug_add_host_helper(idx);
7072 			if (ret < 0) {
7073 				pr_err("add_host_helper k=%d, error=%d\n",
7074 				       k, -ret);
7075 				break;
7076 			}
7077 		} else {
7078 			ret = sdebug_do_add_host(want_store &&
7079 						 sdebug_per_host_store);
7080 			if (ret < 0) {
7081 				pr_err("add_host k=%d error=%d\n", k, -ret);
7082 				break;
7083 			}
7084 		}
7085 	}
7086 	if (sdebug_verbose)
7087 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7088 
7089 	return 0;
7090 
7091 bus_unreg:
7092 	bus_unregister(&pseudo_lld_bus);
7093 dev_unreg:
7094 	root_device_unregister(pseudo_primary);
7095 free_vm:
7096 	sdebug_erase_store(idx, NULL);
7097 free_q_arr:
7098 	kfree(sdebug_q_arr);
7099 	return ret;
7100 }
7101 
7102 static void __exit scsi_debug_exit(void)
7103 {
7104 	int k = sdebug_num_hosts;
7105 
7106 	stop_all_queued();
7107 	for (; k; k--)
7108 		sdebug_do_remove_host(true);
7109 	free_all_queued();
7110 	driver_unregister(&sdebug_driverfs_driver);
7111 	bus_unregister(&pseudo_lld_bus);
7112 	root_device_unregister(pseudo_primary);
7113 
7114 	sdebug_erase_all_stores(false);
7115 	xa_destroy(per_store_ap);
7116 	kfree(sdebug_q_arr);
7117 }
7118 
7119 device_initcall(scsi_debug_init);
7120 module_exit(scsi_debug_exit);
7121 
7122 static void sdebug_release_adapter(struct device *dev)
7123 {
7124 	struct sdebug_host_info *sdbg_host;
7125 
7126 	sdbg_host = dev_to_sdebug_host(dev);
7127 	kfree(sdbg_host);
7128 }
7129 
7130 /* idx must be valid; if sip is NULL then it will be looked up using idx */
7131 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7132 {
7133 	if (idx < 0)
7134 		return;
7135 	if (!sip) {
7136 		if (xa_empty(per_store_ap))
7137 			return;
7138 		sip = xa_load(per_store_ap, idx);
7139 		if (!sip)
7140 			return;
7141 	}
7142 	vfree(sip->map_storep);
7143 	vfree(sip->dif_storep);
7144 	vfree(sip->storep);
7145 	xa_erase(per_store_ap, idx);
7146 	kfree(sip);
7147 }
7148 
7149 /* Callers pass apart_from_first==false only in the shutdown case. */
7150 static void sdebug_erase_all_stores(bool apart_from_first)
7151 {
7152 	unsigned long idx;
7153 	struct sdeb_store_info *sip = NULL;
7154 
7155 	xa_for_each(per_store_ap, idx, sip) {
7156 		if (apart_from_first)
7157 			apart_from_first = false;
7158 		else
7159 			sdebug_erase_store(idx, sip);
7160 	}
7161 	if (apart_from_first)
7162 		sdeb_most_recent_idx = sdeb_first_idx;
7163 }
7164 
7165 /*
7166  * Returns the new store's xarray index (idx) if >= 0, else a negated errno.
7167  * Limit the number of stores to 65536.
7168  */
7169 static int sdebug_add_store(void)
7170 {
7171 	int res;
7172 	u32 n_idx;
7173 	unsigned long iflags;
7174 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7175 	struct sdeb_store_info *sip = NULL;
7176 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7177 
7178 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7179 	if (!sip)
7180 		return -ENOMEM;
7181 
7182 	xa_lock_irqsave(per_store_ap, iflags);
7183 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7184 	if (unlikely(res < 0)) {
7185 		xa_unlock_irqrestore(per_store_ap, iflags);
7186 		kfree(sip);
7187 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7188 		return res;
7189 	}
7190 	sdeb_most_recent_idx = n_idx;
7191 	if (sdeb_first_idx < 0)
7192 		sdeb_first_idx = n_idx;
7193 	xa_unlock_irqrestore(per_store_ap, iflags);
7194 
7195 	res = -ENOMEM;
7196 	sip->storep = vzalloc(sz);
7197 	if (!sip->storep) {
7198 		pr_err("user data oom\n");
7199 		goto err;
7200 	}
7201 	if (sdebug_num_parts > 0)
7202 		sdebug_build_parts(sip->storep, sz);
7203 
7204 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7205 	if (sdebug_dix) {
7206 		int dif_size;
7207 
7208 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7209 		sip->dif_storep = vmalloc(dif_size);
7210 
7211 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7212 			sip->dif_storep);
7213 
7214 		if (!sip->dif_storep) {
7215 			pr_err("DIX oom\n");
7216 			goto err;
7217 		}
7218 		memset(sip->dif_storep, 0xff, dif_size);
7219 	}
7220 	/* Logical Block Provisioning */
7221 	if (scsi_debug_lbp()) {
7222 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7223 		sip->map_storep = vmalloc(array_size(sizeof(long),
7224 						     BITS_TO_LONGS(map_size)));
7225 
7226 		pr_info("%lu provisioning blocks\n", map_size);
7227 
7228 		if (!sip->map_storep) {
7229 			pr_err("LBP map oom\n");
7230 			goto err;
7231 		}
7232 
7233 		bitmap_zero(sip->map_storep, map_size);
7234 
7235 		/* Map first 1KB for partition table */
7236 		if (sdebug_num_parts)
7237 			map_region(sip, 0, 2);
7238 	}
7239 
7240 	rwlock_init(&sip->macc_lck);
7241 	return (int)n_idx;
7242 err:
7243 	sdebug_erase_store((int)n_idx, sip);
7244 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7245 	return res;
7246 }
7247 
7248 static int sdebug_add_host_helper(int per_host_idx)
7249 {
7250 	int k, devs_per_host, idx;
7251 	int error = -ENOMEM;
7252 	struct sdebug_host_info *sdbg_host;
7253 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7254 
7255 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7256 	if (!sdbg_host)
7257 		return -ENOMEM;
7258 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7259 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7260 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7261 	sdbg_host->si_idx = idx;
7262 
7263 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7264 
7265 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7266 	for (k = 0; k < devs_per_host; k++) {
7267 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7268 		if (!sdbg_devinfo)
7269 			goto clean;
7270 	}
7271 
7272 	mutex_lock(&sdebug_host_list_mutex);
7273 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7274 	mutex_unlock(&sdebug_host_list_mutex);
7275 
7276 	sdbg_host->dev.bus = &pseudo_lld_bus;
7277 	sdbg_host->dev.parent = pseudo_primary;
7278 	sdbg_host->dev.release = &sdebug_release_adapter;
7279 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7280 
7281 	error = device_register(&sdbg_host->dev);
7282 	if (error) {
7283 		mutex_lock(&sdebug_host_list_mutex);
7284 		list_del(&sdbg_host->host_list);
7285 		mutex_unlock(&sdebug_host_list_mutex);
7286 		goto clean;
7287 	}
7288 
7289 	++sdebug_num_hosts;
7290 	return 0;
7291 
7292 clean:
7293 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7294 				 dev_list) {
7295 		list_del(&sdbg_devinfo->dev_list);
7296 		kfree(sdbg_devinfo->zstate);
7297 		kfree(sdbg_devinfo);
7298 	}
7299 	if (sdbg_host->dev.release)
7300 		put_device(&sdbg_host->dev);
7301 	else
7302 		kfree(sdbg_host);
7303 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7304 	return error;
7305 }
7306 
7307 static int sdebug_do_add_host(bool mk_new_store)
7308 {
7309 	int ph_idx = sdeb_most_recent_idx;
7310 
7311 	if (mk_new_store) {
7312 		ph_idx = sdebug_add_store();
7313 		if (ph_idx < 0)
7314 			return ph_idx;
7315 	}
7316 	return sdebug_add_host_helper(ph_idx);
7317 }
7318 
7319 static void sdebug_do_remove_host(bool the_end)
7320 {
7321 	int idx = -1;
7322 	struct sdebug_host_info *sdbg_host = NULL;
7323 	struct sdebug_host_info *sdbg_host2;
7324 
7325 	mutex_lock(&sdebug_host_list_mutex);
7326 	if (!list_empty(&sdebug_host_list)) {
7327 		sdbg_host = list_entry(sdebug_host_list.prev,
7328 				       struct sdebug_host_info, host_list);
7329 		idx = sdbg_host->si_idx;
7330 	}
7331 	if (!the_end && idx >= 0) {
7332 		bool unique = true;
7333 
7334 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7335 			if (sdbg_host2 == sdbg_host)
7336 				continue;
7337 			if (idx == sdbg_host2->si_idx) {
7338 				unique = false;
7339 				break;
7340 			}
7341 		}
7342 		if (unique) {
7343 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7344 			if (idx == sdeb_most_recent_idx)
7345 				--sdeb_most_recent_idx;
7346 		}
7347 	}
7348 	if (sdbg_host)
7349 		list_del(&sdbg_host->host_list);
7350 	mutex_unlock(&sdebug_host_list_mutex);
7351 
7352 	if (!sdbg_host)
7353 		return;
7354 
7355 	device_unregister(&sdbg_host->dev);
7356 	--sdebug_num_hosts;
7357 }
7358 
7359 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7360 {
7361 	struct sdebug_dev_info *devip = sdev->hostdata;
7362 
7363 	if (!devip)
7364 		return	-ENODEV;
7365 
7366 	mutex_lock(&sdebug_host_list_mutex);
7367 	block_unblock_all_queues(true);
7368 
7369 	if (qdepth > SDEBUG_CANQUEUE) {
7370 		qdepth = SDEBUG_CANQUEUE;
7371 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7372 			qdepth, SDEBUG_CANQUEUE);
7373 	}
7374 	if (qdepth < 1)
7375 		qdepth = 1;
7376 	if (qdepth != sdev->queue_depth)
7377 		scsi_change_queue_depth(sdev, qdepth);
7378 
7379 	block_unblock_all_queues(false);
7380 	mutex_unlock(&sdebug_host_list_mutex);
7381 
7382 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7383 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7384 
7385 	return sdev->queue_depth;
7386 }
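
/*
 * Illustrative usage (hypothetical device path): the mid-layer routes writes
 * to a scsi device's queue_depth sysfs attribute here, where the requested
 * depth is clamped to [1, SDEBUG_CANQUEUE]:
 *
 *     echo 16 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 */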
7387 
7388 static bool fake_timeout(struct scsi_cmnd *scp)
7389 {
7390 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7391 		if (sdebug_every_nth < -1)
7392 			sdebug_every_nth = -1;
7393 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7394 			return true; /* ignore command causing timeout */
7395 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7396 			 scsi_medium_access_command(scp))
7397 			return true; /* time out reads and writes */
7398 	}
7399 	return false;
7400 }
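
/*
 * Example (hypothetical settings): with every_nth=100 and SDEBUG_OPT_TIMEOUT
 * set in opts, fake_timeout() returns true for every 100th command, which
 * scsi_debug_queuecommand() then drops without responding so the mid-layer
 * sees a timeout; every_nth values below -1 are reset to -1 after firing.
 */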
7401 
7402 /* Response to TUR or media access command when device stopped */
7403 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7404 {
7405 	int stopped_state;
7406 	u64 diff_ns = 0;
7407 	ktime_t now_ts = ktime_get_boottime();
7408 	struct scsi_device *sdp = scp->device;
7409 
7410 	stopped_state = atomic_read(&devip->stopped);
7411 	if (stopped_state == 2) {
7412 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7413 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7414 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7415 				/* tur_ms_to_ready timer extinguished */
7416 				atomic_set(&devip->stopped, 0);
7417 				return 0;
7418 			}
7419 		}
7420 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7421 		if (sdebug_verbose)
7422 			sdev_printk(KERN_INFO, sdp,
7423 				    "%s: Not ready: in process of becoming ready\n", my_name);
7424 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7425 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7426 
7427 			if (diff_ns <= tur_nanosecs_to_ready)
7428 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7429 			else
7430 				diff_ns = tur_nanosecs_to_ready;
7431 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7432 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7433 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7434 						   diff_ns);
7435 			return check_condition_result;
7436 		}
7437 	}
7438 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7439 	if (sdebug_verbose)
7440 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7441 			    my_name);
7442 	return check_condition_result;
7443 }
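
/*
 * Example (hypothetical parameter): with tur_ms_to_ready=3000, a device that
 * starts in stopped state 2 answers TEST UNIT READY with NOT READY
 * (LOGICAL_UNIT_NOT_READY, ASCQ 0x1) for roughly 3 seconds after creation,
 * reporting the remaining milliseconds in the sense INFORMATION field (per
 * T10 20-061r2); once the timer expires it reports ready.
 */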
7444 
7445 static void sdebug_map_queues(struct Scsi_Host *shost)
7446 {
7447 	int i, qoff;
7448 
7449 	if (shost->nr_hw_queues == 1)
7450 		return;
7451 
7452 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7453 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7454 
7455 		map->nr_queues  = 0;
7456 
7457 		if (i == HCTX_TYPE_DEFAULT)
7458 			map->nr_queues = submit_queues - poll_queues;
7459 		else if (i == HCTX_TYPE_POLL)
7460 			map->nr_queues = poll_queues;
7461 
7462 		if (!map->nr_queues) {
7463 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7464 			continue;
7465 		}
7466 
7467 		map->queue_offset = qoff;
7468 		blk_mq_map_queues(map);
7469 
7470 		qoff += map->nr_queues;
7471 	}
7472 }
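
/*
 * Example (hypothetical module parameters): submit_queues=4 with
 * poll_queues=1 gives HCTX_TYPE_DEFAULT three queues at queue_offset 0 and
 * HCTX_TYPE_POLL one queue at queue_offset 3; HCTX_TYPE_READ gets no queues
 * and is skipped.
 */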
7473 
7474 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7475 {
7476 	bool first;
7477 	bool retiring = false;
7478 	int num_entries = 0;
7479 	unsigned int qc_idx = 0;
7480 	unsigned long iflags;
7481 	ktime_t kt_from_boot = ktime_get_boottime();
7482 	struct sdebug_queue *sqp;
7483 	struct sdebug_queued_cmd *sqcp;
7484 	struct scsi_cmnd *scp;
7485 	struct sdebug_defer *sd_dp;
7486 
7487 	sqp = sdebug_q_arr + queue_num;
7488 
7489 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7490 
7491 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7492 	if (qc_idx >= sdebug_max_queue)
7493 		goto unlock;
7494 
7495 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7496 		if (first) {
7497 			first = false;
7498 			if (!test_bit(qc_idx, sqp->in_use_bm))
7499 				continue;
7500 		} else {
7501 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7502 		}
7503 		if (qc_idx >= sdebug_max_queue)
7504 			break;
7505 
7506 		sqcp = &sqp->qc_arr[qc_idx];
7507 		sd_dp = sqcp->sd_dp;
7508 		if (unlikely(!sd_dp))
7509 			continue;
7510 		scp = sqcp->a_cmnd;
7511 		if (unlikely(scp == NULL)) {
7512 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7513 			       queue_num, qc_idx, __func__);
7514 			break;
7515 		}
7516 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7517 			if (kt_from_boot < sd_dp->cmpl_ts)
7518 				continue;
7519 
7520 		} else		/* ignoring non-REQ_POLLED requests */
7521 			continue;
7522 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7523 			retiring = true;
7524 
7525 		sqcp->a_cmnd = NULL;
7526 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7527 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7528 				sqp, queue_num, qc_idx, __func__);
7529 			break;
7530 		}
7531 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7532 			int k, retval;
7533 
7534 			retval = atomic_read(&retired_max_queue);
7535 			if (qc_idx >= retval) {
7536 				pr_err("index %u too large\n", qc_idx);
7537 				break;
7538 			}
7539 			k = find_last_bit(sqp->in_use_bm, retval);
7540 			if ((k < sdebug_max_queue) || (k == retval))
7541 				atomic_set(&retired_max_queue, 0);
7542 			else
7543 				atomic_set(&retired_max_queue, k + 1);
7544 		}
7545 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7546 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7547 
7548 		if (sdebug_statistics) {
7549 			atomic_inc(&sdebug_completions);
7550 			if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7551 				atomic_inc(&sdebug_miss_cpus);
7552 		}
7553 
7554 		scsi_done(scp); /* callback to mid level */
7555 		num_entries++;
7556 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7557 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7558 			break;
7559 	}
7560 
7561 unlock:
7562 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7563 
7564 	if (num_entries > 0)
7565 		atomic_add(num_entries, &sdeb_mq_poll_count);
7566 	return num_entries;
7567 }
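
/*
 * Note: this poll handler only completes commands deferred as
 * SDEB_DEFER_POLL (i.e. submitted on a polled hardware context when
 * poll_queues > 0) whose completion time has been reached; the number of
 * entries completed here is accumulated in sdeb_mq_poll_count.
 */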
7568 
7569 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7570 				   struct scsi_cmnd *scp)
7571 {
7572 	u8 sdeb_i;
7573 	struct scsi_device *sdp = scp->device;
7574 	const struct opcode_info_t *oip;
7575 	const struct opcode_info_t *r_oip;
7576 	struct sdebug_dev_info *devip;
7577 	u8 *cmd = scp->cmnd;
7578 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7579 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7580 	int k, na;
7581 	int errsts = 0;
7582 	u64 lun_index = sdp->lun & 0x3FFF;
7583 	u32 flags;
7584 	u16 sa;
7585 	u8 opcode = cmd[0];
7586 	bool has_wlun_rl;
7587 	bool inject_now;
7588 
7589 	scsi_set_resid(scp, 0);
7590 	if (sdebug_statistics) {
7591 		atomic_inc(&sdebug_cmnd_count);
7592 		inject_now = inject_on_this_cmd();
7593 	} else {
7594 		inject_now = false;
7595 	}
7596 	if (unlikely(sdebug_verbose &&
7597 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7598 		char b[120];
7599 		int n, len, sb;
7600 
7601 		len = scp->cmd_len;
7602 		sb = (int)sizeof(b);
7603 		if (len > 32)
7604 			strcpy(b, "too long, over 32 bytes");
7605 		else {
7606 			for (k = 0, n = 0; k < len && n < sb; ++k)
7607 				n += scnprintf(b + n, sb - n, "%02x ",
7608 					       (u32)cmd[k]);
7609 		}
7610 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7611 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7612 	}
7613 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7614 		return SCSI_MLQUEUE_HOST_BUSY;
7615 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7616 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7617 		goto err_out;
7618 
7619 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7620 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7621 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7622 	if (unlikely(!devip)) {
7623 		devip = find_build_dev_info(sdp);
7624 		if (NULL == devip)
7625 			goto err_out;
7626 	}
7627 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7628 		atomic_set(&sdeb_inject_pending, 1);
7629 
7630 	na = oip->num_attached;
7631 	r_pfp = oip->pfp;
7632 	if (na) {	/* multiple commands with this opcode */
7633 		r_oip = oip;
7634 		if (FF_SA & r_oip->flags) {
7635 			if (F_SA_LOW & oip->flags)
7636 				sa = 0x1f & cmd[1];
7637 			else
7638 				sa = get_unaligned_be16(cmd + 8);
7639 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7640 				if (opcode == oip->opcode && sa == oip->sa)
7641 					break;
7642 			}
7643 		} else {   /* since no service action only check opcode */
7644 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7645 				if (opcode == oip->opcode)
7646 					break;
7647 			}
7648 		}
7649 		if (k > na) {
7650 			if (F_SA_LOW & r_oip->flags)
7651 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7652 			else if (F_SA_HIGH & r_oip->flags)
7653 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7654 			else
7655 				mk_sense_invalid_opcode(scp);
7656 			goto check_cond;
7657 		}
7658 	}	/* else (when na==0) we assume the oip is a match */
7659 	flags = oip->flags;
7660 	if (unlikely(F_INV_OP & flags)) {
7661 		mk_sense_invalid_opcode(scp);
7662 		goto check_cond;
7663 	}
7664 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7665 		if (sdebug_verbose)
7666 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7667 				    my_name, opcode, " supported for wlun");
7668 		mk_sense_invalid_opcode(scp);
7669 		goto check_cond;
7670 	}
7671 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7672 		u8 rem;
7673 		int j;
7674 
7675 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7676 			rem = ~oip->len_mask[k] & cmd[k];
7677 			if (rem) {
7678 				for (j = 7; j >= 0; --j, rem <<= 1) {
7679 					if (0x80 & rem)
7680 						break;
7681 				}
7682 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7683 				goto check_cond;
7684 			}
7685 		}
7686 	}
7687 	if (unlikely(!(F_SKIP_UA & flags) &&
7688 		     find_first_bit(devip->uas_bm,
7689 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7690 		errsts = make_ua(scp, devip);
7691 		if (errsts)
7692 			goto check_cond;
7693 	}
7694 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7695 		     atomic_read(&devip->stopped))) {
7696 		errsts = resp_not_ready(scp, devip);
7697 		if (errsts)
7698 			goto fini;
7699 	}
7700 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7701 		goto fini;
7702 	if (unlikely(sdebug_every_nth)) {
7703 		if (fake_timeout(scp))
7704 			return 0;	/* ignore command: make trouble */
7705 	}
7706 	if (likely(oip->pfp))
7707 		pfp = oip->pfp;	/* calls a resp_* function */
7708 	else
7709 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7710 
7711 fini:
7712 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7713 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7714 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7715 					    sdebug_ndelay > 10000)) {
7716 		/*
7717 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7718 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7719 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7720 		 * For Synchronize Cache want 1/20 of SSU's delay.
7721 		 */
7722 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7723 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7724 
7725 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7726 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7727 	} else
7728 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7729 				     sdebug_ndelay);
7730 check_cond:
7731 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7732 err_out:
7733 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7734 }
7735 
7736 static struct scsi_host_template sdebug_driver_template = {
7737 	.show_info =		scsi_debug_show_info,
7738 	.write_info =		scsi_debug_write_info,
7739 	.proc_name =		sdebug_proc_name,
7740 	.name =			"SCSI DEBUG",
7741 	.info =			scsi_debug_info,
7742 	.slave_alloc =		scsi_debug_slave_alloc,
7743 	.slave_configure =	scsi_debug_slave_configure,
7744 	.slave_destroy =	scsi_debug_slave_destroy,
7745 	.ioctl =		scsi_debug_ioctl,
7746 	.queuecommand =		scsi_debug_queuecommand,
7747 	.change_queue_depth =	sdebug_change_qdepth,
7748 	.map_queues =		sdebug_map_queues,
7749 	.mq_poll =		sdebug_blk_mq_poll,
7750 	.eh_abort_handler =	scsi_debug_abort,
7751 	.eh_device_reset_handler = scsi_debug_device_reset,
7752 	.eh_target_reset_handler = scsi_debug_target_reset,
7753 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7754 	.eh_host_reset_handler = scsi_debug_host_reset,
7755 	.can_queue =		SDEBUG_CANQUEUE,
7756 	.this_id =		7,
7757 	.sg_tablesize =		SG_MAX_SEGMENTS,
7758 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7759 	.max_sectors =		-1U,
7760 	.max_segment_size =	-1U,
7761 	.module =		THIS_MODULE,
7762 	.track_queue_depth =	1,
7763 };
7764 
7765 static int sdebug_driver_probe(struct device *dev)
7766 {
7767 	int error = 0;
7768 	struct sdebug_host_info *sdbg_host;
7769 	struct Scsi_Host *hpnt;
7770 	int hprot;
7771 
7772 	sdbg_host = dev_to_sdebug_host(dev);
7773 
7774 	sdebug_driver_template.can_queue = sdebug_max_queue;
7775 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7776 	if (!sdebug_clustering)
7777 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7778 
7779 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7780 	if (NULL == hpnt) {
7781 		pr_err("scsi_host_alloc failed\n");
7782 		error = -ENODEV;
7783 		return error;
7784 	}
7785 	if (submit_queues > nr_cpu_ids) {
7786 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7787 			my_name, submit_queues, nr_cpu_ids);
7788 		submit_queues = nr_cpu_ids;
7789 	}
7790 	/*
7791 	 * Decide whether to tell scsi subsystem that we want mq. The
7792 	 * following should give the same answer for each host.
7793 	 */
7794 	hpnt->nr_hw_queues = submit_queues;
7795 	if (sdebug_host_max_queue)
7796 		hpnt->host_tagset = 1;
7797 
7798 	/* poll queues are possible for nr_hw_queues > 1 */
7799 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7800 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7801 			 my_name, poll_queues, hpnt->nr_hw_queues);
7802 		poll_queues = 0;
7803 	}
7804 
7805 	/*
7806 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7807 	 * left over for non-polled I/O.
7808 	 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7809 	 */
7810 	if (poll_queues >= submit_queues) {
7811 		if (submit_queues < 3)
7812 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7813 		else
7814 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7815 				my_name, submit_queues - 1);
7816 		poll_queues = 1;
7817 	}
7818 	if (poll_queues)
7819 		hpnt->nr_maps = 3;
7820 
7821 	sdbg_host->shost = hpnt;
7822 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7823 		hpnt->max_id = sdebug_num_tgts + 1;
7824 	else
7825 		hpnt->max_id = sdebug_num_tgts;
7826 	/* = sdebug_max_luns; */
7827 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7828 
7829 	hprot = 0;
7830 
7831 	switch (sdebug_dif) {
7832 
7833 	case T10_PI_TYPE1_PROTECTION:
7834 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7835 		if (sdebug_dix)
7836 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7837 		break;
7838 
7839 	case T10_PI_TYPE2_PROTECTION:
7840 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7841 		if (sdebug_dix)
7842 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7843 		break;
7844 
7845 	case T10_PI_TYPE3_PROTECTION:
7846 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7847 		if (sdebug_dix)
7848 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7849 		break;
7850 
7851 	default:
7852 		if (sdebug_dix)
7853 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7854 		break;
7855 	}
7856 
7857 	scsi_host_set_prot(hpnt, hprot);
7858 
7859 	if (have_dif_prot || sdebug_dix)
7860 		pr_info("host protection%s%s%s%s%s%s%s\n",
7861 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7862 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7863 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7864 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7865 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7866 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7867 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7868 
7869 	if (sdebug_guard == 1)
7870 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7871 	else
7872 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7873 
7874 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7875 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7876 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7877 		sdebug_statistics = true;
7878 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7879 	if (error) {
7880 		pr_err("scsi_add_host failed\n");
7881 		error = -ENODEV;
7882 		scsi_host_put(hpnt);
7883 	} else {
7884 		scsi_scan_host(hpnt);
7885 	}
7886 
7887 	return error;
7888 }
7889 
7890 static void sdebug_driver_remove(struct device *dev)
7891 {
7892 	struct sdebug_host_info *sdbg_host;
7893 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7894 
7895 	sdbg_host = dev_to_sdebug_host(dev);
7896 
7897 	scsi_remove_host(sdbg_host->shost);
7898 
7899 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7900 				 dev_list) {
7901 		list_del(&sdbg_devinfo->dev_list);
7902 		kfree(sdbg_devinfo->zstate);
7903 		kfree(sdbg_devinfo);
7904 	}
7905 
7906 	scsi_host_put(sdbg_host->shost);
7907 }
7908 
7909 static int pseudo_lld_bus_match(struct device *dev,
7910 				struct device_driver *dev_driver)
7911 {
7912 	return 1;
7913 }
7914 
7915 static struct bus_type pseudo_lld_bus = {
7916 	.name = "pseudo",
7917 	.match = pseudo_lld_bus_match,
7918 	.probe = sdebug_driver_probe,
7919 	.remove = sdebug_driver_remove,
7920 	.drv_groups = sdebug_drv_groups,
7921 };
7922