// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
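/* Illustrative invocation (values chosen arbitrarily) overriding those
 * defaults via the corresponding module parameters:
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 * would create 2 hosts, each with 2 targets of 4 LUNs: 16 pseudo devices.
 */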
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8

/* When the SDEBUG_OPT_MEDIUM_ERR option is set in sdebug_opts, a medium
 * error is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is BITS_PER_LONG bits */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
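/* For example, on a 64-bit build that works out to 3 * 64 = 192 queued
 * commands per submit queue (3 * 32 = 96 on a 32-bit build). */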

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,
	ZBC_ZTYPE_SWR	= 0x2,
	ZBC_ZTYPE_SWP	= 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};
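/* Illustrative state: a sequential-write-required zone that has been
 * partially written would have z_type == ZBC_ZTYPE_SWR, z_cond ==
 * ZC2_IMPLICIT_OPEN and a write pointer z_wp somewhere between z_start
 * and z_start + z_size. */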

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer sd_dp;
	struct scsi_cmnd *scmd;
};

struct sdebug_scsi_cmd {
	spinlock_t   lock;
};

struct sdebug_queue {
	struct sdebug_queued_cmd *qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
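/* len_mask example (see the INQUIRY entry in opcode_info_arr[] below):
 * {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} says the cdb is 6 bytes long and
 * then gives, per cdb byte, the bits that may legally be set. When the
 * strict parameter is set, a bit set in the cdb outside these masks is
 * reported as an ILLEGAL REQUEST, invalid field in cdb. */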

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
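/* Example lookup: READ(10) has opcode 0x28, so opcode_ind_arr[0x28] yields
 * SDEB_I_READ; that value indexes opcode_info_arr[] below, whose READ entry
 * points at resp_read_dt0() and, via its read_iarr overflow list, also
 * covers READ(6), READ(10) and READ(12). */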

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
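/* For instance, a response function honouring an IMMED bit might finish
 * with something like:
 *   return res | SDEG_RES_IMMED_MASK;
 * (illustrative; see e.g. resp_start_stop() and resp_sync_cache()) */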

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff; the kernel may get rid of these but some mode sense
   pages may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
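/* Note: the do_div() above reduces lba modulo sdebug_store_sectors, so a
 * small RAM store can back a much larger advertised capacity (e.g. when
 * virtual_gb is set): accesses simply wrap around the store. */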

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
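/* Worked example (fixed sense format, sdebug_dsense=0):
 * mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 5) builds ILLEGAL REQUEST with
 * asc INVALID_FIELD_IN_CDB (0x24) and sense-key specific bytes
 * 0xcd 0x00 0x01 at offset 15: SKSV=1, C/D=1 (error in cdb), BPV=1,
 * bit pointer 5, field (byte) pointer 1. */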

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
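/* e.g. loading the module with cdb_len=16 steers the upper layers towards
 * READ(16)/WRITE(16) plus 10 byte MODE SENSE/SELECT on each device. */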

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
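/* Example: with scsi_bufflen(scp)==512 and resid preset to 512, a single
 * call that copies 96 bytes at off_dst 0 leaves
 * resid = min(512, 512 - (0 + 96)) = 416. */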

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1520 
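/*
 * The resulting page can be inspected from user space, for example with
 * sg3_utils (the device name below is only illustrative):
 *
 *	sg_vpd --page=bl /dev/sg0
 *
 * which decodes the maximum/optimal transfer lengths and, when lbpu=1,
 * the unmap limits filled in above.
 */
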
1521 /* Block device characteristics VPD page (SBC-3) */
1522 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1523 {
1524 	memset(arr, 0, 0x3c);
1525 	arr[0] = 0;
1526 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1527 	arr[2] = 0;
1528 	arr[3] = 5;	/* less than 1.8" */
1529 	if (devip->zmodel == BLK_ZONED_HA)
1530 		arr[4] = 1 << 4;	/* zoned field = 01b */
1531 
1532 	return 0x3c;
1533 }
1534 
1535 /* Logical block provisioning VPD page (SBC-4) */
1536 static int inquiry_vpd_b2(unsigned char *arr)
1537 {
1538 	memset(arr, 0, 0x4);
1539 	arr[0] = 0;			/* threshold exponent */
1540 	if (sdebug_lbpu)
1541 		arr[1] = 1 << 7;
1542 	if (sdebug_lbpws)
1543 		arr[1] |= 1 << 6;
1544 	if (sdebug_lbpws10)
1545 		arr[1] |= 1 << 5;
1546 	if (sdebug_lbprz && scsi_debug_lbp())
1547 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1548 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1549 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1550 	/* threshold_percentage=0 */
1551 	return 0x4;
1552 }
1553 
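/*
 * Worked example: loading with "modprobe scsi_debug lbpu=1 lbprz=1"
 * yields arr[1] = 0x80 | (1 << 2) = 0x84, i.e. LBPU=1 with the three bit
 * LBPRZ field set to 001b (unmapped blocks are read back as zeros).
 */
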
1554 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1555 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1556 {
1557 	memset(arr, 0, 0x3c);
1558 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1559 	/*
1560 	 * Set Optimal number of open sequential write preferred zones and
1561 	 * Optimal number of non-sequentially written sequential write
1562 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1563 	 * fields set to zero, apart from Max. number of open swrz_s field.
1564 	 */
1565 	put_unaligned_be32(0xffffffff, &arr[4]);
1566 	put_unaligned_be32(0xffffffff, &arr[8]);
1567 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1568 		put_unaligned_be32(devip->max_open, &arr[12]);
1569 	else
1570 		put_unaligned_be32(0xffffffff, &arr[12]);
1571 	if (devip->zcap < devip->zsize) {
1572 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1573 		put_unaligned_be64(devip->zsize, &arr[20]);
1574 	} else {
1575 		arr[19] = 0;
1576 	}
1577 	return 0x3c;
1578 }
1579 
1580 #define SDEBUG_LONG_INQ_SZ 96
1581 #define SDEBUG_MAX_INQ_ARR_SZ 584
1582 
1583 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1584 {
1585 	unsigned char pq_pdt;
1586 	unsigned char *arr;
1587 	unsigned char *cmd = scp->cmnd;
1588 	u32 alloc_len, n;
1589 	int ret;
1590 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1591 
1592 	alloc_len = get_unaligned_be16(cmd + 3);
1593 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1594 	if (!arr)
1595 		return DID_REQUEUE << 16;
1596 	is_disk = (sdebug_ptype == TYPE_DISK);
1597 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1598 	is_disk_zbc = (is_disk || is_zbc);
1599 	have_wlun = scsi_is_wlun(scp->device->lun);
1600 	if (have_wlun)
1601 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1602 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1603 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1604 	else
1605 		pq_pdt = (sdebug_ptype & 0x1f);
1606 	arr[0] = pq_pdt;
1607 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1608 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1609 		kfree(arr);
1610 		return check_condition_result;
1611 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1612 		int lu_id_num, port_group_id, target_dev_id;
1613 		u32 len;
1614 		char lu_id_str[6];
1615 		int host_no = devip->sdbg_host->shost->host_no;
1616 
1617 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1618 		    (devip->channel & 0x7f);
1619 		if (sdebug_vpd_use_hostno == 0)
1620 			host_no = 0;
1621 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1622 			    (devip->target * 1000) + devip->lun);
1623 		target_dev_id = ((host_no + 1) * 2000) +
1624 				 (devip->target * 1000) - 3;
1625 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1626 		if (0 == cmd[2]) { /* supported vital product data pages */
1627 			arr[1] = cmd[2];	/* sanity */
1628 			n = 4;
1629 			arr[n++] = 0x0;   /* this page */
1630 			arr[n++] = 0x80;  /* unit serial number */
1631 			arr[n++] = 0x83;  /* device identification */
1632 			arr[n++] = 0x84;  /* software interface ident. */
1633 			arr[n++] = 0x85;  /* management network addresses */
1634 			arr[n++] = 0x86;  /* extended inquiry */
1635 			arr[n++] = 0x87;  /* mode page policy */
1636 			arr[n++] = 0x88;  /* SCSI ports */
1637 			if (is_disk_zbc) {	  /* SBC or ZBC */
1638 				arr[n++] = 0x89;  /* ATA information */
1639 				arr[n++] = 0xb0;  /* Block limits */
1640 				arr[n++] = 0xb1;  /* Block characteristics */
1641 				if (is_disk)
1642 					arr[n++] = 0xb2;  /* LB Provisioning */
1643 				if (is_zbc)
1644 					arr[n++] = 0xb6;  /* ZB dev. char. */
1645 			}
1646 			arr[3] = n - 4;	  /* number of supported VPD pages */
1647 		} else if (0x80 == cmd[2]) { /* unit serial number */
1648 			arr[1] = cmd[2];	/* sanity */
1649 			arr[3] = len;
1650 			memcpy(&arr[4], lu_id_str, len);
1651 		} else if (0x83 == cmd[2]) { /* device identification */
1652 			arr[1] = cmd[2];	/* sanity */
1653 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1654 						target_dev_id, lu_id_num,
1655 						lu_id_str, len,
1656 						&devip->lu_name);
1657 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1658 			arr[1] = cmd[2];	/* sanity */
1659 			arr[3] = inquiry_vpd_84(&arr[4]);
1660 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1661 			arr[1] = cmd[2];	/* sanity */
1662 			arr[3] = inquiry_vpd_85(&arr[4]);
1663 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1664 			arr[1] = cmd[2];	/* sanity */
1665 			arr[3] = 0x3c;	/* number of following entries */
1666 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1667 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1668 			else if (have_dif_prot)
1669 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1670 			else
1671 				arr[4] = 0x0;   /* no protection stuff */
1672 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1673 		} else if (0x87 == cmd[2]) { /* mode page policy */
1674 			arr[1] = cmd[2];	/* sanity */
1675 			arr[3] = 0x8;	/* number of following entries */
1676 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1677 			arr[6] = 0x80;	/* mlus, shared */
1678 			arr[8] = 0x18;	 /* protocol specific lu */
1679 			arr[10] = 0x82;	 /* mlus, per initiator port */
1680 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1681 			arr[1] = cmd[2];	/* sanity */
1682 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1683 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1684 			arr[1] = cmd[2];        /* sanity */
1685 			n = inquiry_vpd_89(&arr[4]);
1686 			put_unaligned_be16(n, arr + 2);
1687 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1688 			arr[1] = cmd[2];        /* sanity */
1689 			arr[3] = inquiry_vpd_b0(&arr[4]);
1690 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1691 			arr[1] = cmd[2];        /* sanity */
1692 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1693 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1694 			arr[1] = cmd[2];        /* sanity */
1695 			arr[3] = inquiry_vpd_b2(&arr[4]);
1696 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1697 			arr[1] = cmd[2];        /* sanity */
1698 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1699 		} else {
1700 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1701 			kfree(arr);
1702 			return check_condition_result;
1703 		}
1704 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1705 		ret = fill_from_dev_buffer(scp, arr,
1706 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1707 		kfree(arr);
1708 		return ret;
1709 	}
1710 	/* drops through here for a standard inquiry */
1711 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1712 	arr[2] = sdebug_scsi_level;
1713 	arr[3] = 2;    /* response_data_format==2 */
1714 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1715 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1716 	if (sdebug_vpd_use_hostno == 0)
1717 		arr[5] |= 0x10; /* claim: implicit TPGS */
1718 	arr[6] = 0x10; /* claim: MultiP */
1719 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1720 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1721 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1722 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1723 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1724 	/* Use Vendor Specific area to place driver date in ASCII hex */
1725 	memcpy(&arr[36], sdebug_version_date, 8);
1726 	/* version descriptors (2 bytes each) follow */
1727 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1728 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1729 	n = 62;
1730 	if (is_disk) {		/* SBC-4 no version claimed */
1731 		put_unaligned_be16(0x600, arr + n);
1732 		n += 2;
1733 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1734 		put_unaligned_be16(0x525, arr + n);
1735 		n += 2;
1736 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1737 		put_unaligned_be16(0x624, arr + n);
1738 		n += 2;
1739 	}
1740 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1741 	ret = fill_from_dev_buffer(scp, arr,
1742 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1743 	kfree(arr);
1744 	return ret;
1745 }
1746 
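/*
 * Example of exercising the above from user space with sg3_utils (device
 * names are only illustrative):
 *
 *	sg_inq /dev/sg0				standard INQUIRY, 96 byte response
 *	sg_inq --vpd --page=0x83 /dev/sg0	device identification VPD page
 */
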
1747 /* See resp_iec_m_pg() for how this data is manipulated */
1748 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1749 				   0, 0, 0x0, 0x0};
1750 
1751 static int resp_requests(struct scsi_cmnd *scp,
1752 			 struct sdebug_dev_info *devip)
1753 {
1754 	unsigned char *cmd = scp->cmnd;
1755 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1756 	bool dsense = !!(cmd[1] & 1);
1757 	u32 alloc_len = cmd[4];
1758 	u32 len = 18;
1759 	int stopped_state = atomic_read(&devip->stopped);
1760 
1761 	memset(arr, 0, sizeof(arr));
1762 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1763 		if (dsense) {
1764 			arr[0] = 0x72;
1765 			arr[1] = NOT_READY;
1766 			arr[2] = LOGICAL_UNIT_NOT_READY;
1767 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1768 			len = 8;
1769 		} else {
1770 			arr[0] = 0x70;
1771 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1772 			arr[7] = 0xa;			/* 18 byte sense buffer */
1773 			arr[12] = LOGICAL_UNIT_NOT_READY;
1774 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1775 		}
1776 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1777 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1778 		if (dsense) {
1779 			arr[0] = 0x72;
1780 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1781 			arr[2] = THRESHOLD_EXCEEDED;
1782 			arr[3] = 0xff;		/* Failure prediction (false) */
1783 			len = 8;
1784 		} else {
1785 			arr[0] = 0x70;
1786 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1787 			arr[7] = 0xa;		/* 18 byte sense buffer */
1788 			arr[12] = THRESHOLD_EXCEEDED;
1789 			arr[13] = 0xff;		/* Failure prediction (false) */
1790 		}
1791 	} else {	/* nothing to report */
1792 		if (dsense) {
1793 			len = 8;
1794 			memset(arr, 0, len);
1795 			arr[0] = 0x72;
1796 		} else {
1797 			memset(arr, 0, len);
1798 			arr[0] = 0x70;
1799 			arr[7] = 0xa;
1800 		}
1801 	}
1802 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1803 }
1804 
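/*
 * Worked example: while stopped with stopped_state == 1, a REQUEST SENSE
 * with DESC=0 returns fixed-format sense beginning 70 00 02, with
 * ASC/ASCQ 04/02 in bytes 12 and 13; with DESC=1 it returns the 8 byte
 * descriptor-format header 72 02 04 02 00 00 00 00.
 */
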
1805 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1806 {
1807 	unsigned char *cmd = scp->cmnd;
1808 	int power_cond, want_stop, stopped_state;
1809 	bool changing;
1810 
1811 	power_cond = (cmd[4] & 0xf0) >> 4;
1812 	if (power_cond) {
1813 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1814 		return check_condition_result;
1815 	}
1816 	want_stop = !(cmd[4] & 1);
1817 	stopped_state = atomic_read(&devip->stopped);
1818 	if (stopped_state == 2) {
1819 		ktime_t now_ts = ktime_get_boottime();
1820 
1821 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1822 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1823 
1824 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1825 				/* tur_ms_to_ready delay has expired */
1826 				atomic_set(&devip->stopped, 0);
1827 				stopped_state = 0;
1828 			}
1829 		}
1830 		if (stopped_state == 2) {
1831 			if (want_stop) {
1832 				stopped_state = 1;	/* dummy up success */
1833 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1834 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1835 				return check_condition_result;
1836 			}
1837 		}
1838 	}
1839 	changing = (stopped_state != want_stop);
1840 	if (changing)
1841 		atomic_xchg(&devip->stopped, want_stop);
1842 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1843 		return SDEG_RES_IMMED_MASK;
1844 	else
1845 		return 0;
1846 }
1847 
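/*
 * Example timeline: with tur_ms_to_ready=2000 the device starts in
 * stopped_state 2; a START STOP UNIT with START=1 issued before 2 s have
 * elapsed since create_ts is rejected with an invalid-field sense, while
 * one issued later (or any STOP) succeeds and clears the stopped state.
 */
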
1848 static sector_t get_sdebug_capacity(void)
1849 {
1850 	static const unsigned int gibibyte = 1073741824;
1851 
1852 	if (sdebug_virtual_gb > 0)
1853 		return (sector_t)sdebug_virtual_gb *
1854 			(gibibyte / sdebug_sector_size);
1855 	else
1856 		return sdebug_store_sectors;
1857 }
1858 
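/*
 * Example: with virtual_gb=4 and the default 512 byte logical blocks this
 * reports 4 * (1073741824 / 512) = 8388608 sectors, independent of the
 * (possibly much smaller) RAM store, whose accesses wrap modulo
 * sdebug_store_sectors in do_device_access().
 */
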
1859 #define SDEBUG_READCAP_ARR_SZ 8
1860 static int resp_readcap(struct scsi_cmnd *scp,
1861 			struct sdebug_dev_info *devip)
1862 {
1863 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1864 	unsigned int capac;
1865 
1866 	/* following just in case virtual_gb changed */
1867 	sdebug_capacity = get_sdebug_capacity();
1868 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1869 	if (sdebug_capacity < 0xffffffff) {
1870 		capac = (unsigned int)sdebug_capacity - 1;
1871 		put_unaligned_be32(capac, arr + 0);
1872 	} else {
1873 		put_unaligned_be32(0xffffffff, arr + 0);
	}
1874 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1875 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1876 }
1877 
1878 #define SDEBUG_READCAP16_ARR_SZ 32
1879 static int resp_readcap16(struct scsi_cmnd *scp,
1880 			  struct sdebug_dev_info *devip)
1881 {
1882 	unsigned char *cmd = scp->cmnd;
1883 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1884 	u32 alloc_len;
1885 
1886 	alloc_len = get_unaligned_be32(cmd + 10);
1887 	/* following just in case virtual_gb changed */
1888 	sdebug_capacity = get_sdebug_capacity();
1889 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1890 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1891 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1892 	arr[13] = sdebug_physblk_exp & 0xf;
1893 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1894 
1895 	if (scsi_debug_lbp()) {
1896 		arr[14] |= 0x80; /* LBPME */
1897 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1898 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1899 		 * in the wider field maps to 0 in this field.
1900 		 */
1901 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1902 			arr[14] |= 0x40;
1903 	}
1904 
1905 	/*
1906 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1907 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1908 	 */
1909 	if (devip->zmodel == BLK_ZONED_HM)
1910 		arr[12] |= 1 << 4;
1911 
1912 	arr[15] = sdebug_lowest_aligned & 0xff;
1913 
1914 	if (have_dif_prot) {
1915 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1916 		arr[12] |= 1; /* PROT_EN */
1917 	}
1918 
1919 	return fill_from_dev_buffer(scp, arr,
1920 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1921 }
1922 
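/*
 * Worked example: with dif=1 (Type 1 protection) the code above sets
 * arr[12] = ((1 - 1) << 1) | 1 = 0x01, i.e. PROT_EN=1 with P_TYPE=0,
 * which READ CAPACITY(16) defines as Type 1 protection enabled.
 */
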
1923 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1924 
1925 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1926 			      struct sdebug_dev_info *devip)
1927 {
1928 	unsigned char *cmd = scp->cmnd;
1929 	unsigned char *arr;
1930 	int host_no = devip->sdbg_host->shost->host_no;
1931 	int port_group_a, port_group_b, port_a, port_b;
1932 	u32 alen, n, rlen;
1933 	int ret;
1934 
1935 	alen = get_unaligned_be32(cmd + 6);
1936 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1937 	if (!arr)
1938 		return DID_REQUEUE << 16;
1939 	/*
1940 	 * EVPD page 0x88 states we have two ports, one
1941 	 * real and a fake port with no device connected.
1942 	 * So we create two port groups with one port each
1943 	 * and set the group with port B to unavailable.
1944 	 */
1945 	port_a = 0x1; /* relative port A */
1946 	port_b = 0x2; /* relative port B */
1947 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1948 			(devip->channel & 0x7f);
1949 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1950 			(devip->channel & 0x7f) + 0x80;
1951 
1952 	/*
1953 	 * The asymmetric access state is cycled according to the host number
	 * (host_no % 3).
1954 	 */
1955 	n = 4;
1956 	if (sdebug_vpd_use_hostno == 0) {
1957 		arr[n++] = host_no % 3; /* Asymm access state */
1958 		arr[n++] = 0x0F; /* claim: all states are supported */
1959 	} else {
1960 		arr[n++] = 0x0; /* Active/Optimized path */
1961 		arr[n++] = 0x01; /* only support active/optimized paths */
1962 	}
1963 	put_unaligned_be16(port_group_a, arr + n);
1964 	n += 2;
1965 	arr[n++] = 0;    /* Reserved */
1966 	arr[n++] = 0;    /* Status code */
1967 	arr[n++] = 0;    /* Vendor unique */
1968 	arr[n++] = 0x1;  /* One port per group */
1969 	arr[n++] = 0;    /* Reserved */
1970 	arr[n++] = 0;    /* Reserved */
1971 	put_unaligned_be16(port_a, arr + n);
1972 	n += 2;
1973 	arr[n++] = 3;    /* Port unavailable */
1974 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1975 	put_unaligned_be16(port_group_b, arr + n);
1976 	n += 2;
1977 	arr[n++] = 0;    /* Reserved */
1978 	arr[n++] = 0;    /* Status code */
1979 	arr[n++] = 0;    /* Vendor unique */
1980 	arr[n++] = 0x1;  /* One port per group */
1981 	arr[n++] = 0;    /* Reserved */
1982 	arr[n++] = 0;    /* Reserved */
1983 	put_unaligned_be16(port_b, arr + n);
1984 	n += 2;
1985 
1986 	rlen = n - 4;
1987 	put_unaligned_be32(rlen, arr + 0);
1988 
1989 	/*
1990 	 * Return the smallest of:
1991 	 * - the allocation length (alen)
1992 	 * - the constructed response length (n)
1993 	 * - the maximum array size
1994 	 */
1995 	rlen = min(alen, n);
1996 	ret = fill_from_dev_buffer(scp, arr,
1997 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1998 	kfree(arr);
1999 	return ret;
2000 }
2001 
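/*
 * Worked example: for host_no 0 and channel 0 the two port groups are
 * 0x0100 and 0x0180. With vpd_use_hostno=0 the group A state cycles with
 * host_no % 3 through active/optimized (0), active/non-optimized (1) and
 * standby (2), while group B is always reported as unavailable (3).
 */
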
2002 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2003 			     struct sdebug_dev_info *devip)
2004 {
2005 	bool rctd;
2006 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2007 	u16 req_sa, u;
2008 	u32 alloc_len, a_len;
2009 	int k, offset, len, errsts, count, bump, na;
2010 	const struct opcode_info_t *oip;
2011 	const struct opcode_info_t *r_oip;
2012 	u8 *arr;
2013 	u8 *cmd = scp->cmnd;
2014 
2015 	rctd = !!(cmd[2] & 0x80);
2016 	reporting_opts = cmd[2] & 0x7;
2017 	req_opcode = cmd[3];
2018 	req_sa = get_unaligned_be16(cmd + 4);
2019 	alloc_len = get_unaligned_be32(cmd + 6);
2020 	if (alloc_len < 4 || alloc_len > 0xffff) {
2021 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2022 		return check_condition_result;
2023 	}
2024 	if (alloc_len > 8192)
2025 		a_len = 8192;
2026 	else
2027 		a_len = alloc_len;
2028 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2029 	if (NULL == arr) {
2030 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2031 				INSUFF_RES_ASCQ);
2032 		return check_condition_result;
2033 	}
2034 	switch (reporting_opts) {
2035 	case 0:	/* all commands */
2036 		/* count number of commands */
2037 		for (count = 0, oip = opcode_info_arr;
2038 		     oip->num_attached != 0xff; ++oip) {
2039 			if (F_INV_OP & oip->flags)
2040 				continue;
2041 			count += (oip->num_attached + 1);
2042 		}
2043 		bump = rctd ? 20 : 8;
2044 		put_unaligned_be32(count * bump, arr);
2045 		for (offset = 4, oip = opcode_info_arr;
2046 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2047 			if (F_INV_OP & oip->flags)
2048 				continue;
2049 			na = oip->num_attached;
2050 			arr[offset] = oip->opcode;
2051 			put_unaligned_be16(oip->sa, arr + offset + 2);
2052 			if (rctd)
2053 				arr[offset + 5] |= 0x2;
2054 			if (FF_SA & oip->flags)
2055 				arr[offset + 5] |= 0x1;
2056 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2057 			if (rctd)
2058 				put_unaligned_be16(0xa, arr + offset + 8);
2059 			r_oip = oip;
2060 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2061 				if (F_INV_OP & oip->flags)
2062 					continue;
2063 				offset += bump;
2064 				arr[offset] = oip->opcode;
2065 				put_unaligned_be16(oip->sa, arr + offset + 2);
2066 				if (rctd)
2067 					arr[offset + 5] |= 0x2;
2068 				if (FF_SA & oip->flags)
2069 					arr[offset + 5] |= 0x1;
2070 				put_unaligned_be16(oip->len_mask[0],
2071 						   arr + offset + 6);
2072 				if (rctd)
2073 					put_unaligned_be16(0xa,
2074 							   arr + offset + 8);
2075 			}
2076 			oip = r_oip;
2077 			offset += bump;
2078 		}
2079 		break;
2080 	case 1:	/* one command: opcode only */
2081 	case 2:	/* one command: opcode plus service action */
2082 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2083 		sdeb_i = opcode_ind_arr[req_opcode];
2084 		oip = &opcode_info_arr[sdeb_i];
2085 		if (F_INV_OP & oip->flags) {
2086 			supp = 1;
2087 			offset = 4;
2088 		} else {
2089 			if (1 == reporting_opts) {
2090 				if (FF_SA & oip->flags) {
2091 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2092 							     2, 2);
2093 					kfree(arr);
2094 					return check_condition_result;
2095 				}
2096 				req_sa = 0;
2097 			} else if (2 == reporting_opts &&
2098 				   0 == (FF_SA & oip->flags)) {
2099 				/* point at the requested service action */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2100 				kfree(arr);
2101 				return check_condition_result;
2102 			}
2103 			if (0 == (FF_SA & oip->flags) &&
2104 			    req_opcode == oip->opcode)
2105 				supp = 3;
2106 			else if (0 == (FF_SA & oip->flags)) {
2107 				na = oip->num_attached;
2108 				for (k = 0, oip = oip->arrp; k < na;
2109 				     ++k, ++oip) {
2110 					if (req_opcode == oip->opcode)
2111 						break;
2112 				}
2113 				supp = (k >= na) ? 1 : 3;
2114 			} else if (req_sa != oip->sa) {
2115 				na = oip->num_attached;
2116 				for (k = 0, oip = oip->arrp; k < na;
2117 				     ++k, ++oip) {
2118 					if (req_sa == oip->sa)
2119 						break;
2120 				}
2121 				supp = (k >= na) ? 1 : 3;
2122 			} else
2123 			} else {
2124 				supp = 3;
			}
2125 				u = oip->len_mask[0];
2126 				put_unaligned_be16(u, arr + 2);
2127 				arr[4] = oip->opcode;
2128 				for (k = 1; k < u; ++k)
2129 					arr[4 + k] = (k < 16) ?
2130 						 oip->len_mask[k] : 0xff;
2131 				offset = 4 + u;
2132 			} else {
2133 				offset = 4;
			}
2134 		}
2135 		arr[1] = (rctd ? 0x80 : 0) | supp;
2136 		if (rctd) {
2137 			put_unaligned_be16(0xa, arr + offset);
2138 			offset += 12;
2139 		}
2140 		break;
2141 	default:
2142 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2143 		kfree(arr);
2144 		return check_condition_result;
2145 	}
2146 	offset = (offset < a_len) ? offset : a_len;
2147 	len = (offset < alloc_len) ? offset : alloc_len;
2148 	errsts = fill_from_dev_buffer(scp, arr, len);
2149 	kfree(arr);
2150 	return errsts;
2151 }
2152 
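/*
 * The reporting options above map to REPORT SUPPORTED OPERATION CODES
 * with REPORTING OPTIONS 0-3; from user space they can be exercised with
 * sg3_utils, e.g. (device name only illustrative):
 *
 *	sg_opcodes /dev/sg0				all commands
 *	sg_opcodes --opcode=0x9e --sa=0x10 /dev/sg0	one opcode + SA
 */
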
2153 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2154 			  struct sdebug_dev_info *devip)
2155 {
2156 	bool repd;
2157 	u32 alloc_len, len;
2158 	u8 arr[16];
2159 	u8 *cmd = scp->cmnd;
2160 
2161 	memset(arr, 0, sizeof(arr));
2162 	repd = !!(cmd[2] & 0x80);
2163 	alloc_len = get_unaligned_be32(cmd + 6);
2164 	if (alloc_len < 4) {
2165 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2166 		return check_condition_result;
2167 	}
2168 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2169 	arr[1] = 0x1;		/* ITNRS */
2170 	if (repd) {
2171 		arr[3] = 0xc;
2172 		len = 16;
2173 	} else {
2174 		len = 4;
	}
2175 
2176 	len = (len < alloc_len) ? len : alloc_len;
2177 	return fill_from_dev_buffer(scp, arr, len);
2178 }
2179 
2180 /* <<Following mode page info copied from ST318451LW>> */
2181 
2182 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2183 {	/* Read-Write Error Recovery page for mode_sense */
2184 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2185 					5, 0, 0xff, 0xff};
2186 
2187 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2188 	if (1 == pcontrol)
2189 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2190 	return sizeof(err_recov_pg);
2191 }
2192 
2193 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2194 { 	/* Disconnect-Reconnect page for mode_sense */
2195 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2196 					 0, 0, 0, 0, 0, 0, 0, 0};
2197 
2198 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2199 	if (1 == pcontrol)
2200 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2201 	return sizeof(disconnect_pg);
2202 }
2203 
2204 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2205 {       /* Format device page for mode_sense */
2206 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2207 				     0, 0, 0, 0, 0, 0, 0, 0,
2208 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2209 
2210 	memcpy(p, format_pg, sizeof(format_pg));
2211 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2212 	put_unaligned_be16(sdebug_sector_size, p + 12);
2213 	if (sdebug_removable)
2214 		p[20] |= 0x20; /* should agree with INQUIRY */
2215 	if (1 == pcontrol)
2216 		memset(p + 2, 0, sizeof(format_pg) - 2);
2217 	return sizeof(format_pg);
2218 }
2219 
2220 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2221 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2222 				     0, 0, 0, 0};
2223 
2224 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2225 { 	/* Caching page for mode_sense */
2226 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2227 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2228 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2229 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2230 
2231 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2232 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2233 	memcpy(p, caching_pg, sizeof(caching_pg));
2234 	if (1 == pcontrol)
2235 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2236 	else if (2 == pcontrol)
2237 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2238 	return sizeof(caching_pg);
2239 }
2240 
2241 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2242 				    0, 0, 0x2, 0x4b};
2243 
2244 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2245 { 	/* Control mode page for mode_sense */
2246 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2247 					0, 0, 0, 0};
2248 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2249 				     0, 0, 0x2, 0x4b};
2250 
2251 	if (sdebug_dsense)
2252 		ctrl_m_pg[2] |= 0x4;
2253 	else
2254 		ctrl_m_pg[2] &= ~0x4;
2255 
2256 	if (sdebug_ato)
2257 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2258 
2259 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2260 	if (1 == pcontrol)
2261 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2262 	else if (2 == pcontrol)
2263 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2264 	return sizeof(ctrl_m_pg);
2265 }
2266 
2267 
2268 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2269 {	/* Informational Exceptions control mode page for mode_sense */
2270 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2271 				       0, 0, 0x0, 0x0};
2272 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2273 				      0, 0, 0x0, 0x0};
2274 
2275 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2276 	if (1 == pcontrol)
2277 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2278 	else if (2 == pcontrol)
2279 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2280 	return sizeof(iec_m_pg);
2281 }
2282 
2283 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2284 {	/* SAS SSP mode page - short format for mode_sense */
2285 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2286 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2287 
2288 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2289 	if (1 == pcontrol)
2290 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2291 	return sizeof(sas_sf_m_pg);
2292 }
2293 
2294 
2295 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2296 			      int target_dev_id)
2297 {	/* SAS phy control and discover mode page for mode_sense */
2298 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2299 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2300 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2301 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2302 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2303 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2304 		    0, 0, 0, 0, 0, 0, 0, 0,
2305 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2306 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2307 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2308 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2309 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2310 		    0, 0, 0, 0, 0, 0, 0, 0,
2311 		};
2312 	int port_a, port_b;
2313 
2314 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2315 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2316 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2317 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2318 	port_a = target_dev_id + 1;
2319 	port_b = port_a + 1;
2320 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2321 	put_unaligned_be32(port_a, p + 20);
2322 	put_unaligned_be32(port_b, p + 48 + 20);
2323 	if (1 == pcontrol)
2324 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2325 	return sizeof(sas_pcd_m_pg);
2326 }
2327 
2328 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2329 {	/* SAS SSP shared protocol specific port mode subpage */
2330 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2331 		    0, 0, 0, 0, 0, 0, 0, 0,
2332 		};
2333 
2334 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2335 	if (1 == pcontrol)
2336 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2337 	return sizeof(sas_sha_m_pg);
2338 }
2339 
2340 #define SDEBUG_MAX_MSENSE_SZ 256
2341 
2342 static int resp_mode_sense(struct scsi_cmnd *scp,
2343 			   struct sdebug_dev_info *devip)
2344 {
2345 	int pcontrol, pcode, subpcode, bd_len;
2346 	unsigned char dev_spec;
2347 	u32 alloc_len, offset, len;
2348 	int target_dev_id;
2349 	int target = scp->device->id;
2350 	unsigned char *ap;
2351 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2352 	unsigned char *cmd = scp->cmnd;
2353 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2354 
2355 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2356 	pcontrol = (cmd[2] & 0xc0) >> 6;
2357 	pcode = cmd[2] & 0x3f;
2358 	subpcode = cmd[3];
2359 	msense_6 = (MODE_SENSE == cmd[0]);
2360 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2361 	is_disk = (sdebug_ptype == TYPE_DISK);
2362 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2363 	if ((is_disk || is_zbc) && !dbd)
2364 		bd_len = llbaa ? 16 : 8;
2365 	else
2366 		bd_len = 0;
2367 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2368 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2369 	if (0x3 == pcontrol) {  /* Saving values not supported */
2370 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2371 		return check_condition_result;
2372 	}
2373 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2374 			(devip->target * 1000) - 3;
2375 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2376 	if (is_disk || is_zbc) {
2377 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2378 		if (sdebug_wp)
2379 			dev_spec |= 0x80;
2380 	} else {
2381 		dev_spec = 0x0;
	}
2382 	if (msense_6) {
2383 		arr[2] = dev_spec;
2384 		arr[3] = bd_len;
2385 		offset = 4;
2386 	} else {
2387 		arr[3] = dev_spec;
2388 		if (16 == bd_len)
2389 			arr[4] = 0x1;	/* set LONGLBA bit */
2390 		arr[7] = bd_len;	/* assume 255 or less */
2391 		offset = 8;
2392 	}
2393 	ap = arr + offset;
2394 	if ((bd_len > 0) && (!sdebug_capacity))
2395 		sdebug_capacity = get_sdebug_capacity();
2396 
2397 	if (8 == bd_len) {
2398 		if (sdebug_capacity > 0xfffffffe)
2399 			put_unaligned_be32(0xffffffff, ap + 0);
2400 		else
2401 			put_unaligned_be32(sdebug_capacity, ap + 0);
2402 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2403 		offset += bd_len;
2404 		ap = arr + offset;
2405 	} else if (16 == bd_len) {
2406 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2407 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2408 		offset += bd_len;
2409 		ap = arr + offset;
2410 	}
2411 
2412 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2413 		/* TODO: Control Extension page */
2414 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2415 		return check_condition_result;
2416 	}
2417 	bad_pcode = false;
2418 
2419 	switch (pcode) {
2420 	case 0x1:	/* Read-Write error recovery page, direct access */
2421 		len = resp_err_recov_pg(ap, pcontrol, target);
2422 		offset += len;
2423 		break;
2424 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2425 		len = resp_disconnect_pg(ap, pcontrol, target);
2426 		offset += len;
2427 		break;
2428 	case 0x3:       /* Format device page, direct access */
2429 		if (is_disk) {
2430 			len = resp_format_pg(ap, pcontrol, target);
2431 			offset += len;
2432 		} else {
2433 			bad_pcode = true;
		}
2434 		break;
2435 	case 0x8:	/* Caching page, direct access */
2436 		if (is_disk || is_zbc) {
2437 			len = resp_caching_pg(ap, pcontrol, target);
2438 			offset += len;
2439 		} else {
2440 			bad_pcode = true;
		}
2441 		break;
2442 	case 0xa:	/* Control Mode page, all devices */
2443 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2444 		offset += len;
2445 		break;
2446 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2447 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2448 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2449 			return check_condition_result;
2450 		}
2451 		len = 0;
2452 		if ((0x0 == subpcode) || (0xff == subpcode))
2453 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2454 		if ((0x1 == subpcode) || (0xff == subpcode))
2455 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2456 						  target_dev_id);
2457 		if ((0x2 == subpcode) || (0xff == subpcode))
2458 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2459 		offset += len;
2460 		break;
2461 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2462 		len = resp_iec_m_pg(ap, pcontrol, target);
2463 		offset += len;
2464 		break;
2465 	case 0x3f:	/* Read all Mode pages */
2466 		if ((0 == subpcode) || (0xff == subpcode)) {
2467 			len = resp_err_recov_pg(ap, pcontrol, target);
2468 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2469 			if (is_disk) {
2470 				len += resp_format_pg(ap + len, pcontrol,
2471 						      target);
2472 				len += resp_caching_pg(ap + len, pcontrol,
2473 						       target);
2474 			} else if (is_zbc) {
2475 				len += resp_caching_pg(ap + len, pcontrol,
2476 						       target);
2477 			}
2478 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2479 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2480 			if (0xff == subpcode) {
2481 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2482 						  target, target_dev_id);
2483 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2484 			}
2485 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2486 			offset += len;
2487 		} else {
2488 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2489 			return check_condition_result;
2490 		}
2491 		break;
2492 	default:
2493 		bad_pcode = true;
2494 		break;
2495 	}
2496 	if (bad_pcode) {
2497 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2498 		return check_condition_result;
2499 	}
2500 	if (msense_6)
2501 		arr[0] = offset - 1;
2502 	else
2503 		put_unaligned_be16((offset - 2), arr + 0);
2504 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2505 }
2506 
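/*
 * Worked example: a MODE SENSE(6) for the caching page (CDB 1a 00 08 00
 * fc 00) against a disk returns a 4 byte header, an 8 byte block
 * descriptor and the 20 byte page 0x8, so arr[0] (mode data length) ends
 * up as 4 + 8 + 20 - 1 = 31.
 */
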
2507 #define SDEBUG_MAX_MSELECT_SZ 512
2508 
2509 static int resp_mode_select(struct scsi_cmnd *scp,
2510 			    struct sdebug_dev_info *devip)
2511 {
2512 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2513 	int param_len, res, mpage;
2514 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2515 	unsigned char *cmd = scp->cmnd;
2516 	int mselect6 = (MODE_SELECT == cmd[0]);
2517 
2518 	memset(arr, 0, sizeof(arr));
2519 	pf = cmd[1] & 0x10;
2520 	sp = cmd[1] & 0x1;
2521 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2522 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2523 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2524 		return check_condition_result;
2525 	}
2526 	res = fetch_to_dev_buffer(scp, arr, param_len);
2527 	if (-1 == res)
2528 		return DID_ERROR << 16;
2529 	else if (sdebug_verbose && (res < param_len))
2530 		sdev_printk(KERN_INFO, scp->device,
2531 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2532 			    __func__, param_len, res);
2533 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2534 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2535 	off = bd_len + (mselect6 ? 4 : 8);
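	/* the mode data length field is reserved for MODE SELECT */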
2536 	if (md_len > 2 || off >= res) {
2537 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2538 		return check_condition_result;
2539 	}
2540 	mpage = arr[off] & 0x3f;
2541 	ps = !!(arr[off] & 0x80);
2542 	if (ps) {
2543 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2544 		return check_condition_result;
2545 	}
2546 	spf = !!(arr[off] & 0x40);
2547 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2548 		       (arr[off + 1] + 2);
2549 	if ((pg_len + off) > param_len) {
2550 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2551 				PARAMETER_LIST_LENGTH_ERR, 0);
2552 		return check_condition_result;
2553 	}
2554 	switch (mpage) {
2555 	case 0x8:      /* Caching Mode page */
2556 		if (caching_pg[1] == arr[off + 1]) {
2557 			memcpy(caching_pg + 2, arr + off + 2,
2558 			       sizeof(caching_pg) - 2);
2559 			goto set_mode_changed_ua;
2560 		}
2561 		break;
2562 	case 0xa:      /* Control Mode page */
2563 		if (ctrl_m_pg[1] == arr[off + 1]) {
2564 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2565 			       sizeof(ctrl_m_pg) - 2);
2566 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2570 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2571 			goto set_mode_changed_ua;
2572 		}
2573 		break;
2574 	case 0x1c:      /* Informational Exceptions Mode page */
2575 		if (iec_m_pg[1] == arr[off + 1]) {
2576 			memcpy(iec_m_pg + 2, arr + off + 2,
2577 			       sizeof(iec_m_pg) - 2);
2578 			goto set_mode_changed_ua;
2579 		}
2580 		break;
2581 	default:
2582 		break;
2583 	}
2584 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2585 	return check_condition_result;
2586 set_mode_changed_ua:
2587 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2588 	return 0;
2589 }
2590 
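/*
 * Example: a MODE SELECT(6) parameter list carrying the control mode page
 * with the SWP bit (byte 4, bit 3) set flips sdebug_wp to true and raises
 * the MODE PARAMETERS CHANGED unit attention; sdparm can generate such a
 * list, e.g. "sdparm --set SWP=1 /dev/sdX" (device name illustrative).
 */
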
2591 static int resp_temp_l_pg(unsigned char *arr)
2592 {
2593 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2594 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2595 		};
2596 
2597 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2598 	return sizeof(temp_l_pg);
2599 }
2600 
2601 static int resp_ie_l_pg(unsigned char *arr)
2602 {
2603 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2604 		};
2605 
2606 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2607 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2608 		arr[4] = THRESHOLD_EXCEEDED;
2609 		arr[5] = 0xff;
2610 	}
2611 	return sizeof(ie_l_pg);
2612 }
2613 
2614 static int resp_env_rep_l_spg(unsigned char *arr)
2615 {
2616 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2617 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2618 					 0x1, 0x0, 0x23, 0x8,
2619 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2620 		};
2621 
2622 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2623 	return sizeof(env_rep_l_spg);
2624 }
2625 
2626 #define SDEBUG_MAX_LSENSE_SZ 512
2627 
2628 static int resp_log_sense(struct scsi_cmnd *scp,
2629 			  struct sdebug_dev_info *devip)
2630 {
2631 	int ppc, sp, pcode, subpcode;
2632 	u32 alloc_len, len, n;
2633 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2634 	unsigned char *cmd = scp->cmnd;
2635 
2636 	memset(arr, 0, sizeof(arr));
2637 	ppc = cmd[1] & 0x2;
2638 	sp = cmd[1] & 0x1;
2639 	if (ppc || sp) {
2640 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2641 		return check_condition_result;
2642 	}
2643 	pcode = cmd[2] & 0x3f;
2644 	subpcode = cmd[3] & 0xff;
2645 	alloc_len = get_unaligned_be16(cmd + 7);
2646 	arr[0] = pcode;
2647 	if (0 == subpcode) {
2648 		switch (pcode) {
2649 		case 0x0:	/* Supported log pages log page */
2650 			n = 4;
2651 			arr[n++] = 0x0;		/* this page */
2652 			arr[n++] = 0xd;		/* Temperature */
2653 			arr[n++] = 0x2f;	/* Informational exceptions */
2654 			arr[3] = n - 4;
2655 			break;
2656 		case 0xd:	/* Temperature log page */
2657 			arr[3] = resp_temp_l_pg(arr + 4);
2658 			break;
2659 		case 0x2f:	/* Informational exceptions log page */
2660 			arr[3] = resp_ie_l_pg(arr + 4);
2661 			break;
2662 		default:
2663 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2664 			return check_condition_result;
2665 		}
2666 	} else if (0xff == subpcode) {
2667 		arr[0] |= 0x40;
2668 		arr[1] = subpcode;
2669 		switch (pcode) {
2670 		case 0x0:	/* Supported log pages and subpages log page */
2671 			n = 4;
2672 			arr[n++] = 0x0;
2673 			arr[n++] = 0x0;		/* 0,0 page */
2674 			arr[n++] = 0x0;
2675 			arr[n++] = 0xff;	/* this page */
2676 			arr[n++] = 0xd;
2677 			arr[n++] = 0x0;		/* Temperature */
2678 			arr[n++] = 0xd;
2679 			arr[n++] = 0x1;		/* Environment reporting */
2680 			arr[n++] = 0xd;
2681 			arr[n++] = 0xff;	/* all 0xd subpages */
2682 			arr[n++] = 0x2f;
2683 			arr[n++] = 0x0;	/* Informational exceptions */
2684 			arr[n++] = 0x2f;
2685 			arr[n++] = 0xff;	/* all 0x2f subpages */
2686 			arr[3] = n - 4;
2687 			break;
2688 		case 0xd:	/* Temperature subpages */
2689 			n = 4;
2690 			arr[n++] = 0xd;
2691 			arr[n++] = 0x0;		/* Temperature */
2692 			arr[n++] = 0xd;
2693 			arr[n++] = 0x1;		/* Environment reporting */
2694 			arr[n++] = 0xd;
2695 			arr[n++] = 0xff;	/* these subpages */
2696 			arr[3] = n - 4;
2697 			break;
2698 		case 0x2f:	/* Informational exceptions subpages */
2699 			n = 4;
2700 			arr[n++] = 0x2f;
2701 			arr[n++] = 0x0;		/* Informational exceptions */
2702 			arr[n++] = 0x2f;
2703 			arr[n++] = 0xff;	/* these subpages */
2704 			arr[3] = n - 4;
2705 			break;
2706 		default:
2707 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2708 			return check_condition_result;
2709 		}
2710 	} else if (subpcode > 0) {
2711 		arr[0] |= 0x40;
2712 		arr[1] = subpcode;
2713 		if (pcode == 0xd && subpcode == 1) {
2714 			arr[3] = resp_env_rep_l_spg(arr + 4);
2715 		} else {
2716 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2717 			return check_condition_result;
2718 		}
2719 	} else {
2720 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2721 		return check_condition_result;
2722 	}
2723 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2724 	return fill_from_dev_buffer(scp, arr,
2725 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2726 }
2727 
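/*
 * Example: "sg_logs --page=0xd /dev/sg0" (sg3_utils, device name
 * illustrative) fetches the temperature page built by resp_temp_l_pg()
 * above, which reports a current temperature of 38 C against a reference
 * of 65 C.
 */
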
2728 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2729 {
2730 	return devip->nr_zones != 0;
2731 }
2732 
2733 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2734 					unsigned long long lba)
2735 {
2736 	u32 zno = lba >> devip->zsize_shift;
2737 	struct sdeb_zone_state *zsp;
2738 
2739 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2740 		return &devip->zstate[zno];
2741 
2742 	/*
2743 	 * If the zone capacity is less than the zone size, adjust for gap
2744 	 * zones.
2745 	 */
2746 	zno = 2 * zno - devip->nr_conv_zones;
2747 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2748 	zsp = &devip->zstate[zno];
2749 	if (lba >= zsp->z_start + zsp->z_size)
2750 		zsp++;
2751 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2752 	return zsp;
2753 }
2754 
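/*
 * Worked example of the gap zone mapping: with nr_conv_zones = 2 and
 * alternating sequential/gap zones thereafter, an LBA in the third
 * sequential zone has zno = 4, which maps to array slot 2 * 4 - 2 = 6;
 * if the LBA lies past that zone (z_start + z_size), the zsp++ above
 * selects slot 7, the gap zone that follows it.
 */
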
2755 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2756 {
2757 	return zsp->z_type == ZBC_ZTYPE_CNV;
2758 }
2759 
2760 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2761 {
2762 	return zsp->z_type == ZBC_ZTYPE_GAP;
2763 }
2764 
2765 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2766 {
2767 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2768 }
2769 
2770 static void zbc_close_zone(struct sdebug_dev_info *devip,
2771 			   struct sdeb_zone_state *zsp)
2772 {
2773 	enum sdebug_z_cond zc;
2774 
2775 	if (!zbc_zone_is_seq(zsp))
2776 		return;
2777 
2778 	zc = zsp->z_cond;
2779 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2780 		return;
2781 
2782 	if (zc == ZC2_IMPLICIT_OPEN)
2783 		devip->nr_imp_open--;
2784 	else
2785 		devip->nr_exp_open--;
2786 
2787 	if (zsp->z_wp == zsp->z_start) {
2788 		zsp->z_cond = ZC1_EMPTY;
2789 	} else {
2790 		zsp->z_cond = ZC4_CLOSED;
2791 		devip->nr_closed++;
2792 	}
2793 }
2794 
2795 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2796 {
2797 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2798 	unsigned int i;
2799 
2800 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2801 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2802 			zbc_close_zone(devip, zsp);
2803 			return;
2804 		}
2805 	}
2806 }
2807 
2808 static void zbc_open_zone(struct sdebug_dev_info *devip,
2809 			  struct sdeb_zone_state *zsp, bool explicit)
2810 {
2811 	enum sdebug_z_cond zc;
2812 
2813 	if (!zbc_zone_is_seq(zsp))
2814 		return;
2815 
2816 	zc = zsp->z_cond;
2817 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2818 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2819 		return;
2820 
2821 	/* Close an implicit open zone if necessary */
2822 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2823 		zbc_close_zone(devip, zsp);
2824 	else if (devip->max_open &&
2825 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2826 		zbc_close_imp_open_zone(devip);
2827 
2828 	if (zsp->z_cond == ZC4_CLOSED)
2829 		devip->nr_closed--;
2830 	if (explicit) {
2831 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2832 		devip->nr_exp_open++;
2833 	} else {
2834 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2835 		devip->nr_imp_open++;
2836 	}
2837 }
2838 
2839 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2840 				     struct sdeb_zone_state *zsp)
2841 {
2842 	switch (zsp->z_cond) {
2843 	case ZC2_IMPLICIT_OPEN:
2844 		devip->nr_imp_open--;
2845 		break;
2846 	case ZC3_EXPLICIT_OPEN:
2847 		devip->nr_exp_open--;
2848 		break;
2849 	default:
2850 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2851 			  zsp->z_start, zsp->z_cond);
2852 		break;
2853 	}
2854 	zsp->z_cond = ZC5_FULL;
2855 }
2856 
2857 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2858 		       unsigned long long lba, unsigned int num)
2859 {
2860 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2861 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2862 
2863 	if (!zbc_zone_is_seq(zsp))
2864 		return;
2865 
2866 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2867 		zsp->z_wp += num;
2868 		if (zsp->z_wp >= zend)
2869 			zbc_set_zone_full(devip, zsp);
2870 		return;
2871 	}
2872 
2873 	while (num) {
2874 		if (lba != zsp->z_wp)
2875 			zsp->z_non_seq_resource = true;
2876 
2877 		end = lba + num;
2878 		if (end >= zend) {
2879 			n = zend - lba;
2880 			zsp->z_wp = zend;
2881 		} else if (end > zsp->z_wp) {
2882 			n = num;
2883 			zsp->z_wp = end;
2884 		} else {
2885 			n = num;
2886 		}
2887 		if (zsp->z_wp >= zend)
2888 			zbc_set_zone_full(devip, zsp);
2889 
2890 		num -= n;
2891 		lba += n;
2892 		if (num) {
2893 			zsp++;
2894 			zend = zsp->z_start + zsp->z_size;
2895 		}
2896 	}
2897 }
2898 
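/*
 * Example (host-aware, sequential write preferred zones): a 16 block
 * write starting 8 blocks before a zone boundary pushes that zone's WP to
 * the zone end (marking it full) and continues with the remaining 8
 * blocks in the next zone, setting z_non_seq_resource wherever
 * lba != z_wp.
 */
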
2899 static int check_zbc_access_params(struct scsi_cmnd *scp,
2900 			unsigned long long lba, unsigned int num, bool write)
2901 {
2902 	struct scsi_device *sdp = scp->device;
2903 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2904 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2905 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2906 
2907 	if (!write) {
2908 		if (devip->zmodel == BLK_ZONED_HA)
2909 			return 0;
2910 		/* For host-managed, reads cannot cross zone type boundaries */
2911 		if (zsp->z_type != zsp_end->z_type) {
2912 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2913 					LBA_OUT_OF_RANGE,
2914 					READ_INVDATA_ASCQ);
2915 			return check_condition_result;
2916 		}
2917 		return 0;
2918 	}
2919 
2920 	/* Writing into a gap zone is not allowed */
2921 	if (zbc_zone_is_gap(zsp)) {
2922 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2923 				ATTEMPT_ACCESS_GAP);
2924 		return check_condition_result;
2925 	}
2926 
2927 	/* No restrictions for writes within conventional zones */
2928 	if (zbc_zone_is_conv(zsp)) {
2929 		if (!zbc_zone_is_conv(zsp_end)) {
2930 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2931 					LBA_OUT_OF_RANGE,
2932 					WRITE_BOUNDARY_ASCQ);
2933 			return check_condition_result;
2934 		}
2935 		return 0;
2936 	}
2937 
2938 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2939 		/* Writes cannot cross sequential zone boundaries */
2940 		if (zsp_end != zsp) {
2941 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2942 					LBA_OUT_OF_RANGE,
2943 					WRITE_BOUNDARY_ASCQ);
2944 			return check_condition_result;
2945 		}
2946 		/* Cannot write full zones */
2947 		if (zsp->z_cond == ZC5_FULL) {
2948 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2949 					INVALID_FIELD_IN_CDB, 0);
2950 			return check_condition_result;
2951 		}
2952 		/* Writes must be aligned to the zone WP */
2953 		if (lba != zsp->z_wp) {
2954 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2955 					LBA_OUT_OF_RANGE,
2956 					UNALIGNED_WRITE_ASCQ);
2957 			return check_condition_result;
2958 		}
2959 	}
2960 
2961 	/* Handle implicit open of closed and empty zones */
2962 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2963 		if (devip->max_open &&
2964 		    devip->nr_exp_open >= devip->max_open) {
2965 			mk_sense_buffer(scp, DATA_PROTECT,
2966 					INSUFF_RES_ASC,
2967 					INSUFF_ZONE_ASCQ);
2968 			return check_condition_result;
2969 		}
2970 		zbc_open_zone(devip, zsp, false);
2971 	}
2972 
2973 	return 0;
2974 }
2975 
2976 static inline int check_device_access_params
2977 			(struct scsi_cmnd *scp, unsigned long long lba,
2978 			 unsigned int num, bool write)
2979 {
2980 	struct scsi_device *sdp = scp->device;
2981 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2982 
2983 	if (lba + num > sdebug_capacity) {
2984 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2985 		return check_condition_result;
2986 	}
2987 	/* transfer length excessive (tie in to block limits VPD page) */
2988 	if (num > sdebug_store_sectors) {
2989 		/* needs work to find which cdb byte 'num' comes from */
2990 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2991 		return check_condition_result;
2992 	}
2993 	if (write && unlikely(sdebug_wp)) {
2994 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2995 		return check_condition_result;
2996 	}
2997 	if (sdebug_dev_is_zoned(devip))
2998 		return check_zbc_access_params(scp, lba, num, write);
2999 
3000 	return 0;
3001 }
3002 
3003 /*
3004  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3005  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3006  * that access any of the "stores" in struct sdeb_store_info should call this
3007  * function with bug_if_fake_rw set to true.
3008  */
3009 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3010 						bool bug_if_fake_rw)
3011 {
3012 	if (sdebug_fake_rw) {
3013 		BUG_ON(bug_if_fake_rw);	/* See note above */
3014 		return NULL;
3015 	}
3016 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3017 }
3018 
3019 /* Returns number of bytes copied or -1 if error. */
3020 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3021 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3022 {
3023 	int ret;
3024 	u64 block, rest = 0;
3025 	enum dma_data_direction dir;
3026 	struct scsi_data_buffer *sdb = &scp->sdb;
3027 	u8 *fsp;
3028 
3029 	if (do_write) {
3030 		dir = DMA_TO_DEVICE;
3031 		write_since_sync = true;
3032 	} else {
3033 		dir = DMA_FROM_DEVICE;
3034 	}
3035 
3036 	if (!sdb->length || !sip)
3037 		return 0;
3038 	if (scp->sc_data_direction != dir)
3039 		return -1;
3040 	fsp = sip->storep;
3041 
3042 	block = do_div(lba, sdebug_store_sectors);
3043 	if (block + num > sdebug_store_sectors)
3044 		rest = block + num - sdebug_store_sectors;
3045 
3046 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3047 		   fsp + (block * sdebug_sector_size),
3048 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3049 	if (ret != (num - rest) * sdebug_sector_size)
3050 		return ret;
3051 
3052 	if (rest) {
3053 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3054 			    fsp, rest * sdebug_sector_size,
3055 			    sg_skip + ((num - rest) * sdebug_sector_size),
3056 			    do_write);
3057 	}
3058 
3059 	return ret;
3060 }
3061 
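/*
 * Worked example of the wrap-around above: with sdebug_store_sectors =
 * 0x8000, a 16 sector access at lba 0x7ff8 copies 8 sectors from the end
 * of the store and the remaining 8 ("rest") from offset 0, which is how a
 * small RAM store backs a larger virtual capacity.
 */
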
3062 /* Returns number of bytes copied or -1 if error. */
3063 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3064 {
3065 	struct scsi_data_buffer *sdb = &scp->sdb;
3066 
3067 	if (!sdb->length)
3068 		return 0;
3069 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3070 		return -1;
3071 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3072 			      num * sdebug_sector_size, 0, true);
3073 }
3074 
3075 /*
 * If the num sectors at sip->storep + lba compare equal to the first num
 * sectors of arr, copy the top half of arr into sip->storep + lba and
 * return true. If the comparison fails, return false.
 */
3078 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3079 			      const u8 *arr, bool compare_only)
3080 {
3081 	bool res;
3082 	u64 block, rest = 0;
3083 	u32 store_blks = sdebug_store_sectors;
3084 	u32 lb_size = sdebug_sector_size;
3085 	u8 *fsp = sip->storep;
3086 
3087 	block = do_div(lba, store_blks);
3088 	if (block + num > store_blks)
3089 		rest = block + num - store_blks;
3090 
3091 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3092 	if (!res)
3093 		return res;
3094 	if (rest)
3095 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3096 			      rest * lb_size);
3097 	if (!res)
3098 		return res;
3099 	if (compare_only)
3100 		return true;
3101 	arr += num * lb_size;
3102 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3103 	if (rest)
3104 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3105 	return res;
3106 }
3107 
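/*
 * Compute the 2-byte PI guard tag: sdebug_guard selects the algorithm,
 * 0 for the T10-DIF CRC, non-zero for the IP checksum used by DIX.
 */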
3108 static __be16 dif_compute_csum(const void *buf, int len)
3109 {
3110 	__be16 csum;
3111 
3112 	if (sdebug_guard)
3113 		csum = (__force __be16)ip_compute_csum(buf, len);
3114 	else
3115 		csum = cpu_to_be16(crc_t10dif(buf, len));
3116 
3117 	return csum;
3118 }
3119 
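/*
 * Returns 0 if the tuple checks out, 0x01 on a guard tag mismatch and
 * 0x03 on a reference tag mismatch. Callers use the non-zero value as
 * the ASCQ paired with ASC 0x10 (LOGICAL BLOCK ... CHECK FAILED).
 */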
3120 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3121 		      sector_t sector, u32 ei_lba)
3122 {
3123 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3124 
3125 	if (sdt->guard_tag != csum) {
3126 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3127 			(unsigned long)sector,
3128 			be16_to_cpu(sdt->guard_tag),
3129 			be16_to_cpu(csum));
3130 		return 0x01;
3131 	}
3132 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3133 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3134 		pr_err("REF check failed on sector %lu\n",
3135 			(unsigned long)sector);
3136 		return 0x03;
3137 	}
3138 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3139 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3140 		pr_err("REF check failed on sector %lu\n",
3141 			(unsigned long)sector);
3142 		return 0x03;
3143 	}
3144 	return 0;
3145 }
3146 
3147 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3148 			  unsigned int sectors, bool read)
3149 {
3150 	size_t resid;
3151 	void *paddr;
3152 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3153 						scp->device->hostdata, true);
3154 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3155 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3156 	struct sg_mapping_iter miter;
3157 
3158 	/* Bytes of protection data to copy into sgl */
3159 	resid = sectors * sizeof(*dif_storep);
3160 
3161 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3162 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3163 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3164 
3165 	while (sg_miter_next(&miter) && resid > 0) {
3166 		size_t len = min_t(size_t, miter.length, resid);
3167 		void *start = dif_store(sip, sector);
3168 		size_t rest = 0;
3169 
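		/* 'rest' is how far the copy would run past the end of the PI store */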
3170 		if (dif_store_end < start + len)
3171 			rest = start + len - dif_store_end;
3172 
3173 		paddr = miter.addr;
3174 
3175 		if (read)
3176 			memcpy(paddr, start, len - rest);
3177 		else
3178 			memcpy(start, paddr, len - rest);
3179 
3180 		if (rest) {
3181 			if (read)
3182 				memcpy(paddr + len - rest, dif_storep, rest);
3183 			else
3184 				memcpy(dif_storep, paddr + len - rest, rest);
3185 		}
3186 
3187 		sector += len / sizeof(*dif_storep);
3188 		resid -= len;
3189 	}
3190 	sg_miter_stop(&miter);
3191 }
3192 
3193 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3194 			    unsigned int sectors, u32 ei_lba)
3195 {
3196 	int ret = 0;
3197 	unsigned int i;
3198 	sector_t sector;
3199 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3200 						scp->device->hostdata, true);
3201 	struct t10_pi_tuple *sdt;
3202 
3203 	for (i = 0; i < sectors; i++, ei_lba++) {
3204 		sector = start_sec + i;
3205 		sdt = dif_store(sip, sector);
3206 
3207 		if (sdt->app_tag == cpu_to_be16(0xffff))
3208 			continue;
3209 
3210 		/*
3211 		 * Because scsi_debug acts as both initiator and
3212 		 * target we proceed to verify the PI even if
3213 		 * RDPROTECT=3. This is done so the "initiator" knows
3214 		 * which type of error to return. Otherwise we would
3215 		 * have to iterate over the PI twice.
3216 		 */
3217 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3218 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3219 					 sector, ei_lba);
3220 			if (ret) {
3221 				dif_errors++;
3222 				break;
3223 			}
3224 		}
3225 	}
3226 
3227 	dif_copy_prot(scp, start_sec, sectors, true);
3228 	dix_reads++;
3229 
3230 	return ret;
3231 }
3232 
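/*
 * With sdebug_no_rwlock set, no lock is taken; the __acquire()/__release()
 * annotations merely keep sparse's context tracking balanced. The global
 * sdeb_fake_rw_lck stands in when there is no backing store (sip == NULL).
 */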
3233 static inline void
3234 sdeb_read_lock(struct sdeb_store_info *sip)
3235 {
3236 	if (sdebug_no_rwlock) {
3237 		if (sip)
3238 			__acquire(&sip->macc_lck);
3239 		else
3240 			__acquire(&sdeb_fake_rw_lck);
3241 	} else {
3242 		if (sip)
3243 			read_lock(&sip->macc_lck);
3244 		else
3245 			read_lock(&sdeb_fake_rw_lck);
3246 	}
3247 }
3248 
3249 static inline void
3250 sdeb_read_unlock(struct sdeb_store_info *sip)
3251 {
3252 	if (sdebug_no_rwlock) {
3253 		if (sip)
3254 			__release(&sip->macc_lck);
3255 		else
3256 			__release(&sdeb_fake_rw_lck);
3257 	} else {
3258 		if (sip)
3259 			read_unlock(&sip->macc_lck);
3260 		else
3261 			read_unlock(&sdeb_fake_rw_lck);
3262 	}
3263 }
3264 
3265 static inline void
3266 sdeb_write_lock(struct sdeb_store_info *sip)
3267 {
3268 	if (sdebug_no_rwlock) {
3269 		if (sip)
3270 			__acquire(&sip->macc_lck);
3271 		else
3272 			__acquire(&sdeb_fake_rw_lck);
3273 	} else {
3274 		if (sip)
3275 			write_lock(&sip->macc_lck);
3276 		else
3277 			write_lock(&sdeb_fake_rw_lck);
3278 	}
3279 }
3280 
3281 static inline void
3282 sdeb_write_unlock(struct sdeb_store_info *sip)
3283 {
3284 	if (sdebug_no_rwlock) {
3285 		if (sip)
3286 			__release(&sip->macc_lck);
3287 		else
3288 			__release(&sdeb_fake_rw_lck);
3289 	} else {
3290 		if (sip)
3291 			write_unlock(&sip->macc_lck);
3292 		else
3293 			write_unlock(&sdeb_fake_rw_lck);
3294 	}
3295 }
3296 
3297 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3298 {
3299 	bool check_prot;
3300 	u32 num;
3301 	u32 ei_lba;
3302 	int ret;
3303 	u64 lba;
3304 	struct sdeb_store_info *sip = devip2sip(devip, true);
3305 	u8 *cmd = scp->cmnd;
3306 
3307 	switch (cmd[0]) {
3308 	case READ_16:
3309 		ei_lba = 0;
3310 		lba = get_unaligned_be64(cmd + 2);
3311 		num = get_unaligned_be32(cmd + 10);
3312 		check_prot = true;
3313 		break;
3314 	case READ_10:
3315 		ei_lba = 0;
3316 		lba = get_unaligned_be32(cmd + 2);
3317 		num = get_unaligned_be16(cmd + 7);
3318 		check_prot = true;
3319 		break;
3320 	case READ_6:
3321 		ei_lba = 0;
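		/* READ(6): 21-bit LBA; a transfer length of 0 means 256 blocks */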
3322 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3323 		      (u32)(cmd[1] & 0x1f) << 16;
3324 		num = (0 == cmd[4]) ? 256 : cmd[4];
3325 		check_prot = true;
3326 		break;
3327 	case READ_12:
3328 		ei_lba = 0;
3329 		lba = get_unaligned_be32(cmd + 2);
3330 		num = get_unaligned_be32(cmd + 6);
3331 		check_prot = true;
3332 		break;
3333 	case XDWRITEREAD_10:
3334 		ei_lba = 0;
3335 		lba = get_unaligned_be32(cmd + 2);
3336 		num = get_unaligned_be16(cmd + 7);
3337 		check_prot = false;
3338 		break;
3339 	default:	/* assume READ(32) */
3340 		lba = get_unaligned_be64(cmd + 12);
3341 		ei_lba = get_unaligned_be32(cmd + 20);
3342 		num = get_unaligned_be32(cmd + 28);
3343 		check_prot = false;
3344 		break;
3345 	}
3346 	if (unlikely(have_dif_prot && check_prot)) {
3347 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3348 		    (cmd[1] & 0xe0)) {
3349 			mk_sense_invalid_opcode(scp);
3350 			return check_condition_result;
3351 		}
3352 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3353 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3354 		    (cmd[1] & 0xe0) == 0)
3355 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3356 				    "to DIF device\n");
3357 	}
3358 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3359 		     atomic_read(&sdeb_inject_pending))) {
3360 		num /= 2;
3361 		atomic_set(&sdeb_inject_pending, 0);
3362 	}
3363 
3364 	ret = check_device_access_params(scp, lba, num, false);
3365 	if (ret)
3366 		return ret;
3367 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3368 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3369 		     ((lba + num) > sdebug_medium_error_start))) {
3370 		/* claim unrecoverable read error */
3371 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3372 		/* set info field and valid bit for fixed descriptor */
3373 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3374 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3375 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3376 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3377 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3378 		}
3379 		scsi_set_resid(scp, scsi_bufflen(scp));
3380 		return check_condition_result;
3381 	}
3382 
3383 	sdeb_read_lock(sip);
3384 
3385 	/* DIX + T10 DIF */
3386 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3387 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3388 		case 1: /* Guard tag error */
3389 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3390 				sdeb_read_unlock(sip);
3391 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3392 				return check_condition_result;
3393 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3394 				sdeb_read_unlock(sip);
3395 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3396 				return illegal_condition_result;
3397 			}
3398 			break;
3399 		case 3: /* Reference tag error */
3400 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3401 				sdeb_read_unlock(sip);
3402 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3403 				return check_condition_result;
3404 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3405 				sdeb_read_unlock(sip);
3406 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3407 				return illegal_condition_result;
3408 			}
3409 			break;
3410 		}
3411 	}
3412 
3413 	ret = do_device_access(sip, scp, 0, lba, num, false);
3414 	sdeb_read_unlock(sip);
3415 	if (unlikely(ret == -1))
3416 		return DID_ERROR << 16;
3417 
3418 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3419 
3420 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3421 		     atomic_read(&sdeb_inject_pending))) {
3422 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3423 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3424 			atomic_set(&sdeb_inject_pending, 0);
3425 			return check_condition_result;
3426 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3427 			/* Logical block guard check failed */
3428 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3429 			atomic_set(&sdeb_inject_pending, 0);
3430 			return illegal_condition_result;
3431 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3432 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3433 			atomic_set(&sdeb_inject_pending, 0);
3434 			return illegal_condition_result;
3435 		}
3436 	}
3437 	return 0;
3438 }
3439 
3440 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3441 			     unsigned int sectors, u32 ei_lba)
3442 {
3443 	int ret;
3444 	struct t10_pi_tuple *sdt;
3445 	void *daddr;
3446 	sector_t sector = start_sec;
3447 	int ppage_offset;
3448 	int dpage_offset;
3449 	struct sg_mapping_iter diter;
3450 	struct sg_mapping_iter piter;
3451 
3452 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3453 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3454 
3455 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3456 			scsi_prot_sg_count(SCpnt),
3457 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3458 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3459 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3460 
3461 	/* For each protection page */
3462 	while (sg_miter_next(&piter)) {
3463 		dpage_offset = 0;
3464 		if (WARN_ON(!sg_miter_next(&diter))) {
3465 			ret = 0x01;
3466 			goto out;
3467 		}
3468 
3469 		for (ppage_offset = 0; ppage_offset < piter.length;
3470 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3471 			/* If we're at the end of the current
3472 			 * data page advance to the next one
3473 			 */
3474 			if (dpage_offset >= diter.length) {
3475 				if (WARN_ON(!sg_miter_next(&diter))) {
3476 					ret = 0x01;
3477 					goto out;
3478 				}
3479 				dpage_offset = 0;
3480 			}
3481 
3482 			sdt = piter.addr + ppage_offset;
3483 			daddr = diter.addr + dpage_offset;
3484 
3485 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3486 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3487 				if (ret)
3488 					goto out;
3489 			}
3490 
3491 			sector++;
3492 			ei_lba++;
3493 			dpage_offset += sdebug_sector_size;
3494 		}
3495 		diter.consumed = dpage_offset;
3496 		sg_miter_stop(&diter);
3497 	}
3498 	sg_miter_stop(&piter);
3499 
3500 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3501 	dix_writes++;
3502 
3503 	return 0;
3504 
3505 out:
3506 	dif_errors++;
3507 	sg_miter_stop(&diter);
3508 	sg_miter_stop(&piter);
3509 	return ret;
3510 }
3511 
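/*
 * Logical block provisioning bookkeeping: each bit in sip->map_storep
 * covers sdebug_unmap_granularity LBAs, offset by sdebug_unmap_alignment.
 * These helpers convert between an LBA and its bit index in that map.
 */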
3512 static unsigned long lba_to_map_index(sector_t lba)
3513 {
3514 	if (sdebug_unmap_alignment)
3515 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3516 	sector_div(lba, sdebug_unmap_granularity);
3517 	return lba;
3518 }
3519 
3520 static sector_t map_index_to_lba(unsigned long index)
3521 {
3522 	sector_t lba = index * sdebug_unmap_granularity;
3523 
3524 	if (sdebug_unmap_alignment)
3525 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3526 	return lba;
3527 }
3528 
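/*
 * Returns whether lba is currently mapped; *num is set to the number of
 * blocks, starting at lba, sharing that state (capped at the store end).
 */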
3529 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3530 			      unsigned int *num)
3531 {
3532 	sector_t end;
3533 	unsigned int mapped;
3534 	unsigned long index;
3535 	unsigned long next;
3536 
3537 	index = lba_to_map_index(lba);
3538 	mapped = test_bit(index, sip->map_storep);
3539 
3540 	if (mapped)
3541 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3542 	else
3543 		next = find_next_bit(sip->map_storep, map_size, index);
3544 
3545 	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3546 	*num = end - lba;
3547 	return mapped;
3548 }
3549 
3550 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3551 		       unsigned int len)
3552 {
3553 	sector_t end = lba + len;
3554 
3555 	while (lba < end) {
3556 		unsigned long index = lba_to_map_index(lba);
3557 
3558 		if (index < map_size)
3559 			set_bit(index, sip->map_storep);
3560 
3561 		lba = map_index_to_lba(index + 1);
3562 	}
3563 }
3564 
3565 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3566 			 unsigned int len)
3567 {
3568 	sector_t end = lba + len;
3569 	u8 *fsp = sip->storep;
3570 
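	/* Only granules fully contained in the range are actually unmapped */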
3571 	while (lba < end) {
3572 		unsigned long index = lba_to_map_index(lba);
3573 
3574 		if (lba == map_index_to_lba(index) &&
3575 		    lba + sdebug_unmap_granularity <= end &&
3576 		    index < map_size) {
3577 			clear_bit(index, sip->map_storep);
3578 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros; LBPRZ=2: 0xff bytes */
3579 				memset(fsp + lba * sdebug_sector_size,
3580 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3581 				       sdebug_sector_size *
3582 				       sdebug_unmap_granularity);
3583 			}
3584 			if (sip->dif_storep) {
3585 				memset(sip->dif_storep + lba, 0xff,
3586 				       sizeof(*sip->dif_storep) *
3587 				       sdebug_unmap_granularity);
3588 			}
3589 		}
3590 		lba = map_index_to_lba(index + 1);
3591 	}
3592 }
3593 
3594 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3595 {
3596 	bool check_prot;
3597 	u32 num;
3598 	u32 ei_lba;
3599 	int ret;
3600 	u64 lba;
3601 	struct sdeb_store_info *sip = devip2sip(devip, true);
3602 	u8 *cmd = scp->cmnd;
3603 
3604 	switch (cmd[0]) {
3605 	case WRITE_16:
3606 		ei_lba = 0;
3607 		lba = get_unaligned_be64(cmd + 2);
3608 		num = get_unaligned_be32(cmd + 10);
3609 		check_prot = true;
3610 		break;
3611 	case WRITE_10:
3612 		ei_lba = 0;
3613 		lba = get_unaligned_be32(cmd + 2);
3614 		num = get_unaligned_be16(cmd + 7);
3615 		check_prot = true;
3616 		break;
3617 	case WRITE_6:
3618 		ei_lba = 0;
3619 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3620 		      (u32)(cmd[1] & 0x1f) << 16;
3621 		num = (0 == cmd[4]) ? 256 : cmd[4];
3622 		check_prot = true;
3623 		break;
3624 	case WRITE_12:
3625 		ei_lba = 0;
3626 		lba = get_unaligned_be32(cmd + 2);
3627 		num = get_unaligned_be32(cmd + 6);
3628 		check_prot = true;
3629 		break;
3630 	case XDWRITEREAD_10:	/* opcode 0x53, as in resp_read_dt0() */
3631 		ei_lba = 0;
3632 		lba = get_unaligned_be32(cmd + 2);
3633 		num = get_unaligned_be16(cmd + 7);
3634 		check_prot = false;
3635 		break;
3636 	default:	/* assume WRITE(32) */
3637 		lba = get_unaligned_be64(cmd + 12);
3638 		ei_lba = get_unaligned_be32(cmd + 20);
3639 		num = get_unaligned_be32(cmd + 28);
3640 		check_prot = false;
3641 		break;
3642 	}
3643 	if (unlikely(have_dif_prot && check_prot)) {
3644 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3645 		    (cmd[1] & 0xe0)) {
3646 			mk_sense_invalid_opcode(scp);
3647 			return check_condition_result;
3648 		}
3649 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3650 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3651 		    (cmd[1] & 0xe0) == 0)
3652 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3653 				    "to DIF device\n");
3654 	}
3655 
3656 	sdeb_write_lock(sip);
3657 	ret = check_device_access_params(scp, lba, num, true);
3658 	if (ret) {
3659 		sdeb_write_unlock(sip);
3660 		return ret;
3661 	}
3662 
3663 	/* DIX + T10 DIF */
3664 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3665 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3666 		case 1: /* Guard tag error */
3667 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3668 				sdeb_write_unlock(sip);
3669 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3670 				return illegal_condition_result;
3671 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3672 				sdeb_write_unlock(sip);
3673 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3674 				return check_condition_result;
3675 			}
3676 			break;
3677 		case 3: /* Reference tag error */
3678 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3679 				sdeb_write_unlock(sip);
3680 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3681 				return illegal_condition_result;
3682 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3683 				sdeb_write_unlock(sip);
3684 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3685 				return check_condition_result;
3686 			}
3687 			break;
3688 		}
3689 	}
3690 
3691 	ret = do_device_access(sip, scp, 0, lba, num, true);
3692 	if (unlikely(scsi_debug_lbp()))
3693 		map_region(sip, lba, num);
3694 	/* If ZBC zone then bump its write pointer */
3695 	if (sdebug_dev_is_zoned(devip))
3696 		zbc_inc_wp(devip, lba, num);
3697 	sdeb_write_unlock(sip);
3698 	if (unlikely(-1 == ret))
3699 		return DID_ERROR << 16;
3700 	else if (unlikely(sdebug_verbose &&
3701 			  (ret < (num * sdebug_sector_size))))
3702 		sdev_printk(KERN_INFO, scp->device,
3703 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3704 			    my_name, num * sdebug_sector_size, ret);
3705 
3706 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3707 		     atomic_read(&sdeb_inject_pending))) {
3708 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3709 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3710 			atomic_set(&sdeb_inject_pending, 0);
3711 			return check_condition_result;
3712 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3713 			/* Logical block guard check failed */
3714 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3715 			atomic_set(&sdeb_inject_pending, 0);
3716 			return illegal_condition_result;
3717 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3718 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3719 			atomic_set(&sdeb_inject_pending, 0);
3720 			return illegal_condition_result;
3721 		}
3722 	}
3723 	return 0;
3724 }
3725 
3726 /*
3727  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3728  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3729  */
3730 static int resp_write_scat(struct scsi_cmnd *scp,
3731 			   struct sdebug_dev_info *devip)
3732 {
3733 	u8 *cmd = scp->cmnd;
3734 	u8 *lrdp = NULL;
3735 	u8 *up;
3736 	struct sdeb_store_info *sip = devip2sip(devip, true);
3737 	u8 wrprotect;
3738 	u16 lbdof, num_lrd, k;
3739 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3740 	u32 lb_size = sdebug_sector_size;
3741 	u32 ei_lba;
3742 	u64 lba;
3743 	int ret, res;
3744 	bool is_16;
3745 	static const u32 lrd_size = 32; /* descriptor size; the parameter list header is also 32 bytes */
3746 
3747 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3748 		is_16 = false;
3749 		wrprotect = (cmd[10] >> 5) & 0x7;
3750 		lbdof = get_unaligned_be16(cmd + 12);
3751 		num_lrd = get_unaligned_be16(cmd + 16);
3752 		bt_len = get_unaligned_be32(cmd + 28);
3753 	} else {        /* that leaves WRITE SCATTERED(16) */
3754 		is_16 = true;
3755 		wrprotect = (cmd[2] >> 5) & 0x7;
3756 		lbdof = get_unaligned_be16(cmd + 4);
3757 		num_lrd = get_unaligned_be16(cmd + 8);
3758 		bt_len = get_unaligned_be32(cmd + 10);
3759 		if (unlikely(have_dif_prot)) {
3760 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3761 			    wrprotect) {
3762 				mk_sense_invalid_opcode(scp);
3763 				return illegal_condition_result;
3764 			}
3765 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3766 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3767 			     wrprotect == 0)
3768 				sdev_printk(KERN_ERR, scp->device,
3769 					    "Unprotected WR to DIF device\n");
3770 		}
3771 	}
3772 	if ((num_lrd == 0) || (bt_len == 0))
3773 		return 0;       /* T10 says these do-nothings are not errors */
3774 	if (lbdof == 0) {
3775 		if (sdebug_verbose)
3776 			sdev_printk(KERN_INFO, scp->device,
3777 				"%s: %s: LB Data Offset field bad\n",
3778 				my_name, __func__);
3779 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3780 		return illegal_condition_result;
3781 	}
3782 	lbdof_blen = lbdof * lb_size;
3783 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3784 		if (sdebug_verbose)
3785 			sdev_printk(KERN_INFO, scp->device,
3786 				"%s: %s: LBA range descriptors don't fit\n",
3787 				my_name, __func__);
3788 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3789 		return illegal_condition_result;
3790 	}
3791 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3792 	if (lrdp == NULL)
3793 		return SCSI_MLQUEUE_HOST_BUSY;
3794 	if (sdebug_verbose)
3795 		sdev_printk(KERN_INFO, scp->device,
3796 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3797 			my_name, __func__, lbdof_blen);
3798 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3799 	if (res == -1) {
3800 		ret = DID_ERROR << 16;
3801 		goto err_out;
3802 	}
3803 
3804 	sdeb_write_lock(sip);
3805 	sg_off = lbdof_blen;
3806 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
3807 	cum_lb = 0;
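	/*
	 * LBA range descriptors follow the 32-byte header, one 32-byte
	 * descriptor per range: an 8-byte LBA at offset 0, a 4-byte block
	 * count at offset 8 and, for the 32-byte CDB variant, the expected
	 * initial LBA at offset 12.
	 */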
3808 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3809 		lba = get_unaligned_be64(up + 0);
3810 		num = get_unaligned_be32(up + 8);
3811 		if (sdebug_verbose)
3812 			sdev_printk(KERN_INFO, scp->device,
3813 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3814 				my_name, __func__, k, lba, num, sg_off);
3815 		if (num == 0)
3816 			continue;
3817 		ret = check_device_access_params(scp, lba, num, true);
3818 		if (ret)
3819 			goto err_out_unlock;
3820 		num_by = num * lb_size;
3821 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3822 
3823 		if ((cum_lb + num) > bt_len) {
3824 			if (sdebug_verbose)
3825 				sdev_printk(KERN_INFO, scp->device,
3826 				    "%s: %s: sum of blocks > data provided\n",
3827 				    my_name, __func__);
3828 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3829 					0);
3830 			ret = illegal_condition_result;
3831 			goto err_out_unlock;
3832 		}
3833 
3834 		/* DIX + T10 DIF */
3835 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3836 			int prot_ret = prot_verify_write(scp, lba, num,
3837 							 ei_lba);
3838 
3839 			if (prot_ret) {
3840 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3841 						prot_ret);
3842 				ret = illegal_condition_result;
3843 				goto err_out_unlock;
3844 			}
3845 		}
3846 
3847 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3848 		/* If ZBC zone then bump its write pointer */
3849 		if (sdebug_dev_is_zoned(devip))
3850 			zbc_inc_wp(devip, lba, num);
3851 		if (unlikely(scsi_debug_lbp()))
3852 			map_region(sip, lba, num);
3853 		if (unlikely(-1 == ret)) {
3854 			ret = DID_ERROR << 16;
3855 			goto err_out_unlock;
3856 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3857 			sdev_printk(KERN_INFO, scp->device,
3858 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3859 			    my_name, num_by, ret);
3860 
3861 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3862 			     atomic_read(&sdeb_inject_pending))) {
3863 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3864 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3865 				atomic_set(&sdeb_inject_pending, 0);
3866 				ret = check_condition_result;
3867 				goto err_out_unlock;
3868 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3869 				/* Logical block guard check failed */
3870 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3871 				atomic_set(&sdeb_inject_pending, 0);
3872 				ret = illegal_condition_result;
3873 				goto err_out_unlock;
3874 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3875 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3876 				atomic_set(&sdeb_inject_pending, 0);
3877 				ret = illegal_condition_result;
3878 				goto err_out_unlock;
3879 			}
3880 		}
3881 		sg_off += num_by;
3882 		cum_lb += num;
3883 	}
3884 	ret = 0;
3885 err_out_unlock:
3886 	sdeb_write_unlock(sip);
3887 err_out:
3888 	kfree(lrdp);
3889 	return ret;
3890 }
3891 
3892 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3893 			   u32 ei_lba, bool unmap, bool ndob)
3894 {
3895 	struct scsi_device *sdp = scp->device;
3896 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3897 	unsigned long long i;
3898 	u64 block, lbaa;
3899 	u32 lb_size = sdebug_sector_size;
3900 	int ret;
3901 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3902 						scp->device->hostdata, true);
3903 	u8 *fs1p;
3904 	u8 *fsp;
3905 
3906 	sdeb_write_lock(sip);
3907 
3908 	ret = check_device_access_params(scp, lba, num, true);
3909 	if (ret) {
3910 		sdeb_write_unlock(sip);
3911 		return ret;
3912 	}
3913 
3914 	if (unmap && scsi_debug_lbp()) {
3915 		unmap_region(sip, lba, num);
3916 		goto out;
3917 	}
3918 	lbaa = lba;
3919 	block = do_div(lbaa, sdebug_store_sectors);
3920 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3921 	fsp = sip->storep;
3922 	fs1p = fsp + (block * lb_size);
3923 	if (ndob) {
3924 		memset(fs1p, 0, lb_size);
3925 		ret = 0;
3926 	} else
3927 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3928 
3929 	if (-1 == ret) {
3930 		sdeb_write_unlock(sip);
3931 		return DID_ERROR << 16;
3932 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3933 		sdev_printk(KERN_INFO, scp->device,
3934 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3935 			    my_name, "write same", lb_size, ret);
3936 
3937 	/* Copy first sector to remaining blocks */
3938 	for (i = 1 ; i < num ; i++) {
3939 		lbaa = lba + i;
3940 		block = do_div(lbaa, sdebug_store_sectors);
3941 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3942 	}
3943 	if (scsi_debug_lbp())
3944 		map_region(sip, lba, num);
3945 	/* If ZBC zone then bump its write pointer */
3946 	if (sdebug_dev_is_zoned(devip))
3947 		zbc_inc_wp(devip, lba, num);
3948 out:
3949 	sdeb_write_unlock(sip);
3950 
3951 	return 0;
3952 }
3953 
3954 static int resp_write_same_10(struct scsi_cmnd *scp,
3955 			      struct sdebug_dev_info *devip)
3956 {
3957 	u8 *cmd = scp->cmnd;
3958 	u32 lba;
3959 	u16 num;
3960 	u32 ei_lba = 0;
3961 	bool unmap = false;
3962 
3963 	if (cmd[1] & 0x8) {
3964 		if (sdebug_lbpws10 == 0) {
3965 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3966 			return check_condition_result;
3967 		} else
3968 			unmap = true;
3969 	}
3970 	lba = get_unaligned_be32(cmd + 2);
3971 	num = get_unaligned_be16(cmd + 7);
3972 	if (num > sdebug_write_same_length) {
3973 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3974 		return check_condition_result;
3975 	}
3976 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3977 }
3978 
3979 static int resp_write_same_16(struct scsi_cmnd *scp,
3980 			      struct sdebug_dev_info *devip)
3981 {
3982 	u8 *cmd = scp->cmnd;
3983 	u64 lba;
3984 	u32 num;
3985 	u32 ei_lba = 0;
3986 	bool unmap = false;
3987 	bool ndob = false;
3988 
3989 	if (cmd[1] & 0x8) {	/* UNMAP */
3990 		if (sdebug_lbpws == 0) {
3991 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3992 			return check_condition_result;
3993 		} else
3994 			unmap = true;
3995 	}
3996 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3997 		ndob = true;
3998 	lba = get_unaligned_be64(cmd + 2);
3999 	num = get_unaligned_be32(cmd + 10);
4000 	if (num > sdebug_write_same_length) {
4001 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4002 		return check_condition_result;
4003 	}
4004 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4005 }
4006 
4007 /* Note the mode field is in the same position as the (lower) service action
4008  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
4009  * each mode of this command should be reported separately; left for future. */
4010 static int resp_write_buffer(struct scsi_cmnd *scp,
4011 			     struct sdebug_dev_info *devip)
4012 {
4013 	u8 *cmd = scp->cmnd;
4014 	struct scsi_device *sdp = scp->device;
4015 	struct sdebug_dev_info *dp;
4016 	u8 mode;
4017 
4018 	mode = cmd[1] & 0x1f;
4019 	switch (mode) {
4020 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4021 		/* set UAs on this device only */
4022 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4023 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4024 		break;
4025 	case 0x5:	/* download MC, save and ACT */
4026 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4027 		break;
4028 	case 0x6:	/* download MC with offsets and ACT */
4029 		/* set UAs on most devices (LUs) in this target */
4030 		list_for_each_entry(dp,
4031 				    &devip->sdbg_host->dev_info_list,
4032 				    dev_list)
4033 			if (dp->target == sdp->id) {
4034 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4035 				if (devip != dp)
4036 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4037 						dp->uas_bm);
4038 			}
4039 		break;
4040 	case 0x7:	/* download MC with offsets, save, and ACT */
4041 		/* set UA on all devices (LUs) in this target */
4042 		list_for_each_entry(dp,
4043 				    &devip->sdbg_host->dev_info_list,
4044 				    dev_list)
4045 			if (dp->target == sdp->id)
4046 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4047 					dp->uas_bm);
4048 		break;
4049 	default:
4050 		/* do nothing for this command for other mode values */
4051 		break;
4052 	}
4053 	return 0;
4054 }
4055 
4056 static int resp_comp_write(struct scsi_cmnd *scp,
4057 			   struct sdebug_dev_info *devip)
4058 {
4059 	u8 *cmd = scp->cmnd;
4060 	u8 *arr;
4061 	struct sdeb_store_info *sip = devip2sip(devip, true);
4062 	u64 lba;
4063 	u32 dnum;
4064 	u32 lb_size = sdebug_sector_size;
4065 	u8 num;
4066 	int ret;
4067 	int retval = 0;
4068 
4069 	lba = get_unaligned_be64(cmd + 2);
4070 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4071 	if (0 == num)
4072 		return 0;	/* degenerate case, not an error */
4073 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4074 	    (cmd[1] & 0xe0)) {
4075 		mk_sense_invalid_opcode(scp);
4076 		return check_condition_result;
4077 	}
4078 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4079 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4080 	    (cmd[1] & 0xe0) == 0)
4081 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4082 			    "to DIF device\n");
4083 	ret = check_device_access_params(scp, lba, num, false);
4084 	if (ret)
4085 		return ret;
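	/*
	 * The data-out buffer of COMPARE AND WRITE carries 2 * num blocks:
	 * first the verify data, then the data written if the compare passes.
	 */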
4086 	dnum = 2 * num;
4087 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4088 	if (NULL == arr) {
4089 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4090 				INSUFF_RES_ASCQ);
4091 		return check_condition_result;
4092 	}
4093 
4094 	sdeb_write_lock(sip);
4095 
4096 	ret = do_dout_fetch(scp, dnum, arr);
4097 	if (ret == -1) {
4098 		retval = DID_ERROR << 16;
4099 		goto cleanup;
4100 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4101 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4102 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4103 			    dnum * lb_size, ret);
4104 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4105 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4106 		retval = check_condition_result;
4107 		goto cleanup;
4108 	}
4109 	if (scsi_debug_lbp())
4110 		map_region(sip, lba, num);
4111 cleanup:
4112 	sdeb_write_unlock(sip);
4113 	kfree(arr);
4114 	return retval;
4115 }
4116 
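/* One 16-byte UNMAP block descriptor, as laid out in the parameter data */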
4117 struct unmap_block_desc {
4118 	__be64	lba;
4119 	__be32	blocks;
4120 	__be32	__reserved;
4121 };
4122 
4123 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4124 {
4125 	unsigned char *buf;
4126 	struct unmap_block_desc *desc;
4127 	struct sdeb_store_info *sip = devip2sip(devip, true);
4128 	unsigned int i, payload_len, descriptors;
4129 	int ret;
4130 
4131 	if (!scsi_debug_lbp())
4132 		return 0;	/* fib and say it's done */
4133 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4134 	BUG_ON(scsi_bufflen(scp) != payload_len);
4135 
4136 	descriptors = (payload_len - 8) / 16;
4137 	if (descriptors > sdebug_unmap_max_desc) {
4138 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4139 		return check_condition_result;
4140 	}
4141 
4142 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4143 	if (!buf) {
4144 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4145 				INSUFF_RES_ASCQ);
4146 		return check_condition_result;
4147 	}
4148 
4149 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4150 
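	/*
	 * UNMAP parameter list: a 2-byte data length (bytes following that
	 * field), a 2-byte block descriptor data length, 4 reserved bytes,
	 * then the 16-byte block descriptors starting at offset 8.
	 */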
4151 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4152 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4153 
4154 	desc = (void *)&buf[8];
4155 
4156 	sdeb_write_lock(sip);
4157 
4158 	for (i = 0 ; i < descriptors ; i++) {
4159 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4160 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4161 
4162 		ret = check_device_access_params(scp, lba, num, true);
4163 		if (ret)
4164 			goto out;
4165 
4166 		unmap_region(sip, lba, num);
4167 	}
4168 
4169 	ret = 0;
4170 
4171 out:
4172 	sdeb_write_unlock(sip);
4173 	kfree(buf);
4174 
4175 	return ret;
4176 }
4177 
4178 #define SDEBUG_GET_LBA_STATUS_LEN 32
4179 
4180 static int resp_get_lba_status(struct scsi_cmnd *scp,
4181 			       struct sdebug_dev_info *devip)
4182 {
4183 	u8 *cmd = scp->cmnd;
4184 	u64 lba;
4185 	u32 alloc_len, mapped, num;
4186 	int ret;
4187 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4188 
4189 	lba = get_unaligned_be64(cmd + 2);
4190 	alloc_len = get_unaligned_be32(cmd + 10);
4191 
4192 	if (alloc_len < 24)
4193 		return 0;
4194 
4195 	ret = check_device_access_params(scp, lba, 1, false);
4196 	if (ret)
4197 		return ret;
4198 
4199 	if (scsi_debug_lbp()) {
4200 		struct sdeb_store_info *sip = devip2sip(devip, true);
4201 
4202 		mapped = map_state(sip, lba, &num);
4203 	} else {
4204 		mapped = 1;
4205 		/* following just in case virtual_gb changed */
4206 		sdebug_capacity = get_sdebug_capacity();
4207 		if (sdebug_capacity - lba <= 0xffffffff)
4208 			num = sdebug_capacity - lba;
4209 		else
4210 			num = 0xffffffff;
4211 	}
4212 
4213 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4214 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4215 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4216 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4217 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4218 
4219 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4220 }
4221 
4222 static int resp_sync_cache(struct scsi_cmnd *scp,
4223 			   struct sdebug_dev_info *devip)
4224 {
4225 	int res = 0;
4226 	u64 lba;
4227 	u32 num_blocks;
4228 	u8 *cmd = scp->cmnd;
4229 
4230 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4231 		lba = get_unaligned_be32(cmd + 2);
4232 		num_blocks = get_unaligned_be16(cmd + 7);
4233 	} else {				/* SYNCHRONIZE_CACHE(16) */
4234 		lba = get_unaligned_be64(cmd + 2);
4235 		num_blocks = get_unaligned_be32(cmd + 10);
4236 	}
4237 	if (lba + num_blocks > sdebug_capacity) {
4238 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4239 		return check_condition_result;
4240 	}
4241 	if (!write_since_sync || (cmd[1] & 0x2))
4242 		res = SDEG_RES_IMMED_MASK;
4243 	else		/* delay if write_since_sync and IMMED clear */
4244 		write_since_sync = false;
4245 	return res;
4246 }
4247 
4248 /*
4249  * Assuming LBA+num_blocks is not out-of-range, this function returns
4250  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4251  * cache, and GOOD status otherwise. Model a disk with a big cache and
4252  * always yield CONDITION MET. As a side effect, try to bring the backing
4253  * range in main memory into the cache associated with the CPU(s).
4254  */
4255 static int resp_pre_fetch(struct scsi_cmnd *scp,
4256 			  struct sdebug_dev_info *devip)
4257 {
4258 	int res = 0;
4259 	u64 lba;
4260 	u64 block, rest = 0;
4261 	u32 nblks;
4262 	u8 *cmd = scp->cmnd;
4263 	struct sdeb_store_info *sip = devip2sip(devip, true);
4264 	u8 *fsp = sip->storep;
4265 
4266 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4267 		lba = get_unaligned_be32(cmd + 2);
4268 		nblks = get_unaligned_be16(cmd + 7);
4269 	} else {			/* PRE-FETCH(16) */
4270 		lba = get_unaligned_be64(cmd + 2);
4271 		nblks = get_unaligned_be32(cmd + 10);
4272 	}
4273 	if (lba + nblks > sdebug_capacity) {
4274 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4275 		return check_condition_result;
4276 	}
4277 	if (!fsp)
4278 		goto fini;
4279 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4280 	block = do_div(lba, sdebug_store_sectors);
4281 	if (block + nblks > sdebug_store_sectors)
4282 		rest = block + nblks - sdebug_store_sectors;
4283 
4284 	/* Try to bring the PRE-FETCH range into CPU's cache */
4285 	sdeb_read_lock(sip);
4286 	prefetch_range(fsp + (sdebug_sector_size * block),
4287 		       (nblks - rest) * sdebug_sector_size);
4288 	if (rest)
4289 		prefetch_range(fsp, rest * sdebug_sector_size);
4290 	sdeb_read_unlock(sip);
4291 fini:
4292 	if (cmd[1] & 0x2)
4293 		res = SDEG_RES_IMMED_MASK;
4294 	return res | condition_met_result;
4295 }
4296 
4297 #define RL_BUCKET_ELEMS 8
4298 
4299 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4300  * (W-LUN), the normal Linux scanning logic does not associate it with a
4301  * device (e.g. /dev/sg7). The following magic will make that association:
4302  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4303  * where <n> is a host number. If there are multiple targets in a host then
4304  * the above will associate a W-LUN to each target. To only get a W-LUN
4305  * for target 2, then use "echo '- 2 49409' > scan" .
4306  */
4307 static int resp_report_luns(struct scsi_cmnd *scp,
4308 			    struct sdebug_dev_info *devip)
4309 {
4310 	unsigned char *cmd = scp->cmnd;
4311 	unsigned int alloc_len;
4312 	unsigned char select_report;
4313 	u64 lun;
4314 	struct scsi_lun *lun_p;
4315 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4316 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4317 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4318 	unsigned int tlun_cnt;	/* total LUN count */
4319 	unsigned int rlen;	/* response length (in bytes) */
4320 	int k, j, n, res;
4321 	unsigned int off_rsp = 0;
4322 	const int sz_lun = sizeof(struct scsi_lun);
4323 
4324 	clear_luns_changed_on_target(devip);
4325 
4326 	select_report = cmd[2];
4327 	alloc_len = get_unaligned_be32(cmd + 6);
4328 
4329 	if (alloc_len < 4) {
4330 		pr_err("alloc len too small %d\n", alloc_len);
4331 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4332 		return check_condition_result;
4333 	}
4334 
4335 	switch (select_report) {
4336 	case 0:		/* all LUNs apart from W-LUNs */
4337 		lun_cnt = sdebug_max_luns;
4338 		wlun_cnt = 0;
4339 		break;
4340 	case 1:		/* only W-LUNs */
4341 		lun_cnt = 0;
4342 		wlun_cnt = 1;
4343 		break;
4344 	case 2:		/* all LUNs */
4345 		lun_cnt = sdebug_max_luns;
4346 		wlun_cnt = 1;
4347 		break;
4348 	case 0x10:	/* only administrative LUs */
4349 	case 0x11:	/* see SPC-5 */
4350 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4351 	default:
4352 		pr_debug("select report invalid %d\n", select_report);
4353 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4354 		return check_condition_result;
4355 	}
4356 
4357 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4358 		--lun_cnt;
4359 
4360 	tlun_cnt = lun_cnt + wlun_cnt;
4361 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4362 	scsi_set_resid(scp, scsi_bufflen(scp));
4363 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4364 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4365 
4366 	/* loops rely on the response header being the same size as a LUN (8 bytes) */
4367 	lun = sdebug_no_lun_0 ? 1 : 0;
4368 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4369 		memset(arr, 0, sizeof(arr));
4370 		lun_p = (struct scsi_lun *)&arr[0];
4371 		if (k == 0) {
4372 			put_unaligned_be32(rlen, &arr[0]);
4373 			++lun_p;
4374 			j = 1;
4375 		}
4376 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4377 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4378 				break;
4379 			int_to_scsilun(lun++, lun_p);
4380 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4381 				lun_p->scsi_lun[0] |= 0x40;
4382 		}
4383 		if (j < RL_BUCKET_ELEMS)
4384 			break;
4385 		n = j * sz_lun;
4386 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4387 		if (res)
4388 			return res;
4389 		off_rsp += n;
4390 	}
4391 	if (wlun_cnt) {
4392 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4393 		++j;
4394 	}
4395 	if (j > 0)
4396 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4397 	return res;
4398 }
4399 
4400 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4401 {
4402 	bool is_bytchk3 = false;
4403 	u8 bytchk;
4404 	int ret, j;
4405 	u32 vnum, a_num, off;
4406 	const u32 lb_size = sdebug_sector_size;
4407 	u64 lba;
4408 	u8 *arr;
4409 	u8 *cmd = scp->cmnd;
4410 	struct sdeb_store_info *sip = devip2sip(devip, true);
4411 
4412 	bytchk = (cmd[1] >> 1) & 0x3;
4413 	if (bytchk == 0) {
4414 		return 0;	/* always claim internal verify okay */
4415 	} else if (bytchk == 2) {
4416 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4417 		return check_condition_result;
4418 	} else if (bytchk == 3) {
4419 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4420 	}
4421 	switch (cmd[0]) {
4422 	case VERIFY_16:
4423 		lba = get_unaligned_be64(cmd + 2);
4424 		vnum = get_unaligned_be32(cmd + 10);
4425 		break;
4426 	case VERIFY:		/* is VERIFY(10) */
4427 		lba = get_unaligned_be32(cmd + 2);
4428 		vnum = get_unaligned_be16(cmd + 7);
4429 		break;
4430 	default:
4431 		mk_sense_invalid_opcode(scp);
4432 		return check_condition_result;
4433 	}
4434 	if (vnum == 0)
4435 		return 0;	/* not an error */
4436 	a_num = is_bytchk3 ? 1 : vnum;
4437 	/* Treat following check like one for read (i.e. no write) access */
4438 	ret = check_device_access_params(scp, lba, a_num, false);
4439 	if (ret)
4440 		return ret;
4441 
4442 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4443 	if (!arr) {
4444 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4445 				INSUFF_RES_ASCQ);
4446 		return check_condition_result;
4447 	}
4448 	/* Not changing store, so only need read access */
4449 	sdeb_read_lock(sip);
4450 
4451 	ret = do_dout_fetch(scp, a_num, arr);
4452 	if (ret == -1) {
4453 		ret = DID_ERROR << 16;
4454 		goto cleanup;
4455 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4456 		sdev_printk(KERN_INFO, scp->device,
4457 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4458 			    my_name, __func__, a_num * lb_size, ret);
4459 	}
4460 	if (is_bytchk3) {
4461 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4462 			memcpy(arr + off, arr, lb_size);
4463 	}
4464 	ret = 0;
4465 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4466 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4467 		ret = check_condition_result;
4468 		goto cleanup;
4469 	}
4470 cleanup:
4471 	sdeb_read_unlock(sip);
4472 	kfree(arr);
4473 	return ret;
4474 }
4475 
4476 #define RZONES_DESC_HD 64
4477 
4478 /* Report zones depending on start LBA and reporting options */
4479 static int resp_report_zones(struct scsi_cmnd *scp,
4480 			     struct sdebug_dev_info *devip)
4481 {
4482 	unsigned int rep_max_zones, nrz = 0;
4483 	int ret = 0;
4484 	u32 alloc_len, rep_opts, rep_len;
4485 	bool partial;
4486 	u64 lba, zs_lba;
4487 	u8 *arr = NULL, *desc;
4488 	u8 *cmd = scp->cmnd;
4489 	struct sdeb_zone_state *zsp = NULL;
4490 	struct sdeb_store_info *sip = devip2sip(devip, false);
4491 
4492 	if (!sdebug_dev_is_zoned(devip)) {
4493 		mk_sense_invalid_opcode(scp);
4494 		return check_condition_result;
4495 	}
4496 	zs_lba = get_unaligned_be64(cmd + 2);
4497 	alloc_len = get_unaligned_be32(cmd + 10);
4498 	if (alloc_len == 0)
4499 		return 0;	/* not an error */
4500 	rep_opts = cmd[14] & 0x3f;
4501 	partial = cmd[14] & 0x80;
4502 
4503 	if (zs_lba >= sdebug_capacity) {
4504 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4505 		return check_condition_result;
4506 	}
4507 
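	/* Number of 64-byte zone descriptors that fit after the 64-byte header */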
4508 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4509 
4510 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4511 	if (!arr) {
4512 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4513 				INSUFF_RES_ASCQ);
4514 		return check_condition_result;
4515 	}
4516 
4517 	sdeb_read_lock(sip);
4518 
4519 	desc = arr + 64;
4520 	for (lba = zs_lba; lba < sdebug_capacity;
4521 	     lba = zsp->z_start + zsp->z_size) {
4522 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4523 			break;
4524 		zsp = zbc_zone(devip, lba);
4525 		switch (rep_opts) {
4526 		case 0x00:
4527 			/* All zones */
4528 			break;
4529 		case 0x01:
4530 			/* Empty zones */
4531 			if (zsp->z_cond != ZC1_EMPTY)
4532 				continue;
4533 			break;
4534 		case 0x02:
4535 			/* Implicit open zones */
4536 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4537 				continue;
4538 			break;
4539 		case 0x03:
4540 			/* Explicit open zones */
4541 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4542 				continue;
4543 			break;
4544 		case 0x04:
4545 			/* Closed zones */
4546 			if (zsp->z_cond != ZC4_CLOSED)
4547 				continue;
4548 			break;
4549 		case 0x05:
4550 			/* Full zones */
4551 			if (zsp->z_cond != ZC5_FULL)
4552 				continue;
4553 			break;
4554 		case 0x06:
4555 		case 0x07:
4556 		case 0x10:
4557 			/*
4558 			 * Read-only, offline, and reset-WP-recommended
4559 			 * zones are not emulated: no zones to report.
4560 			 */
4561 			continue;
4562 		case 0x11:
4563 			/* non-seq-resource set */
4564 			if (!zsp->z_non_seq_resource)
4565 				continue;
4566 			break;
4567 		case 0x3e:
4568 			/* All zones except gap zones. */
4569 			if (zbc_zone_is_gap(zsp))
4570 				continue;
4571 			break;
4572 		case 0x3f:
4573 			/* Not write pointer (conventional) zones */
4574 			if (zbc_zone_is_seq(zsp))
4575 				continue;
4576 			break;
4577 		default:
4578 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4579 					INVALID_FIELD_IN_CDB, 0);
4580 			ret = check_condition_result;
4581 			goto fini;
4582 		}
4583 
4584 		if (nrz < rep_max_zones) {
4585 			/* Fill zone descriptor */
4586 			desc[0] = zsp->z_type;
4587 			desc[1] = zsp->z_cond << 4;
4588 			if (zsp->z_non_seq_resource)
4589 				desc[1] |= 1 << 1;
4590 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4591 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4592 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4593 			desc += 64;
4594 		}
4595 
4596 		if (partial && nrz >= rep_max_zones)
4597 			break;
4598 
4599 		nrz++;
4600 	}
4601 
4602 	/* Report header */
4603 	/* Zone list length. */
4604 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4605 	/* Maximum LBA */
4606 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4607 	/* Zone starting LBA granularity. */
4608 	if (devip->zcap < devip->zsize)
4609 		put_unaligned_be64(devip->zsize, arr + 16);
4610 
4611 	rep_len = (unsigned long)desc - (unsigned long)arr;
4612 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4613 
4614 fini:
4615 	sdeb_read_unlock(sip);
4616 	kfree(arr);
4617 	return ret;
4618 }
4619 
4620 /* Logic transplanted from tcmu-runner, file_zbc.c */
4621 static void zbc_open_all(struct sdebug_dev_info *devip)
4622 {
4623 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4624 	unsigned int i;
4625 
4626 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4627 		if (zsp->z_cond == ZC4_CLOSED)
4628 			zbc_open_zone(devip, &devip->zstate[i], true);
4629 	}
4630 }
4631 
4632 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4633 {
4634 	int res = 0;
4635 	u64 z_id;
4636 	enum sdebug_z_cond zc;
4637 	u8 *cmd = scp->cmnd;
4638 	struct sdeb_zone_state *zsp;
4639 	bool all = cmd[14] & 0x01;
4640 	struct sdeb_store_info *sip = devip2sip(devip, false);
4641 
4642 	if (!sdebug_dev_is_zoned(devip)) {
4643 		mk_sense_invalid_opcode(scp);
4644 		return check_condition_result;
4645 	}
4646 
4647 	sdeb_write_lock(sip);
4648 
4649 	if (all) {
4650 		/* Check if all closed zones can be open */
4651 		if (devip->max_open &&
4652 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4653 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4654 					INSUFF_ZONE_ASCQ);
4655 			res = check_condition_result;
4656 			goto fini;
4657 		}
4658 		/* Open all closed zones */
4659 		zbc_open_all(devip);
4660 		goto fini;
4661 	}
4662 
4663 	/* Open the specified zone */
4664 	z_id = get_unaligned_be64(cmd + 2);
4665 	if (z_id >= sdebug_capacity) {
4666 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4667 		res = check_condition_result;
4668 		goto fini;
4669 	}
4670 
4671 	zsp = zbc_zone(devip, z_id);
4672 	if (z_id != zsp->z_start) {
4673 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4674 		res = check_condition_result;
4675 		goto fini;
4676 	}
4677 	if (zbc_zone_is_conv(zsp)) {
4678 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4679 		res = check_condition_result;
4680 		goto fini;
4681 	}
4682 
4683 	zc = zsp->z_cond;
4684 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4685 		goto fini;
4686 
4687 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4688 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4689 				INSUFF_ZONE_ASCQ);
4690 		res = check_condition_result;
4691 		goto fini;
4692 	}
4693 
4694 	zbc_open_zone(devip, zsp, true);
4695 fini:
4696 	sdeb_write_unlock(sip);
4697 	return res;
4698 }
4699 
4700 static void zbc_close_all(struct sdebug_dev_info *devip)
4701 {
4702 	unsigned int i;
4703 
4704 	for (i = 0; i < devip->nr_zones; i++)
4705 		zbc_close_zone(devip, &devip->zstate[i]);
4706 }
4707 
4708 static int resp_close_zone(struct scsi_cmnd *scp,
4709 			   struct sdebug_dev_info *devip)
4710 {
4711 	int res = 0;
4712 	u64 z_id;
4713 	u8 *cmd = scp->cmnd;
4714 	struct sdeb_zone_state *zsp;
4715 	bool all = cmd[14] & 0x01;
4716 	struct sdeb_store_info *sip = devip2sip(devip, false);
4717 
4718 	if (!sdebug_dev_is_zoned(devip)) {
4719 		mk_sense_invalid_opcode(scp);
4720 		return check_condition_result;
4721 	}
4722 
4723 	sdeb_write_lock(sip);
4724 
4725 	if (all) {
4726 		zbc_close_all(devip);
4727 		goto fini;
4728 	}
4729 
4730 	/* Close specified zone */
4731 	z_id = get_unaligned_be64(cmd + 2);
4732 	if (z_id >= sdebug_capacity) {
4733 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4734 		res = check_condition_result;
4735 		goto fini;
4736 	}
4737 
4738 	zsp = zbc_zone(devip, z_id);
4739 	if (z_id != zsp->z_start) {
4740 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4741 		res = check_condition_result;
4742 		goto fini;
4743 	}
4744 	if (zbc_zone_is_conv(zsp)) {
4745 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4746 		res = check_condition_result;
4747 		goto fini;
4748 	}
4749 
4750 	zbc_close_zone(devip, zsp);
4751 fini:
4752 	sdeb_write_unlock(sip);
4753 	return res;
4754 }
4755 
4756 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4757 			    struct sdeb_zone_state *zsp, bool empty)
4758 {
4759 	enum sdebug_z_cond zc = zsp->z_cond;
4760 
4761 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4762 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4763 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4764 			zbc_close_zone(devip, zsp);
4765 		if (zsp->z_cond == ZC4_CLOSED)
4766 			devip->nr_closed--;
4767 		zsp->z_wp = zsp->z_start + zsp->z_size;
4768 		zsp->z_cond = ZC5_FULL;
4769 	}
4770 }
4771 
4772 static void zbc_finish_all(struct sdebug_dev_info *devip)
4773 {
4774 	unsigned int i;
4775 
4776 	for (i = 0; i < devip->nr_zones; i++)
4777 		zbc_finish_zone(devip, &devip->zstate[i], false);
4778 }
4779 
4780 static int resp_finish_zone(struct scsi_cmnd *scp,
4781 			    struct sdebug_dev_info *devip)
4782 {
4783 	struct sdeb_zone_state *zsp;
4784 	int res = 0;
4785 	u64 z_id;
4786 	u8 *cmd = scp->cmnd;
4787 	bool all = cmd[14] & 0x01;
4788 	struct sdeb_store_info *sip = devip2sip(devip, false);
4789 
4790 	if (!sdebug_dev_is_zoned(devip)) {
4791 		mk_sense_invalid_opcode(scp);
4792 		return check_condition_result;
4793 	}
4794 
4795 	sdeb_write_lock(sip);
4796 
4797 	if (all) {
4798 		zbc_finish_all(devip);
4799 		goto fini;
4800 	}
4801 
4802 	/* Finish the specified zone */
4803 	z_id = get_unaligned_be64(cmd + 2);
4804 	if (z_id >= sdebug_capacity) {
4805 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4806 		res = check_condition_result;
4807 		goto fini;
4808 	}
4809 
4810 	zsp = zbc_zone(devip, z_id);
4811 	if (z_id != zsp->z_start) {
4812 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4813 		res = check_condition_result;
4814 		goto fini;
4815 	}
4816 	if (zbc_zone_is_conv(zsp)) {
4817 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4818 		res = check_condition_result;
4819 		goto fini;
4820 	}
4821 
4822 	zbc_finish_zone(devip, zsp, true);
4823 fini:
4824 	sdeb_write_unlock(sip);
4825 	return res;
4826 }
4827 
4828 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4829 			 struct sdeb_zone_state *zsp)
4830 {
4831 	enum sdebug_z_cond zc;
4832 	struct sdeb_store_info *sip = devip2sip(devip, false);
4833 
4834 	if (!zbc_zone_is_seq(zsp))
4835 		return;
4836 
4837 	zc = zsp->z_cond;
4838 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4839 		zbc_close_zone(devip, zsp);
4840 
4841 	if (zsp->z_cond == ZC4_CLOSED)
4842 		devip->nr_closed--;
4843 
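	/* Resetting the write pointer discards the zone's data: zero the written part */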
4844 	if (zsp->z_wp > zsp->z_start)
4845 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4846 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4847 
4848 	zsp->z_non_seq_resource = false;
4849 	zsp->z_wp = zsp->z_start;
4850 	zsp->z_cond = ZC1_EMPTY;
4851 }
4852 
4853 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4854 {
4855 	unsigned int i;
4856 
4857 	for (i = 0; i < devip->nr_zones; i++)
4858 		zbc_rwp_zone(devip, &devip->zstate[i]);
4859 }
4860 
4861 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4862 {
4863 	struct sdeb_zone_state *zsp;
4864 	int res = 0;
4865 	u64 z_id;
4866 	u8 *cmd = scp->cmnd;
4867 	bool all = cmd[14] & 0x01;
4868 	struct sdeb_store_info *sip = devip2sip(devip, false);
4869 
4870 	if (!sdebug_dev_is_zoned(devip)) {
4871 		mk_sense_invalid_opcode(scp);
4872 		return check_condition_result;
4873 	}
4874 
4875 	sdeb_write_lock(sip);
4876 
4877 	if (all) {
4878 		zbc_rwp_all(devip);
4879 		goto fini;
4880 	}
4881 
4882 	z_id = get_unaligned_be64(cmd + 2);
4883 	if (z_id >= sdebug_capacity) {
4884 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4885 		res = check_condition_result;
4886 		goto fini;
4887 	}
4888 
4889 	zsp = zbc_zone(devip, z_id);
4890 	if (z_id != zsp->z_start) {
4891 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4892 		res = check_condition_result;
4893 		goto fini;
4894 	}
4895 	if (zbc_zone_is_conv(zsp)) {
4896 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4897 		res = check_condition_result;
4898 		goto fini;
4899 	}
4900 
4901 	zbc_rwp_zone(devip, zsp);
4902 fini:
4903 	sdeb_write_unlock(sip);
4904 	return res;
4905 }
4906 
4907 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4908 {
4909 	u16 hwq;
4910 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4911 
4912 	hwq = blk_mq_unique_tag_to_hwq(tag);
4913 
4914 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4915 	if (WARN_ON_ONCE(hwq >= submit_queues))
4916 		hwq = 0;
4917 
4918 	return sdebug_q_arr + hwq;
4919 }
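
/*
 * Note: blk_mq_unique_tag() packs the hardware queue index into the upper
 * 16 bits of the returned value and the per-queue tag into the lower 16
 * bits, so the helpers used above amount to (a sketch, not the block
 * layer's actual definitions):
 *
 *	hwq = unique_tag >> 16;		(blk_mq_unique_tag_to_hwq)
 *	tag = unique_tag & 0xffff;	(blk_mq_unique_tag_to_tag)
 */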
4920 
4921 static u32 get_tag(struct scsi_cmnd *cmnd)
4922 {
4923 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4924 }
4925 
4926 /* Queued (deferred) command completions converge here. */
4927 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4928 {
4929 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4930 	int qc_idx;
4931 	int retiring = 0;
4932 	unsigned long flags, iflags;
4933 	struct scsi_cmnd *scp = sqcp->scmd;
4934 	struct sdebug_scsi_cmd *sdsc;
4935 	bool aborted;
4936 	struct sdebug_queue *sqp;
4937 
4938 	qc_idx = sd_dp->sqa_idx;
4939 	if (sdebug_statistics) {
4940 		atomic_inc(&sdebug_completions);
4941 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4942 			atomic_inc(&sdebug_miss_cpus);
4943 	}
4944 	if (!scp) {
4945 		pr_err("scmd=NULL\n");
4946 		goto out;
4947 	}
4948 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4949 		pr_err("wild qc_idx=%d\n", qc_idx);
4950 		goto out;
4951 	}
4952 
4953 	sdsc = scsi_cmd_priv(scp);
4954 	sqp = get_queue(scp);
4955 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4956 	spin_lock_irqsave(&sdsc->lock, flags);
4957 	aborted = sd_dp->aborted;
4958 	if (unlikely(aborted))
4959 		sd_dp->aborted = false;
4960 	ASSIGN_QUEUED_CMD(scp, NULL);
4961 
4962 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4963 		retiring = 1;
4964 
4965 	sqp->qc_arr[qc_idx] = NULL;
4966 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4967 		spin_unlock_irqrestore(&sdsc->lock, flags);
4968 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4969 		pr_err("Unexpected completion qc_idx=%d\n", qc_idx);
4970 		goto out;
4971 	}
4972 
4973 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4974 		int k, retval;
4975 
4976 		retval = atomic_read(&retired_max_queue);
4977 		if (qc_idx >= retval) {
4978 			spin_unlock_irqrestore(&sdsc->lock, flags);
4979 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4980 			pr_err("index %d too large\n", retval);
4981 			goto out;
4982 		}
4983 		k = find_last_bit(sqp->in_use_bm, retval);
4984 		if ((k < sdebug_max_queue) || (k == retval))
4985 			atomic_set(&retired_max_queue, 0);
4986 		else
4987 			atomic_set(&retired_max_queue, k + 1);
4988 	}
4989 
4990 	spin_unlock_irqrestore(&sdsc->lock, flags);
4991 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4992 
4993 	if (aborted) {
4994 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4995 		blk_abort_request(scsi_cmd_to_rq(scp));
4996 		goto out;
4997 	}
4998 
4999 	scsi_done(scp); /* callback to mid level */
5000 out:
5001 	sdebug_free_queued_cmd(sqcp);
5002 }
5003 
5004 /* Called when the high-resolution timer fires. */
5005 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5006 {
5007 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5008 						  hrt);
5009 	sdebug_q_cmd_complete(sd_dp);
5010 	return HRTIMER_NORESTART;
5011 }
5012 
5013 /* Called when the work queue runs the deferred work. */
5014 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5015 {
5016 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5017 						  ew.work);
5018 	sdebug_q_cmd_complete(sd_dp);
5019 }
5020 
5021 static bool got_shared_uuid;
5022 static uuid_t shared_uuid;
5023 
5024 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5025 {
5026 	struct sdeb_zone_state *zsp;
5027 	sector_t capacity = get_sdebug_capacity();
5028 	sector_t conv_capacity;
5029 	sector_t zstart = 0;
5030 	unsigned int i;
5031 
5032 	/*
5033 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5034 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5035 	 * use the specified zone size checking that at least 2 zones can be
5036 	 * created for the device.
5037 	 */
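	/*
	 * Worked example (numbers illustrative): with 512-byte logical
	 * blocks and a 128 MiB default zone size, the initial zsize is
	 * (128 * SZ_1M) >> 9 = 262144 blocks; for any capacity below
	 * 4 * 262144 blocks, zsize is halved until at least 4 zones fit.
	 */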
5038 	if (!sdeb_zbc_zone_size_mb) {
5039 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5040 			>> ilog2(sdebug_sector_size);
5041 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5042 			devip->zsize >>= 1;
5043 		if (devip->zsize < 2) {
5044 			pr_err("Device capacity too small\n");
5045 			return -EINVAL;
5046 		}
5047 	} else {
5048 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5049 			pr_err("Zone size is not a power of 2\n");
5050 			return -EINVAL;
5051 		}
5052 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5053 			>> ilog2(sdebug_sector_size);
5054 		if (devip->zsize >= capacity) {
5055 			pr_err("Zone size too large for device capacity\n");
5056 			return -EINVAL;
5057 		}
5058 	}
5059 
5060 	devip->zsize_shift = ilog2(devip->zsize);
5061 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5062 
5063 	if (sdeb_zbc_zone_cap_mb == 0) {
5064 		devip->zcap = devip->zsize;
5065 	} else {
5066 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5067 			      ilog2(sdebug_sector_size);
5068 		if (devip->zcap > devip->zsize) {
5069 			pr_err("Zone capacity too large\n");
5070 			return -EINVAL;
5071 		}
5072 	}
5073 
5074 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5075 	if (conv_capacity >= capacity) {
5076 		pr_err("Number of conventional zones too large\n");
5077 		return -EINVAL;
5078 	}
5079 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5080 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5081 			      devip->zsize_shift;
5082 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5083 
5084 	/* Add gap zones if zone capacity is smaller than the zone size */
5085 	if (devip->zcap < devip->zsize)
5086 		devip->nr_zones += devip->nr_seq_zones;
5087 
5088 	if (devip->zmodel == BLK_ZONED_HM) {
5089 		/* sdeb_zbc_max_open can be 0, meaning "not reported" (no limit) */
5090 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5091 			devip->max_open = (devip->nr_zones - 1) / 2;
5092 		else
5093 			devip->max_open = sdeb_zbc_max_open;
5094 	}
5095 
5096 	devip->zstate = kcalloc(devip->nr_zones,
5097 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5098 	if (!devip->zstate)
5099 		return -ENOMEM;
5100 
5101 	for (i = 0; i < devip->nr_zones; i++) {
5102 		zsp = &devip->zstate[i];
5103 
5104 		zsp->z_start = zstart;
5105 
5106 		if (i < devip->nr_conv_zones) {
5107 			zsp->z_type = ZBC_ZTYPE_CNV;
5108 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5109 			zsp->z_wp = (sector_t)-1;
5110 			zsp->z_size =
5111 				min_t(u64, devip->zsize, capacity - zstart);
5112 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5113 			if (devip->zmodel == BLK_ZONED_HM)
5114 				zsp->z_type = ZBC_ZTYPE_SWR;
5115 			else
5116 				zsp->z_type = ZBC_ZTYPE_SWP;
5117 			zsp->z_cond = ZC1_EMPTY;
5118 			zsp->z_wp = zsp->z_start;
5119 			zsp->z_size =
5120 				min_t(u64, devip->zcap, capacity - zstart);
5121 		} else {
5122 			zsp->z_type = ZBC_ZTYPE_GAP;
5123 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5124 			zsp->z_wp = (sector_t)-1;
5125 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5126 					    capacity - zstart);
5127 		}
5128 
5129 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5130 		zstart += zsp->z_size;
5131 	}
5132 
5133 	return 0;
5134 }
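
/*
 * Layout produced by the loop above when zcap < zsize (illustrative):
 * nr_conv_zones conventional zones come first, then each sequential zone
 * of zcap blocks is followed by a gap zone of (zsize - zcap) blocks, which
 * keeps the write-pointer zones aligned to the power-of-2 zone size.
 */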
5135 
5136 static struct sdebug_dev_info *sdebug_device_create(
5137 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5138 {
5139 	struct sdebug_dev_info *devip;
5140 
5141 	devip = kzalloc(sizeof(*devip), flags);
5142 	if (devip) {
5143 		if (sdebug_uuid_ctl == 1)
5144 			uuid_gen(&devip->lu_name);
5145 		else if (sdebug_uuid_ctl == 2) {
5146 			if (got_shared_uuid)
5147 				devip->lu_name = shared_uuid;
5148 			else {
5149 				uuid_gen(&shared_uuid);
5150 				got_shared_uuid = true;
5151 				devip->lu_name = shared_uuid;
5152 			}
5153 		}
5154 		devip->sdbg_host = sdbg_host;
5155 		if (sdeb_zbc_in_use) {
5156 			devip->zmodel = sdeb_zbc_model;
5157 			if (sdebug_device_create_zones(devip)) {
5158 				kfree(devip);
5159 				return NULL;
5160 			}
5161 		} else {
5162 			devip->zmodel = BLK_ZONED_NONE;
5163 		}
5164 		devip->create_ts = ktime_get_boottime();
5165 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5166 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5167 	}
5168 	return devip;
5169 }
5170 
5171 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5172 {
5173 	struct sdebug_host_info *sdbg_host;
5174 	struct sdebug_dev_info *open_devip = NULL;
5175 	struct sdebug_dev_info *devip;
5176 
5177 	sdbg_host = shost_to_sdebug_host(sdev->host);
5178 
5179 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5180 		if ((devip->used) && (devip->channel == sdev->channel) &&
5181 		    (devip->target == sdev->id) &&
5182 		    (devip->lun == sdev->lun))
5183 			return devip;
5184 		else {
5185 			if ((!devip->used) && (!open_devip))
5186 				open_devip = devip;
5187 		}
5188 	}
5189 	if (!open_devip) { /* try and make a new one */
5190 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5191 		if (!open_devip) {
5192 			pr_err("out of memory at line %d\n", __LINE__);
5193 			return NULL;
5194 		}
5195 	}
5196 
5197 	open_devip->channel = sdev->channel;
5198 	open_devip->target = sdev->id;
5199 	open_devip->lun = sdev->lun;
5200 	open_devip->sdbg_host = sdbg_host;
5201 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5202 	open_devip->used = true;
5203 	return open_devip;
5204 }
5205 
5206 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5207 {
5208 	if (sdebug_verbose)
5209 		pr_info("slave_alloc <%u %u %u %llu>\n",
5210 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5211 	return 0;
5212 }
5213 
5214 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5215 {
5216 	struct sdebug_dev_info *devip =
5217 			(struct sdebug_dev_info *)sdp->hostdata;
5218 
5219 	if (sdebug_verbose)
5220 		pr_info("slave_configure <%u %u %u %llu>\n",
5221 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5222 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5223 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5224 	if (devip == NULL) {
5225 		devip = find_build_dev_info(sdp);
5226 		if (devip == NULL)
5227 			return 1;  /* no resources, will be marked offline */
5228 	}
5229 	sdp->hostdata = devip;
5230 	if (sdebug_no_uld)
5231 		sdp->no_uld_attach = 1;
5232 	config_cdb_len(sdp);
5233 	return 0;
5234 }
5235 
5236 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5237 {
5238 	struct sdebug_dev_info *devip =
5239 		(struct sdebug_dev_info *)sdp->hostdata;
5240 
5241 	if (sdebug_verbose)
5242 		pr_info("slave_destroy <%u %u %u %llu>\n",
5243 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5244 	if (devip) {
5245 		/* make this slot available for re-use */
5246 		devip->used = false;
5247 		sdp->hostdata = NULL;
5248 	}
5249 }
5250 
5251 /* Returns true if the caller must free the queued command memory. */
5252 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5253 			   enum sdeb_defer_type defer_t)
5254 {
5255 	if (defer_t == SDEB_DEFER_HRT) {
5256 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5257 
5258 		switch (res) {
5259 		case 0: /* Not active, it must have already run */
5260 		case -1: /* The callback is currently executing */
5261 			return false;
5262 		case 1: /* Was active, we've now cancelled */
5263 		default:
5264 			return true;
5265 		}
5266 	} else if (defer_t == SDEB_DEFER_WQ) {
5267 		/* Cancel if pending */
5268 		if (cancel_work_sync(&sd_dp->ew.work))
5269 			return true;
5270 		/* Was not pending, so it must have run */
5271 		return false;
5272 	} else if (defer_t == SDEB_DEFER_POLL) {
5273 		return true;
5274 	}
5275 
5276 	return false;
5277 }
5278 
5279 
5280 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd, int *sqa_idx)
5281 {
5282 	enum sdeb_defer_type l_defer_t;
5283 	struct sdebug_queued_cmd *sqcp;
5284 	struct sdebug_defer *sd_dp;
5285 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5286 
5287 	lockdep_assert_held(&sdsc->lock);
5288 
5289 	sqcp = TO_QUEUED_CMD(cmnd);
5290 	if (!sqcp)
5291 		return false;
5292 	sd_dp = &sqcp->sd_dp;
5293 	if (sqa_idx)
5294 		*sqa_idx = sd_dp->sqa_idx;
5295 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5296 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5297 
5298 	if (stop_qc_helper(sd_dp, l_defer_t))
5299 		sdebug_free_queued_cmd(sqcp);
5300 
5301 	return true;
5302 }
5303 
5304 /*
5305  * Called from scsi_debug_abort() for a timed-out cmd, and from
 * sdebug_stop_cmnd() when stopping all queued commands.
5306  */
5307 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5308 {
5309 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5310 	struct sdebug_queue *sqp = get_queue(cmnd);
5311 	unsigned long flags, iflags;
5312 	int k = -1;
5313 	bool res;
5314 
5315 	spin_lock_irqsave(&sdsc->lock, flags);
5316 	res = scsi_debug_stop_cmnd(cmnd, &k);
5317 	spin_unlock_irqrestore(&sdsc->lock, flags);
5318 
5319 	if (k >= 0) {
5320 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5321 		clear_bit(k, sqp->in_use_bm);
5322 		sqp->qc_arr[k] = NULL;
5323 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5324 	}
5325 
5326 	return res;
5327 }
5328 
5329 /*
5330  * All we can do is mark the cmnd as internally aborted and wait for it to
5331  * finish. We cannot call scsi_done() as the normal completion path may do that.
5332  */
5333 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5334 {
5335 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5336 
5337 	return true;
5338 }
5339 
5340 /* Deletes (stops) timers or work queues of all queued commands */
5341 static void stop_all_queued(void)
5342 {
5343 	struct sdebug_host_info *sdhp;
5344 
5345 	mutex_lock(&sdebug_host_list_mutex);
5346 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5347 		struct Scsi_Host *shost = sdhp->shost;
5348 
5349 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5350 	}
5351 	mutex_unlock(&sdebug_host_list_mutex);
5352 }
5353 
5354 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5355 {
5356 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5357 
5358 	++num_aborts;
5359 
5360 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 		sdev_printk(KERN_INFO, SCpnt->device,
5362 			    "%s: command%s found\n", __func__,
5363 			    ok ? "" : " not");
5364 
5365 	return SUCCESS;
5366 }
5367 
5368 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5369 {
5370 	struct scsi_device *sdp = SCpnt->device;
5371 	struct sdebug_dev_info *devip = sdp->hostdata;
5372 
5373 	++num_dev_resets;
5374 
5375 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5376 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5377 	if (devip)
5378 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5379 
5380 	return SUCCESS;
5381 }
5382 
5383 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5384 {
5385 	struct scsi_device *sdp = SCpnt->device;
5386 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5387 	struct sdebug_dev_info *devip;
5388 	int k = 0;
5389 
5390 	++num_target_resets;
5391 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5392 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5393 
5394 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5395 		if (devip->target == sdp->id) {
5396 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5397 			++k;
5398 		}
5399 	}
5400 
5401 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5402 		sdev_printk(KERN_INFO, sdp,
5403 			    "%s: %d device(s) found in target\n", __func__, k);
5404 
5405 	return SUCCESS;
5406 }
5407 
5408 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5409 {
5410 	struct scsi_device *sdp = SCpnt->device;
5411 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5412 	struct sdebug_dev_info *devip;
5413 	int k = 0;
5414 
5415 	++num_bus_resets;
5416 
5417 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5418 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5419 
5420 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5421 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5422 		++k;
5423 	}
5424 
5425 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5426 		sdev_printk(KERN_INFO, sdp,
5427 			    "%s: %d device(s) found in host\n", __func__, k);
5428 	return SUCCESS;
5429 }
5430 
5431 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5432 {
5433 	struct sdebug_host_info *sdbg_host;
5434 	struct sdebug_dev_info *devip;
5435 	int k = 0;
5436 
5437 	++num_host_resets;
5438 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5439 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5440 	mutex_lock(&sdebug_host_list_mutex);
5441 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5442 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5443 				    dev_list) {
5444 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5445 			++k;
5446 		}
5447 	}
5448 	mutex_unlock(&sdebug_host_list_mutex);
5449 	stop_all_queued();
5450 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5451 		sdev_printk(KERN_INFO, SCpnt->device,
5452 			    "%s: %d device(s) found\n", __func__, k);
5453 	return SUCCESS;
5454 }
5455 
5456 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5457 {
5458 	struct msdos_partition *pp;
5459 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5460 	int sectors_per_part, num_sectors, k;
5461 	int heads_by_sects, start_sec, end_sec;
5462 
5463 	/* assume partition table already zeroed */
5464 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5465 		return;
5466 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5467 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5468 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5469 	}
5470 	num_sectors = (int)get_sdebug_capacity();
5471 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5472 			   / sdebug_num_parts;
5473 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5474 	starts[0] = sdebug_sectors_per;
5475 	max_part_secs = sectors_per_part;
5476 	for (k = 1; k < sdebug_num_parts; ++k) {
5477 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5478 			    * heads_by_sects;
5479 		if (starts[k] - starts[k - 1] < max_part_secs)
5480 			max_part_secs = starts[k] - starts[k - 1];
5481 	}
5482 	starts[sdebug_num_parts] = num_sectors;
5483 	starts[sdebug_num_parts + 1] = 0;
5484 
5485 	ramp[510] = 0x55;	/* magic partition markings */
5486 	ramp[511] = 0xAA;
5487 	pp = (struct msdos_partition *)(ramp + 0x1be);
5488 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5489 		start_sec = starts[k];
5490 		end_sec = starts[k] + max_part_secs - 1;
5491 		pp->boot_ind = 0;
5492 
5493 		pp->cyl = start_sec / heads_by_sects;
5494 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5495 			   / sdebug_sectors_per;
5496 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5497 
5498 		pp->end_cyl = end_sec / heads_by_sects;
5499 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5500 			       / sdebug_sectors_per;
5501 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5502 
5503 		pp->start_sect = cpu_to_le32(start_sec);
5504 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5505 		pp->sys_ind = 0x83;	/* plain Linux partition */
5506 	}
5507 }
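
/*
 * CHS arithmetic used above, with hypothetical geometry sdebug_heads = 16
 * and sdebug_sectors_per = 63 (so heads_by_sects = 1008): a partition
 * starting at LBA 2016 maps to cyl = 2016 / 1008 = 2, head = 0 and
 * sector = (2016 % 63) + 1 = 1.
 */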
5508 
5509 static void block_unblock_all_queues(bool block)
5510 {
5511 	struct sdebug_host_info *sdhp;
5512 
5513 	lockdep_assert_held(&sdebug_host_list_mutex);
5514 
5515 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5516 		struct Scsi_Host *shost = sdhp->shost;
5517 
5518 		if (block)
5519 			scsi_block_requests(shost);
5520 		else
5521 			scsi_unblock_requests(shost);
5522 	}
5523 }
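
/*
 * Callers bracket configuration changes with block_unblock_all_queues(true)
 * ... block_unblock_all_queues(false) while holding sdebug_host_list_mutex,
 * so no new commands are dispatched while shared parameters are updated.
 */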
5524 
5525 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5526  * commands will be processed normally before triggers occur.
5527  */
5528 static void tweak_cmnd_count(void)
5529 {
5530 	int count, modulo;
5531 
5532 	modulo = abs(sdebug_every_nth);
5533 	if (modulo < 2)
5534 		return;
5535 
5536 	mutex_lock(&sdebug_host_list_mutex);
5537 	block_unblock_all_queues(true);
5538 	count = atomic_read(&sdebug_cmnd_count);
5539 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5540 	block_unblock_all_queues(false);
5541 	mutex_unlock(&sdebug_host_list_mutex);
5542 }
5543 
5544 static void clear_queue_stats(void)
5545 {
5546 	atomic_set(&sdebug_cmnd_count, 0);
5547 	atomic_set(&sdebug_completions, 0);
5548 	atomic_set(&sdebug_miss_cpus, 0);
5549 	atomic_set(&sdebug_a_tsf, 0);
5550 }
5551 
5552 static bool inject_on_this_cmd(void)
5553 {
5554 	if (sdebug_every_nth == 0)
5555 		return false;
5556 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5557 }
5558 
5559 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5560 
5561 
5562 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5563 {
5564 	if (sqcp)
5565 		kmem_cache_free(queued_cmd_cache, sqcp);
5566 }
5567 
5568 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5569 {
5570 	struct sdebug_queued_cmd *sqcp;
5571 	struct sdebug_defer *sd_dp;
5572 
5573 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5574 	if (!sqcp)
5575 		return NULL;
5576 
5577 	sd_dp = &sqcp->sd_dp;
5578 
5579 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5580 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5581 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5582 
5583 	sqcp->scmd = scmd;
5584 	sd_dp->sqa_idx = -1;
5585 
5586 	return sqcp;
5587 }
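
/*
 * Lifecycle note: a command allocated here is freed by
 * sdebug_q_cmd_complete() once its deferred completion runs, by
 * scsi_debug_stop_cmnd() if the timer/work is cancelled first, or inline
 * by schedule_resp() when the requested delay has already elapsed.
 */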
5588 
5589 /* Complete the processing of the thread that queued a SCSI command to this
5590  * driver. It either completes the command by calling scsi_done() or
5591  * schedules an hrtimer or work queue and then returns 0. Returns
5592  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5593  */
5594 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5595 			 int scsi_result,
5596 			 int (*pfp)(struct scsi_cmnd *,
5597 				    struct sdebug_dev_info *),
5598 			 int delta_jiff, int ndelay)
5599 {
5600 	struct request *rq = scsi_cmd_to_rq(cmnd);
5601 	bool polled = rq->cmd_flags & REQ_POLLED;
5602 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5603 	unsigned long iflags, flags;
5604 	u64 ns_from_boot = 0;
5605 	struct sdebug_queue *sqp;
5606 	struct sdebug_queued_cmd *sqcp;
5607 	struct scsi_device *sdp;
5608 	struct sdebug_defer *sd_dp;
5609 	int k;
5610 
5611 	if (unlikely(devip == NULL)) {
5612 		if (scsi_result == 0)
5613 			scsi_result = DID_NO_CONNECT << 16;
5614 		goto respond_in_thread;
5615 	}
5616 	sdp = cmnd->device;
5617 
5618 	if (delta_jiff == 0)
5619 		goto respond_in_thread;
5620 
5621 	sqp = get_queue(cmnd);
5622 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5623 
5624 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5625 		     (scsi_result == 0))) {
5626 		int num_in_q = scsi_device_busy(sdp);
5627 		int qdepth = cmnd->device->queue_depth;
5628 
5629 		if ((num_in_q == qdepth) &&
5630 		    (atomic_inc_return(&sdebug_a_tsf) >=
5631 		     abs(sdebug_every_nth))) {
5632 			atomic_set(&sdebug_a_tsf, 0);
5633 			scsi_result = device_qfull_result;
5634 
5635 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5636 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5637 					    __func__, num_in_q);
5638 		}
5639 	}
5640 
5641 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5642 	if (unlikely(k >= sdebug_max_queue)) {
5643 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5644 		if (scsi_result)
5645 			goto respond_in_thread;
5646 		scsi_result = device_qfull_result;
5647 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5648 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5649 				    __func__, sdebug_max_queue);
5650 		goto respond_in_thread;
5651 	}
5652 	set_bit(k, sqp->in_use_bm);
5653 
5654 	sqcp = sdebug_alloc_queued_cmd(cmnd);
5655 	if (!sqcp) {
5656 		clear_bit(k, sqp->in_use_bm);
5657 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5658 		return SCSI_MLQUEUE_HOST_BUSY;
5659 	}
5660 	sd_dp = &sqcp->sd_dp;
5661 	sd_dp->sqa_idx = k;
5662 	sqp->qc_arr[k] = sqcp;
5663 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5664 
5665 	/* Set the hostwide tag */
5666 	if (sdebug_host_max_queue)
5667 		sd_dp->hc_idx = get_tag(cmnd);
5668 
5669 	if (polled)
5670 		ns_from_boot = ktime_get_boottime_ns();
5671 
5672 	/* one of the resp_*() response functions is called here */
5673 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5674 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5675 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5676 		delta_jiff = ndelay = 0;
5677 	}
5678 	if (cmnd->result == 0 && scsi_result != 0)
5679 		cmnd->result = scsi_result;
5680 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5681 		if (atomic_read(&sdeb_inject_pending)) {
5682 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5683 			atomic_set(&sdeb_inject_pending, 0);
5684 			cmnd->result = check_condition_result;
5685 		}
5686 	}
5687 
5688 	if (unlikely(sdebug_verbose && cmnd->result))
5689 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5690 			    __func__, cmnd->result);
5691 
5692 	if (delta_jiff > 0 || ndelay > 0) {
5693 		ktime_t kt;
5694 
5695 		if (delta_jiff > 0) {
5696 			u64 ns = jiffies_to_nsecs(delta_jiff);
5697 
5698 			if (sdebug_random && ns < U32_MAX) {
5699 				ns = get_random_u32_below((u32)ns);
5700 			} else if (sdebug_random) {
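				/*
				 * Hypothetical example: ns = 5e9 (a 5 s
				 * delay) exceeds the u32 bound taken by
				 * get_random_u32_below(), so scale down by
				 * 2^12 (to ~4 us granularity), randomize,
				 * then scale back up below.
				 */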
5701 				ns >>= 12;	/* scale to 4 usec precision */
5702 				if (ns < U32_MAX)	/* over 4 hours max */
5703 					ns = get_random_u32_below((u32)ns);
5704 				ns <<= 12;
5705 			}
5706 			kt = ns_to_ktime(ns);
5707 		} else {	/* ndelay has a 4.2 second max */
5708 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5709 					     (u32)ndelay;
5710 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5711 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5712 
5713 				if (kt <= d) {	/* elapsed duration >= kt */
5714 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5715 					sqp->qc_arr[k] = NULL;
5716 					clear_bit(k, sqp->in_use_bm);
5717 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5718 					/* call scsi_done() from this thread */
5719 					sdebug_free_queued_cmd(sqcp);
5720 					scsi_done(cmnd);
5721 					return 0;
5722 				}
5723 				/* otherwise reduce kt by elapsed time */
5724 				kt -= d;
5725 			}
5726 		}
5727 		if (sdebug_statistics)
5728 			sd_dp->issuing_cpu = raw_smp_processor_id();
5729 		if (polled) {
5730 			spin_lock_irqsave(&sdsc->lock, flags);
5731 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5732 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5733 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5734 			spin_unlock_irqrestore(&sdsc->lock, flags);
5735 		} else {
5736 			/* schedule the invocation of scsi_done() for a later time */
5737 			spin_lock_irqsave(&sdsc->lock, flags);
5738 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5739 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5740 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5741 			/*
5742 			 * The completion handler will try to grab sqcp->lock,
5743 			 * so there is no chance that the completion handler
5744 			 * will call scsi_done() until we release the lock
5745 			 * here (so ok to keep referencing sdsc).
5746 			 */
5747 			spin_unlock_irqrestore(&sdsc->lock, flags);
5748 		}
5749 	} else {	/* jdelay < 0, use work queue */
5750 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5751 			     atomic_read(&sdeb_inject_pending))) {
5752 			sd_dp->aborted = true;
5753 			atomic_set(&sdeb_inject_pending, 0);
5754 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5755 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5756 		}
5757 
5758 		if (sdebug_statistics)
5759 			sd_dp->issuing_cpu = raw_smp_processor_id();
5760 		if (polled) {
5761 			spin_lock_irqsave(&sdsc->lock, flags);
5762 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5763 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5764 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5765 			spin_unlock_irqrestore(&sdsc->lock, flags);
5766 		} else {
5767 			spin_lock_irqsave(&sdsc->lock, flags);
5768 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5769 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5770 			schedule_work(&sd_dp->ew.work);
5771 			spin_unlock_irqrestore(&sdsc->lock, flags);
5772 		}
5773 	}
5774 
5775 	return 0;
5776 
5777 respond_in_thread:	/* call back to mid-layer using invocation thread */
5778 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5779 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5780 	if (cmnd->result == 0 && scsi_result != 0)
5781 		cmnd->result = scsi_result;
5782 	scsi_done(cmnd);
5783 	return 0;
5784 }
5785 
5786 /* Note: The following macros create attribute files in the
5787    /sys/module/scsi_debug/parameters directory. Unfortunately this
5788    driver is unaware of a change and cannot trigger auxiliary actions
5789    as it can when the corresponding attribute in the
5790    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5791  */
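/*
 * Example (paths as documented above, value hypothetical): a writable
 * parameter may be changed at runtime via
 *
 *	echo 1 > /sys/module/scsi_debug/parameters/every_nth
 *
 * but the driver attribute form triggers the auxiliary actions
 * (e.g. tweak_cmnd_count() for every_nth):
 *
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */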
5792 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5793 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5794 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5795 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5796 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5797 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5798 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5799 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5800 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5801 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5802 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5803 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5804 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5805 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5806 module_param_string(inq_product, sdebug_inq_product_id,
5807 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5808 module_param_string(inq_rev, sdebug_inq_product_rev,
5809 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5810 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5811 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5812 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5813 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5814 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5815 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5816 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5817 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5818 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5819 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5820 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5821 		   S_IRUGO | S_IWUSR);
5822 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5823 		   S_IRUGO | S_IWUSR);
5824 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5825 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5826 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5827 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5828 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5829 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5830 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5831 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5832 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5833 module_param_named(per_host_store, sdebug_per_host_store, bool,
5834 		   S_IRUGO | S_IWUSR);
5835 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5836 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5837 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5838 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5839 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5840 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5841 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5842 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5843 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5844 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5845 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5846 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5847 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5848 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5849 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5850 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5851 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5852 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5853 		   S_IRUGO | S_IWUSR);
5854 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5855 module_param_named(write_same_length, sdebug_write_same_length, int,
5856 		   S_IRUGO | S_IWUSR);
5857 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5858 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5859 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5860 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5861 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5862 
5863 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5864 MODULE_DESCRIPTION("SCSI debug adapter driver");
5865 MODULE_LICENSE("GPL");
5866 MODULE_VERSION(SDEBUG_VERSION);
5867 
5868 MODULE_PARM_DESC(add_host, "add n hosts (def=1); in sysfs, a negative value removes host(s)");
5869 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5870 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5871 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5872 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5873 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5874 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5875 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5876 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5877 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5878 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5879 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5880 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5881 MODULE_PARM_DESC(host_max_queue,
5882 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5883 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5884 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5885 		 SDEBUG_VERSION "\")");
5886 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5887 MODULE_PARM_DESC(lbprz,
5888 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5889 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5890 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5891 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5893 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5894 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5895 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5896 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors on which to return MEDIUM error");
5897 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5898 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5899 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5900 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5901 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5902 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5903 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5904 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5905 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5906 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5907 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5908 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5909 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5910 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5911 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5912 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5913 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5914 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5915 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5916 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5917 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5918 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5919 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5920 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5921 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5922 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5923 MODULE_PARM_DESC(uuid_ctl,
5924 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5925 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5926 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5927 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5928 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5929 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5930 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5931 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5932 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5933 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
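
/*
 * Illustrative invocation (parameter values hypothetical):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 \
 *		sector_size=4096 statistics=1
 *
 * creates one host with 2 targets of 4 LUNs each, backed by a shared
 * 256 MiB ram store of 4096-byte logical blocks, with statistics enabled.
 */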
5934 
5935 #define SDEBUG_INFO_LEN 256
5936 static char sdebug_info[SDEBUG_INFO_LEN];
5937 
5938 static const char *scsi_debug_info(struct Scsi_Host *shp)
5939 {
5940 	int k;
5941 
5942 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5943 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5944 	if (k >= (SDEBUG_INFO_LEN - 1))
5945 		return sdebug_info;
5946 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5947 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5948 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5949 		  "statistics", (int)sdebug_statistics);
5950 	return sdebug_info;
5951 }
5952 
5953 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5954 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5955 				 int length)
5956 {
5957 	char arr[16];
5958 	int opts;
5959 	int min_len = length > 15 ? 15 : length;
5960 
5961 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5962 		return -EACCES;
5963 	memcpy(arr, buffer, min_len);
5964 	arr[min_len] = '\0';
5965 	if (1 != sscanf(arr, "%d", &opts))
5966 		return -EINVAL;
5967 	sdebug_opts = opts;
5968 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5969 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5970 	if (sdebug_every_nth != 0)
5971 		tweak_cmnd_count();
5972 	return length;
5973 }
5974 
5975 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5976  * same for each scsi_debug host (if more than one). Some of the counters
5977  * output are not atomic, so they may be inaccurate on a busy system. */
5978 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5979 {
5980 	int f, j, l;
5981 	struct sdebug_queue *sqp;
5982 	struct sdebug_host_info *sdhp;
5983 
5984 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5985 		   SDEBUG_VERSION, sdebug_version_date);
5986 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5987 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5988 		   sdebug_opts, sdebug_every_nth);
5989 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5990 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5991 		   sdebug_sector_size, "bytes");
5992 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5993 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5994 		   num_aborts);
5995 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5996 		   num_dev_resets, num_target_resets, num_bus_resets,
5997 		   num_host_resets);
5998 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5999 		   dix_reads, dix_writes, dif_errors);
6000 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6001 		   sdebug_statistics);
6002 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6003 		   atomic_read(&sdebug_cmnd_count),
6004 		   atomic_read(&sdebug_completions),
6005 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6006 		   atomic_read(&sdebug_a_tsf),
6007 		   atomic_read(&sdeb_mq_poll_count));
6008 
6009 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6010 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6011 		seq_printf(m, "  queue %d:\n", j);
6012 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6013 		if (f != sdebug_max_queue) {
6014 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6015 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6016 				   "first,last bits", f, l);
6017 		}
6018 	}
6019 
6020 	seq_printf(m, "this host_no=%d\n", host->host_no);
6021 	if (!xa_empty(per_store_ap)) {
6022 		bool niu;
6023 		int idx;
6024 		unsigned long l_idx;
6025 		struct sdeb_store_info *sip;
6026 
6027 		seq_puts(m, "\nhost list:\n");
6028 		j = 0;
6029 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6030 			idx = sdhp->si_idx;
6031 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6032 				   sdhp->shost->host_no, idx);
6033 			++j;
6034 		}
6035 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6036 			   sdeb_most_recent_idx);
6037 		j = 0;
6038 		xa_for_each(per_store_ap, l_idx, sip) {
6039 			niu = xa_get_mark(per_store_ap, l_idx,
6040 					  SDEB_XA_NOT_IN_USE);
6041 			idx = (int)l_idx;
6042 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6043 				   (niu ? "  not_in_use" : ""));
6044 			++j;
6045 		}
6046 	}
6047 	return 0;
6048 }
6049 
6050 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6051 {
6052 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6053 }
6054 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6055  * of delay is jiffies.
6056  */
6057 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6058 			   size_t count)
6059 {
6060 	int jdelay, res;
6061 
6062 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6063 		res = count;
6064 		if (sdebug_jdelay != jdelay) {
6065 			struct sdebug_host_info *sdhp;
6066 
6067 			mutex_lock(&sdebug_host_list_mutex);
6068 			block_unblock_all_queues(true);
6069 
6070 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6071 				struct Scsi_Host *shost = sdhp->shost;
6072 
6073 				if (scsi_host_busy(shost)) {
6074 					res = -EBUSY;   /* queued commands */
6075 					break;
6076 				}
6077 			}
6078 			if (res > 0) {
6079 				sdebug_jdelay = jdelay;
6080 				sdebug_ndelay = 0;
6081 			}
6082 			block_unblock_all_queues(false);
6083 			mutex_unlock(&sdebug_host_list_mutex);
6084 		}
6085 		return res;
6086 	}
6087 	return -EINVAL;
6088 }
6089 static DRIVER_ATTR_RW(delay);
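
/*
 * Hypothetical usage: 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * defers completions by 2 jiffies; the write fails with -EBUSY while
 * commands are queued, and 'echo 0' restores immediate (in-thread)
 * completion.
 */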
6090 
6091 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6092 {
6093 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6094 }
6095 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6096 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6097 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6098 			    size_t count)
6099 {
6100 	int ndelay, res;
6101 
6102 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6103 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6104 		res = count;
6105 		if (sdebug_ndelay != ndelay) {
6106 			struct sdebug_host_info *sdhp;
6107 
6108 			mutex_lock(&sdebug_host_list_mutex);
6109 			block_unblock_all_queues(true);
6110 
6111 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6112 				struct Scsi_Host *shost = sdhp->shost;
6113 
6114 				if (scsi_host_busy(shost)) {
6115 					res = -EBUSY;   /* queued commands */
6116 					break;
6117 				}
6118 			}
6119 
6120 			if (res > 0) {
6121 				sdebug_ndelay = ndelay;
6122 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6123 							: DEF_JDELAY;
6124 			}
6125 			block_unblock_all_queues(false);
6126 			mutex_unlock(&sdebug_host_list_mutex);
6127 		}
6128 		return res;
6129 	}
6130 	return -EINVAL;
6131 }
6132 static DRIVER_ATTR_RW(ndelay);
6133 
6134 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6135 {
6136 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6137 }
6138 
6139 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6140 			  size_t count)
6141 {
6142 	int opts;
6143 	char work[20];
6144 
6145 	if (sscanf(buf, "%10s", work) == 1) {
6146 		if (strncasecmp(work, "0x", 2) == 0) {
6147 			if (kstrtoint(work + 2, 16, &opts) == 0)
6148 				goto opts_done;
6149 		} else {
6150 			if (kstrtoint(work, 10, &opts) == 0)
6151 				goto opts_done;
6152 		}
6153 	}
6154 	return -EINVAL;
6155 opts_done:
6156 	sdebug_opts = opts;
6157 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6158 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6159 	tweak_cmnd_count();
6160 	return count;
6161 }
6162 static DRIVER_ATTR_RW(opts);
6163 
6164 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6165 {
6166 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6167 }
6168 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6169 			   size_t count)
6170 {
6171 	int n;
6172 
6173 	/* Cannot change from or to TYPE_ZBC with sysfs */
6174 	if (sdebug_ptype == TYPE_ZBC)
6175 		return -EINVAL;
6176 
6177 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6178 		if (n == TYPE_ZBC)
6179 			return -EINVAL;
6180 		sdebug_ptype = n;
6181 		return count;
6182 	}
6183 	return -EINVAL;
6184 }
6185 static DRIVER_ATTR_RW(ptype);
6186 
6187 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6188 {
6189 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6190 }
6191 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6192 			    size_t count)
6193 {
6194 	int n;
6195 
6196 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6197 		sdebug_dsense = n;
6198 		return count;
6199 	}
6200 	return -EINVAL;
6201 }
6202 static DRIVER_ATTR_RW(dsense);
6203 
6204 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6205 {
6206 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6207 }
6208 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6209 			     size_t count)
6210 {
6211 	int n, idx;
6212 
6213 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6214 		bool want_store = (n == 0);
6215 		struct sdebug_host_info *sdhp;
6216 
6217 		n = (n > 0);
6218 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6219 		if (sdebug_fake_rw == n)
6220 			return count;	/* not transitioning so do nothing */
6221 
6222 		if (want_store) {	/* 1 --> 0 transition, set up store */
6223 			if (sdeb_first_idx < 0) {
6224 				idx = sdebug_add_store();
6225 				if (idx < 0)
6226 					return idx;
6227 			} else {
6228 				idx = sdeb_first_idx;
6229 				xa_clear_mark(per_store_ap, idx,
6230 					      SDEB_XA_NOT_IN_USE);
6231 			}
6232 			/* make all hosts use same store */
6233 			list_for_each_entry(sdhp, &sdebug_host_list,
6234 					    host_list) {
6235 				if (sdhp->si_idx != idx) {
6236 					xa_set_mark(per_store_ap, sdhp->si_idx,
6237 						    SDEB_XA_NOT_IN_USE);
6238 					sdhp->si_idx = idx;
6239 				}
6240 			}
6241 			sdeb_most_recent_idx = idx;
6242 		} else {	/* 0 --> 1 transition is trigger for shrink */
6243 			sdebug_erase_all_stores(true /* apart from first */);
6244 		}
6245 		sdebug_fake_rw = n;
6246 		return count;
6247 	}
6248 	return -EINVAL;
6249 }
6250 static DRIVER_ATTR_RW(fake_rw);
6251 
6252 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6253 {
6254 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6255 }
6256 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6257 			      size_t count)
6258 {
6259 	int n;
6260 
6261 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6262 		sdebug_no_lun_0 = n;
6263 		return count;
6264 	}
6265 	return -EINVAL;
6266 }
6267 static DRIVER_ATTR_RW(no_lun_0);
6268 
6269 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6270 {
6271 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6272 }
6273 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6274 			      size_t count)
6275 {
6276 	int n;
6277 
6278 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6279 		sdebug_num_tgts = n;
6280 		sdebug_max_tgts_luns();
6281 		return count;
6282 	}
6283 	return -EINVAL;
6284 }
6285 static DRIVER_ATTR_RW(num_tgts);
6286 
6287 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6288 {
6289 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6290 }
6291 static DRIVER_ATTR_RO(dev_size_mb);
6292 
6293 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6294 {
6295 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6296 }
6297 
6298 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6299 				    size_t count)
6300 {
6301 	bool v;
6302 
6303 	if (kstrtobool(buf, &v))
6304 		return -EINVAL;
6305 
6306 	sdebug_per_host_store = v;
6307 	return count;
6308 }
6309 static DRIVER_ATTR_RW(per_host_store);
6310 
6311 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6312 {
6313 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6314 }
6315 static DRIVER_ATTR_RO(num_parts);
6316 
6317 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6318 {
6319 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6320 }
6321 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6322 			       size_t count)
6323 {
6324 	int nth;
6325 	char work[20];
6326 
6327 	if (sscanf(buf, "%10s", work) == 1) {
6328 		if (strncasecmp(work, "0x", 2) == 0) {
6329 			if (kstrtoint(work + 2, 16, &nth) == 0)
6330 				goto every_nth_done;
6331 		} else {
6332 			if (kstrtoint(work, 10, &nth) == 0)
6333 				goto every_nth_done;
6334 		}
6335 	}
6336 	return -EINVAL;
6337 
6338 every_nth_done:
6339 	sdebug_every_nth = nth;
6340 	if (nth && !sdebug_statistics) {
6341 		pr_info("every_nth needs statistics=1, set it\n");
6342 		sdebug_statistics = true;
6343 	}
6344 	tweak_cmnd_count();
6345 	return count;
6346 }
6347 static DRIVER_ATTR_RW(every_nth);
6348 
6349 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6350 {
6351 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6352 }
6353 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6354 				size_t count)
6355 {
6356 	int n;
6357 	bool changed;
6358 
6359 	if (kstrtoint(buf, 0, &n))
6360 		return -EINVAL;
6361 	if (n >= 0) {
6362 		if (n > (int)SAM_LUN_AM_FLAT) {
6363 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6364 			return -EINVAL;
6365 		}
6366 		changed = ((int)sdebug_lun_am != n);
6367 		sdebug_lun_am = n;
6368 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6369 			struct sdebug_host_info *sdhp;
6370 			struct sdebug_dev_info *dp;
6371 
6372 			mutex_lock(&sdebug_host_list_mutex);
6373 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6374 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6375 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6376 				}
6377 			}
6378 			mutex_unlock(&sdebug_host_list_mutex);
6379 		}
6380 		return count;
6381 	}
6382 	return -EINVAL;
6383 }
6384 static DRIVER_ATTR_RW(lun_format);
6385 
6386 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6387 {
6388 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6389 }
6390 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6391 			      size_t count)
6392 {
6393 	int n;
6394 	bool changed;
6395 
6396 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6397 		if (n > 256) {
6398 			pr_warn("max_luns can be no more than 256\n");
6399 			return -EINVAL;
6400 		}
6401 		changed = (sdebug_max_luns != n);
6402 		sdebug_max_luns = n;
6403 		sdebug_max_tgts_luns();
6404 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6405 			struct sdebug_host_info *sdhp;
6406 			struct sdebug_dev_info *dp;
6407 
6408 			mutex_lock(&sdebug_host_list_mutex);
6409 			list_for_each_entry(sdhp, &sdebug_host_list,
6410 					    host_list) {
6411 				list_for_each_entry(dp, &sdhp->dev_info_list,
6412 						    dev_list) {
6413 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6414 						dp->uas_bm);
6415 				}
6416 			}
6417 			mutex_unlock(&sdebug_host_list_mutex);
6418 		}
6419 		return count;
6420 	}
6421 	return -EINVAL;
6422 }
6423 static DRIVER_ATTR_RW(max_luns);
6424 
6425 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6426 {
6427 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6428 }
6429 /* N.B. max_queue can be changed while there are queued commands. In-flight
6430  * commands beyond the new max_queue will be completed. */
6431 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6432 			       size_t count)
6433 {
6434 	int j, n, k, a;
6435 	struct sdebug_queue *sqp;
6436 
6437 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6438 	    (n <= SDEBUG_CANQUEUE) &&
6439 	    (sdebug_host_max_queue == 0)) {
6440 		mutex_lock(&sdebug_host_list_mutex);
6441 		block_unblock_all_queues(true);
6442 		k = 0;
6443 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6444 		     ++j, ++sqp) {
6445 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6446 			if (a > k)
6447 				k = a;
6448 		}
6449 		sdebug_max_queue = n;
6450 		if (k == SDEBUG_CANQUEUE)
6451 			atomic_set(&retired_max_queue, 0);
6452 		else if (k >= n)
6453 			atomic_set(&retired_max_queue, k + 1);
6454 		else
6455 			atomic_set(&retired_max_queue, 0);
6456 		block_unblock_all_queues(false);
6457 		mutex_unlock(&sdebug_host_list_mutex);
6458 		return count;
6459 	}
6460 	return -EINVAL;
6461 }
6462 static DRIVER_ATTR_RW(max_queue);
6463 
6464 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6465 {
6466 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6467 }
6468 
6469 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6470 {
6471 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6472 }
6473 
6474 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6475 {
6476 	bool v;
6477 
6478 	if (kstrtobool(buf, &v))
6479 		return -EINVAL;
6480 
6481 	sdebug_no_rwlock = v;
6482 	return count;
6483 }
6484 static DRIVER_ATTR_RW(no_rwlock);
6485 
6486 /*
6487  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6488  * in range [0, sdebug_host_max_queue), we can't change it.
6489  */
6490 static DRIVER_ATTR_RO(host_max_queue);
6491 
6492 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6493 {
6494 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6495 }
6496 static DRIVER_ATTR_RO(no_uld);
6497 
6498 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6499 {
6500 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6501 }
6502 static DRIVER_ATTR_RO(scsi_level);
6503 
6504 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6505 {
6506 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6507 }
6508 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6509 				size_t count)
6510 {
6511 	int n;
6512 	bool changed;
6513 
6514 	/* Ignore capacity change for ZBC drives for now */
6515 	if (sdeb_zbc_in_use)
6516 		return -EOPNOTSUPP;	/* ENOTSUPP is kernel-internal; don't leak it to userspace */
6517 
6518 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6519 		changed = (sdebug_virtual_gb != n);
6520 		sdebug_virtual_gb = n;
6521 		sdebug_capacity = get_sdebug_capacity();
6522 		if (changed) {
6523 			struct sdebug_host_info *sdhp;
6524 			struct sdebug_dev_info *dp;
6525 
6526 			mutex_lock(&sdebug_host_list_mutex);
6527 			list_for_each_entry(sdhp, &sdebug_host_list,
6528 					    host_list) {
6529 				list_for_each_entry(dp, &sdhp->dev_info_list,
6530 						    dev_list) {
6531 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6532 						dp->uas_bm);
6533 				}
6534 			}
6535 			mutex_unlock(&sdebug_host_list_mutex);
6536 		}
6537 		return count;
6538 	}
6539 	return -EINVAL;
6540 }
6541 static DRIVER_ATTR_RW(virtual_gb);
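
/*
 * Usage sketch: present a virtual capacity larger than the backing store,
 * e.g.
 *
 *   echo 16 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 *
 * A change raises a CAPACITY DATA HAS CHANGED unit attention on all
 * devices; writing 0 is expected to fall back to the real store size.
 */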
6542 
6543 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6544 {
6545 	/* show the absolute number of hosts currently active */
6546 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6547 }
6548 
6549 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6550 			      size_t count)
6551 {
6552 	bool found;
6553 	unsigned long idx;
6554 	struct sdeb_store_info *sip;
6555 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6556 	int delta_hosts;
6557 
6558 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6559 		return -EINVAL;
6560 	if (delta_hosts > 0) {
6561 		do {
6562 			found = false;
6563 			if (want_phs) {
6564 				xa_for_each_marked(per_store_ap, idx, sip,
6565 						   SDEB_XA_NOT_IN_USE) {
6566 					sdeb_most_recent_idx = (int)idx;
6567 					found = true;
6568 					break;
6569 				}
6570 				if (found)	/* re-use case */
6571 					sdebug_add_host_helper((int)idx);
6572 				else
6573 					sdebug_do_add_host(true);
6574 			} else {
6575 				sdebug_do_add_host(false);
6576 			}
6577 		} while (--delta_hosts);
6578 	} else if (delta_hosts < 0) {
6579 		do {
6580 			sdebug_do_remove_host(false);
6581 		} while (++delta_hosts);
6582 	}
6583 	return count;
6584 }
6585 static DRIVER_ATTR_RW(add_host);
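
/*
 * Usage sketch: a positive value adds that many hosts and a negative value
 * removes them, e.g.
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * With per-host stores active, stores marked SDEB_XA_NOT_IN_USE are re-used
 * before new ones are allocated, as the loop above shows.
 */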
6586 
6587 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6588 {
6589 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6590 }
6591 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6592 				    size_t count)
6593 {
6594 	int n;
6595 
6596 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6597 		sdebug_vpd_use_hostno = n;
6598 		return count;
6599 	}
6600 	return -EINVAL;
6601 }
6602 static DRIVER_ATTR_RW(vpd_use_hostno);
6603 
6604 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6605 {
6606 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6607 }
6608 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6609 				size_t count)
6610 {
6611 	int n;
6612 
6613 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6614 		if (n > 0)
6615 			sdebug_statistics = true;
6616 		else {
6617 			clear_queue_stats();
6618 			sdebug_statistics = false;
6619 		}
6620 		return count;
6621 	}
6622 	return -EINVAL;
6623 }
6624 static DRIVER_ATTR_RW(statistics);
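
/*
 * Usage sketch: any value > 0 enables command statistics; 0 clears and
 * disables them, e.g.
 *
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 */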
6625 
6626 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6627 {
6628 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6629 }
6630 static DRIVER_ATTR_RO(sector_size);
6631 
6632 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6633 {
6634 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6635 }
6636 static DRIVER_ATTR_RO(submit_queues);
6637 
6638 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6639 {
6640 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6641 }
6642 static DRIVER_ATTR_RO(dix);
6643 
6644 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6645 {
6646 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6647 }
6648 static DRIVER_ATTR_RO(dif);
6649 
6650 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6651 {
6652 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6653 }
6654 static DRIVER_ATTR_RO(guard);
6655 
6656 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6657 {
6658 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6659 }
6660 static DRIVER_ATTR_RO(ato);
6661 
6662 static ssize_t map_show(struct device_driver *ddp, char *buf)
6663 {
6664 	ssize_t count = 0;
6665 
6666 	if (!scsi_debug_lbp())
6667 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6668 				 sdebug_store_sectors);
6669 
6670 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6671 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6672 
6673 		if (sip)
6674 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6675 					  (int)map_size, sip->map_storep);
6676 	}
6677 	buf[count++] = '\n';
6678 	buf[count] = '\0';
6679 
6680 	return count;
6681 }
6682 static DRIVER_ATTR_RO(map);
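
/*
 * Illustrative output: with logical block provisioning active, reading this
 * attribute prints the provisioning bitmap as a range list ("%*pbl"), e.g.
 * "0-1,64-127"; without LBP it prints the whole span "0-<store_sectors>".
 * The actual ranges depend on what has been written or unmapped.
 */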
6683 
6684 static ssize_t random_show(struct device_driver *ddp, char *buf)
6685 {
6686 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6687 }
6688 
6689 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6690 			    size_t count)
6691 {
6692 	bool v;
6693 
6694 	if (kstrtobool(buf, &v))
6695 		return -EINVAL;
6696 
6697 	sdebug_random = v;
6698 	return count;
6699 }
6700 static DRIVER_ATTR_RW(random);
6701 
6702 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6703 {
6704 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6705 }
6706 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6707 			       size_t count)
6708 {
6709 	int n;
6710 
6711 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6712 		sdebug_removable = (n > 0);
6713 		return count;
6714 	}
6715 	return -EINVAL;
6716 }
6717 static DRIVER_ATTR_RW(removable);
6718 
6719 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6720 {
6721 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6722 }
6723 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6724 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6725 			       size_t count)
6726 {
6727 	int n;
6728 
6729 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6730 		sdebug_host_lock = (n > 0);
6731 		return count;
6732 	}
6733 	return -EINVAL;
6734 }
6735 static DRIVER_ATTR_RW(host_lock);
6736 
6737 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6738 {
6739 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6740 }
6741 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6742 			    size_t count)
6743 {
6744 	int n;
6745 
6746 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6747 		sdebug_strict = (n > 0);
6748 		return count;
6749 	}
6750 	return -EINVAL;
6751 }
6752 static DRIVER_ATTR_RW(strict);
6753 
6754 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6755 {
6756 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6757 }
6758 static DRIVER_ATTR_RO(uuid_ctl);
6759 
6760 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6761 {
6762 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6763 }
6764 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6765 			     size_t count)
6766 {
6767 	int ret, n;
6768 
6769 	ret = kstrtoint(buf, 0, &n);
6770 	if (ret)
6771 		return ret;
6772 	sdebug_cdb_len = n;
6773 	all_config_cdb_len();
6774 	return count;
6775 }
6776 static DRIVER_ATTR_RW(cdb_len);
6777 
6778 static const char * const zbc_model_strs_a[] = {
6779 	[BLK_ZONED_NONE] = "none",
6780 	[BLK_ZONED_HA]   = "host-aware",
6781 	[BLK_ZONED_HM]   = "host-managed",
6782 };
6783 
6784 static const char * const zbc_model_strs_b[] = {
6785 	[BLK_ZONED_NONE] = "no",
6786 	[BLK_ZONED_HA]   = "aware",
6787 	[BLK_ZONED_HM]   = "managed",
6788 };
6789 
6790 static const char * const zbc_model_strs_c[] = {
6791 	[BLK_ZONED_NONE] = "0",
6792 	[BLK_ZONED_HA]   = "1",
6793 	[BLK_ZONED_HM]   = "2",
6794 };
6795 
6796 static int sdeb_zbc_model_str(const char *cp)
6797 {
6798 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6799 
6800 	if (res < 0) {
6801 		res = sysfs_match_string(zbc_model_strs_b, cp);
6802 		if (res < 0) {
6803 			res = sysfs_match_string(zbc_model_strs_c, cp);
6804 			if (res < 0)
6805 				return -EINVAL;
6806 		}
6807 	}
6808 	return res;
6809 }
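
/*
 * The three tables above are synonyms, so the following invocations
 * (illustrative) all select the host-managed model:
 *
 *   modprobe scsi_debug zbc=host-managed
 *   modprobe scsi_debug zbc=managed
 *   modprobe scsi_debug zbc=2
 */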
6810 
6811 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6812 {
6813 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6814 			 zbc_model_strs_a[sdeb_zbc_model]);
6815 }
6816 static DRIVER_ATTR_RO(zbc);
6817 
6818 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6819 {
6820 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6821 }
6822 static DRIVER_ATTR_RO(tur_ms_to_ready);
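
/*
 * Usage sketch: tur_ms_to_ready is a load-time parameter, e.g.
 *
 *   modprobe scsi_debug tur_ms_to_ready=2000
 *
 * makes TEST UNIT READY report "in process of becoming ready" for roughly
 * the first two seconds after device creation; see resp_not_ready() below.
 */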
6823 
6824 /* Note: The following array creates attribute files in the
6825  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6826  * files (over those found in the /sys/module/scsi_debug/parameters
6827  * directory) is that auxiliary actions can be triggered when an attribute
6828  * is changed. For example, see add_host_store() above.
6829  */
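
/*
 * For example (illustrative; both files carry the same value), only the
 * second form triggers the side effects coded in max_luns_store():
 *
 *   echo 8 > /sys/module/scsi_debug/parameters/max_luns
 *   echo 8 > /sys/bus/pseudo/drivers/scsi_debug/max_luns
 */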
6830 
6831 static struct attribute *sdebug_drv_attrs[] = {
6832 	&driver_attr_delay.attr,
6833 	&driver_attr_opts.attr,
6834 	&driver_attr_ptype.attr,
6835 	&driver_attr_dsense.attr,
6836 	&driver_attr_fake_rw.attr,
6837 	&driver_attr_host_max_queue.attr,
6838 	&driver_attr_no_lun_0.attr,
6839 	&driver_attr_num_tgts.attr,
6840 	&driver_attr_dev_size_mb.attr,
6841 	&driver_attr_num_parts.attr,
6842 	&driver_attr_every_nth.attr,
6843 	&driver_attr_lun_format.attr,
6844 	&driver_attr_max_luns.attr,
6845 	&driver_attr_max_queue.attr,
6846 	&driver_attr_no_rwlock.attr,
6847 	&driver_attr_no_uld.attr,
6848 	&driver_attr_scsi_level.attr,
6849 	&driver_attr_virtual_gb.attr,
6850 	&driver_attr_add_host.attr,
6851 	&driver_attr_per_host_store.attr,
6852 	&driver_attr_vpd_use_hostno.attr,
6853 	&driver_attr_sector_size.attr,
6854 	&driver_attr_statistics.attr,
6855 	&driver_attr_submit_queues.attr,
6856 	&driver_attr_dix.attr,
6857 	&driver_attr_dif.attr,
6858 	&driver_attr_guard.attr,
6859 	&driver_attr_ato.attr,
6860 	&driver_attr_map.attr,
6861 	&driver_attr_random.attr,
6862 	&driver_attr_removable.attr,
6863 	&driver_attr_host_lock.attr,
6864 	&driver_attr_ndelay.attr,
6865 	&driver_attr_strict.attr,
6866 	&driver_attr_uuid_ctl.attr,
6867 	&driver_attr_cdb_len.attr,
6868 	&driver_attr_tur_ms_to_ready.attr,
6869 	&driver_attr_zbc.attr,
6870 	NULL,
6871 };
6872 ATTRIBUTE_GROUPS(sdebug_drv);
6873 
6874 static struct device *pseudo_primary;
6875 
6876 static int __init scsi_debug_init(void)
6877 {
6878 	bool want_store = (sdebug_fake_rw == 0);
6879 	unsigned long sz;
6880 	int k, ret, hosts_to_add;
6881 	int idx = -1;
6882 
6883 	ramdisk_lck_a[0] = &atomic_rw;
6884 	ramdisk_lck_a[1] = &atomic_rw2;
6885 	atomic_set(&retired_max_queue, 0);
6886 
6887 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6888 		pr_warn("ndelay must be less than 1 second, ignored\n");
6889 		sdebug_ndelay = 0;
6890 	} else if (sdebug_ndelay > 0)
6891 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6892 
6893 	switch (sdebug_sector_size) {
6894 	case  512:
6895 	case 1024:
6896 	case 2048:
6897 	case 4096:
6898 		break;
6899 	default:
6900 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6901 		return -EINVAL;
6902 	}
6903 
6904 	switch (sdebug_dif) {
6905 	case T10_PI_TYPE0_PROTECTION:
6906 		break;
6907 	case T10_PI_TYPE1_PROTECTION:
6908 	case T10_PI_TYPE2_PROTECTION:
6909 	case T10_PI_TYPE3_PROTECTION:
6910 		have_dif_prot = true;
6911 		break;
6912 
6913 	default:
6914 		pr_err("dif must be 0, 1, 2 or 3\n");
6915 		return -EINVAL;
6916 	}
6917 
6918 	if (sdebug_num_tgts < 0) {
6919 		pr_err("num_tgts must be >= 0\n");
6920 		return -EINVAL;
6921 	}
6922 
6923 	if (sdebug_guard > 1) {
6924 		pr_err("guard must be 0 or 1\n");
6925 		return -EINVAL;
6926 	}
6927 
6928 	if (sdebug_ato > 1) {
6929 		pr_err("ato must be 0 or 1\n");
6930 		return -EINVAL;
6931 	}
6932 
6933 	if (sdebug_physblk_exp > 15) {
6934 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6935 		return -EINVAL;
6936 	}
6937 
6938 	sdebug_lun_am = sdebug_lun_am_i;
6939 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6940 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6941 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6942 	}
6943 
6944 	if (sdebug_max_luns > 256) {
6945 		if (sdebug_max_luns > 16384) {
6946 			pr_warn("max_luns can be no more than 16384, using default\n");
6947 			sdebug_max_luns = DEF_MAX_LUNS;
6948 		}
6949 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6950 	}
6951 
6952 	if (sdebug_lowest_aligned > 0x3fff) {
6953 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6954 		return -EINVAL;
6955 	}
6956 
6957 	if (submit_queues < 1) {
6958 		pr_err("submit_queues must be 1 or more\n");
6959 		return -EINVAL;
6960 	}
6961 
6962 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6963 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6964 		return -EINVAL;
6965 	}
6966 
6967 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6968 	    (sdebug_host_max_queue < 0)) {
6969 		pr_err("host_max_queue must be in range [0, %d]\n",
6970 		       SDEBUG_CANQUEUE);
6971 		return -EINVAL;
6972 	}
6973 
6974 	if (sdebug_host_max_queue &&
6975 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6976 		sdebug_max_queue = sdebug_host_max_queue;
6977 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6978 			sdebug_max_queue);
6979 	}
6980 
6981 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6982 			       GFP_KERNEL);
6983 	if (sdebug_q_arr == NULL)
6984 		return -ENOMEM;
6985 	for (k = 0; k < submit_queues; ++k)
6986 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6987 
6988 	/*
6989 	 * check for host managed zoned block device specified with
6990 	 * ptype=0x14 or zbc=XXX.
6991 	 */
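	/*
	 * e.g. "modprobe scsi_debug ptype=0x14" and "modprobe scsi_debug
	 * zbc=host-managed" (illustrative) both select the host-managed
	 * model handled below.
	 */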
6992 	if (sdebug_ptype == TYPE_ZBC) {
6993 		sdeb_zbc_model = BLK_ZONED_HM;
6994 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6995 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6996 		if (k < 0) {
6997 			ret = k;
6998 			goto free_q_arr;
6999 		}
7000 		sdeb_zbc_model = k;
7001 		switch (sdeb_zbc_model) {
7002 		case BLK_ZONED_NONE:
7003 		case BLK_ZONED_HA:
7004 			sdebug_ptype = TYPE_DISK;
7005 			break;
7006 		case BLK_ZONED_HM:
7007 			sdebug_ptype = TYPE_ZBC;
7008 			break;
7009 		default:
7010 			pr_err("Invalid ZBC model\n");
7011 			ret = -EINVAL;
7012 			goto free_q_arr;
7013 		}
7014 	}
7015 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7016 		sdeb_zbc_in_use = true;
7017 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7018 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7019 	}
7020 
7021 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7022 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7023 	if (sdebug_dev_size_mb < 1)
7024 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7025 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7026 	sdebug_store_sectors = sz / sdebug_sector_size;
7027 	sdebug_capacity = get_sdebug_capacity();
7028 
7029 	/* play around with geometry, don't waste too much on track 0 */
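	/*
	 * Worked example (illustrative, virtual_gb=0): dev_size_mb=256 with
	 * 512-byte sectors gives 524288 sectors; heads=64 and
	 * sectors_per=32 then yield 524288 / (64 * 32) = 256 cylinders.
	 */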
7030 	sdebug_heads = 8;
7031 	sdebug_sectors_per = 32;
7032 	if (sdebug_dev_size_mb >= 256)
7033 		sdebug_heads = 64;
7034 	else if (sdebug_dev_size_mb >= 16)
7035 		sdebug_heads = 32;
7036 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7037 			       (sdebug_sectors_per * sdebug_heads);
7038 	if (sdebug_cylinders_per >= 1024) {
7039 		/* other LLDs do this; implies >= 1GB ram disk ... */
7040 		sdebug_heads = 255;
7041 		sdebug_sectors_per = 63;
7042 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7043 			       (sdebug_sectors_per * sdebug_heads);
7044 	}
7045 	if (scsi_debug_lbp()) {
7046 		sdebug_unmap_max_blocks =
7047 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7048 
7049 		sdebug_unmap_max_desc =
7050 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7051 
7052 		sdebug_unmap_granularity =
7053 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7054 
7055 		if (sdebug_unmap_alignment &&
7056 		    sdebug_unmap_granularity <=
7057 		    sdebug_unmap_alignment) {
7058 			pr_err("unmap_granularity <= unmap_alignment\n");
7059 			ret = -EINVAL;
7060 			goto free_q_arr;
7061 		}
7062 	}
7063 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7064 	if (want_store) {
7065 		idx = sdebug_add_store();
7066 		if (idx < 0) {
7067 			ret = idx;
7068 			goto free_q_arr;
7069 		}
7070 	}
7071 
7072 	pseudo_primary = root_device_register("pseudo_0");
7073 	if (IS_ERR(pseudo_primary)) {
7074 		pr_warn("root_device_register() error\n");
7075 		ret = PTR_ERR(pseudo_primary);
7076 		goto free_vm;
7077 	}
7078 	ret = bus_register(&pseudo_lld_bus);
7079 	if (ret < 0) {
7080 		pr_warn("bus_register error: %d\n", ret);
7081 		goto dev_unreg;
7082 	}
7083 	ret = driver_register(&sdebug_driverfs_driver);
7084 	if (ret < 0) {
7085 		pr_warn("driver_register error: %d\n", ret);
7086 		goto bus_unreg;
7087 	}
7088 
7089 	hosts_to_add = sdebug_add_host;
7090 	sdebug_add_host = 0;
7091 
7092 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7093 	if (!queued_cmd_cache)
7094 		goto driver_unreg;
7095 
7096 	for (k = 0; k < hosts_to_add; k++) {
7097 		if (want_store && k == 0) {
7098 			ret = sdebug_add_host_helper(idx);
7099 			if (ret < 0) {
7100 				pr_err("add_host_helper k=%d, error=%d\n",
7101 				       k, -ret);
7102 				break;
7103 			}
7104 		} else {
7105 			ret = sdebug_do_add_host(want_store &&
7106 						 sdebug_per_host_store);
7107 			if (ret < 0) {
7108 				pr_err("add_host k=%d error=%d\n", k, -ret);
7109 				break;
7110 			}
7111 		}
7112 	}
7113 	if (sdebug_verbose)
7114 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7115 
7116 	return 0;
7117 
7118 driver_unreg:
7119 	driver_unregister(&sdebug_driverfs_driver);
7120 bus_unreg:
7121 	bus_unregister(&pseudo_lld_bus);
7122 dev_unreg:
7123 	root_device_unregister(pseudo_primary);
7124 free_vm:
7125 	sdebug_erase_store(idx, NULL);
7126 free_q_arr:
7127 	kfree(sdebug_q_arr);
7128 	return ret;
7129 }
7130 
7131 static void __exit scsi_debug_exit(void)
7132 {
7133 	int k = sdebug_num_hosts;
7134 
7135 	for (; k; k--)
7136 		sdebug_do_remove_host(true);
7137 	kmem_cache_destroy(queued_cmd_cache);
7138 	driver_unregister(&sdebug_driverfs_driver);
7139 	bus_unregister(&pseudo_lld_bus);
7140 	root_device_unregister(pseudo_primary);
7141 
7142 	sdebug_erase_all_stores(false);
7143 	xa_destroy(per_store_ap);
7144 	kfree(sdebug_q_arr);
7145 }
7146 
7147 device_initcall(scsi_debug_init);
7148 module_exit(scsi_debug_exit);
7149 
7150 static void sdebug_release_adapter(struct device *dev)
7151 {
7152 	struct sdebug_host_info *sdbg_host;
7153 
7154 	sdbg_host = dev_to_sdebug_host(dev);
7155 	kfree(sdbg_host);
7156 }
7157 
7158 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7159 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7160 {
7161 	if (idx < 0)
7162 		return;
7163 	if (!sip) {
7164 		if (xa_empty(per_store_ap))
7165 			return;
7166 		sip = xa_load(per_store_ap, idx);
7167 		if (!sip)
7168 			return;
7169 	}
7170 	vfree(sip->map_storep);
7171 	vfree(sip->dif_storep);
7172 	vfree(sip->storep);
7173 	xa_erase(per_store_ap, idx);
7174 	kfree(sip);
7175 }
7176 
7177 /* apart_from_first is assumed to be false only in the shutdown case */
7178 static void sdebug_erase_all_stores(bool apart_from_first)
7179 {
7180 	unsigned long idx;
7181 	struct sdeb_store_info *sip = NULL;
7182 
7183 	xa_for_each(per_store_ap, idx, sip) {
7184 		if (apart_from_first)
7185 			apart_from_first = false;
7186 		else
7187 			sdebug_erase_store(idx, sip);
7188 	}
7189 	if (apart_from_first)
7190 		sdeb_most_recent_idx = sdeb_first_idx;
7191 }
7192 
7193 /*
7194  * Returns store xarray new element index (idx) if >=0 else negated errno.
7195  * Limit the number of stores to 65536.
7196  */
7197 static int sdebug_add_store(void)
7198 {
7199 	int res;
7200 	u32 n_idx;
7201 	unsigned long iflags;
7202 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7203 	struct sdeb_store_info *sip = NULL;
7204 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7205 
7206 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7207 	if (!sip)
7208 		return -ENOMEM;
7209 
7210 	xa_lock_irqsave(per_store_ap, iflags);
7211 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7212 	if (unlikely(res < 0)) {
7213 		xa_unlock_irqrestore(per_store_ap, iflags);
7214 		kfree(sip);
7215 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7216 		return res;
7217 	}
7218 	sdeb_most_recent_idx = n_idx;
7219 	if (sdeb_first_idx < 0)
7220 		sdeb_first_idx = n_idx;
7221 	xa_unlock_irqrestore(per_store_ap, iflags);
7222 
7223 	res = -ENOMEM;
7224 	sip->storep = vzalloc(sz);
7225 	if (!sip->storep) {
7226 		pr_err("user data oom\n");
7227 		goto err;
7228 	}
7229 	if (sdebug_num_parts > 0)
7230 		sdebug_build_parts(sip->storep, sz);
7231 
7232 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7233 	if (sdebug_dix) {
7234 		int dif_size;
7235 
7236 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7237 		sip->dif_storep = vmalloc(dif_size);
7238 
7239 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7240 			sip->dif_storep);
7241 
7242 		if (!sip->dif_storep) {
7243 			pr_err("DIX oom\n");
7244 			goto err;
7245 		}
7246 		memset(sip->dif_storep, 0xff, dif_size);
7247 	}
7248 	/* Logical Block Provisioning */
7249 	if (scsi_debug_lbp()) {
7250 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7251 		sip->map_storep = vmalloc(array_size(sizeof(long),
7252 						     BITS_TO_LONGS(map_size)));
7253 
7254 		pr_info("%lu provisioning blocks\n", map_size);
7255 
7256 		if (!sip->map_storep) {
7257 			pr_err("LBP map oom\n");
7258 			goto err;
7259 		}
7260 
7261 		bitmap_zero(sip->map_storep, map_size);
7262 
7263 		/* Map first 1KB for partition table */
7264 		if (sdebug_num_parts)
7265 			map_region(sip, 0, 2);
7266 	}
7267 
7268 	rwlock_init(&sip->macc_lck);
7269 	return (int)n_idx;
7270 err:
7271 	sdebug_erase_store((int)n_idx, sip);
7272 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7273 	return res;
7274 }
7275 
7276 static int sdebug_add_host_helper(int per_host_idx)
7277 {
7278 	int k, devs_per_host, idx;
7279 	int error = -ENOMEM;
7280 	struct sdebug_host_info *sdbg_host;
7281 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7282 
7283 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7284 	if (!sdbg_host)
7285 		return -ENOMEM;
7286 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7287 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7288 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7289 	sdbg_host->si_idx = idx;
7290 
7291 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7292 
7293 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7294 	for (k = 0; k < devs_per_host; k++) {
7295 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7296 		if (!sdbg_devinfo)
7297 			goto clean;
7298 	}
7299 
7300 	mutex_lock(&sdebug_host_list_mutex);
7301 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7302 	mutex_unlock(&sdebug_host_list_mutex);
7303 
7304 	sdbg_host->dev.bus = &pseudo_lld_bus;
7305 	sdbg_host->dev.parent = pseudo_primary;
7306 	sdbg_host->dev.release = &sdebug_release_adapter;
7307 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7308 
7309 	error = device_register(&sdbg_host->dev);
7310 	if (error) {
7311 		mutex_lock(&sdebug_host_list_mutex);
7312 		list_del(&sdbg_host->host_list);
7313 		mutex_unlock(&sdebug_host_list_mutex);
7314 		goto clean;
7315 	}
7316 
7317 	++sdebug_num_hosts;
7318 	return 0;
7319 
7320 clean:
7321 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7322 				 dev_list) {
7323 		list_del(&sdbg_devinfo->dev_list);
7324 		kfree(sdbg_devinfo->zstate);
7325 		kfree(sdbg_devinfo);
7326 	}
7327 	if (sdbg_host->dev.release)
7328 		put_device(&sdbg_host->dev);
7329 	else
7330 		kfree(sdbg_host);
7331 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7332 	return error;
7333 }
7334 
7335 static int sdebug_do_add_host(bool mk_new_store)
7336 {
7337 	int ph_idx = sdeb_most_recent_idx;
7338 
7339 	if (mk_new_store) {
7340 		ph_idx = sdebug_add_store();
7341 		if (ph_idx < 0)
7342 			return ph_idx;
7343 	}
7344 	return sdebug_add_host_helper(ph_idx);
7345 }
7346 
7347 static void sdebug_do_remove_host(bool the_end)
7348 {
7349 	int idx = -1;
7350 	struct sdebug_host_info *sdbg_host = NULL;
7351 	struct sdebug_host_info *sdbg_host2;
7352 
7353 	mutex_lock(&sdebug_host_list_mutex);
7354 	if (!list_empty(&sdebug_host_list)) {
7355 		sdbg_host = list_entry(sdebug_host_list.prev,
7356 				       struct sdebug_host_info, host_list);
7357 		idx = sdbg_host->si_idx;
7358 	}
7359 	if (!the_end && idx >= 0) {
7360 		bool unique = true;
7361 
7362 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7363 			if (sdbg_host2 == sdbg_host)
7364 				continue;
7365 			if (idx == sdbg_host2->si_idx) {
7366 				unique = false;
7367 				break;
7368 			}
7369 		}
7370 		if (unique) {
7371 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7372 			if (idx == sdeb_most_recent_idx)
7373 				--sdeb_most_recent_idx;
7374 		}
7375 	}
7376 	if (sdbg_host)
7377 		list_del(&sdbg_host->host_list);
7378 	mutex_unlock(&sdebug_host_list_mutex);
7379 
7380 	if (!sdbg_host)
7381 		return;
7382 
7383 	device_unregister(&sdbg_host->dev);
7384 	--sdebug_num_hosts;
7385 }
7386 
7387 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7388 {
7389 	struct sdebug_dev_info *devip = sdev->hostdata;
7390 
7391 	if (!devip)
7392 		return -ENODEV;
7393 
7394 	mutex_lock(&sdebug_host_list_mutex);
7395 	block_unblock_all_queues(true);
7396 
7397 	if (qdepth > SDEBUG_CANQUEUE) {
7398 		qdepth = SDEBUG_CANQUEUE;
7399 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7400 			qdepth, SDEBUG_CANQUEUE);
7401 	}
7402 	if (qdepth < 1)
7403 		qdepth = 1;
7404 	if (qdepth != sdev->queue_depth)
7405 		scsi_change_queue_depth(sdev, qdepth);
7406 
7407 	block_unblock_all_queues(false);
7408 	mutex_unlock(&sdebug_host_list_mutex);
7409 
7410 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7411 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7412 
7413 	return sdev->queue_depth;
7414 }
7415 
7416 static bool fake_timeout(struct scsi_cmnd *scp)
7417 {
7418 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7419 		if (sdebug_every_nth < -1)
7420 			sdebug_every_nth = -1;
7421 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7422 			return true; /* ignore command causing timeout */
7423 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7424 			 scsi_medium_access_command(scp))
7425 			return true; /* time out reads and writes */
7426 	}
7427 	return false;
7428 }
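
/*
 * For example (illustrative): with every_nth=100 and the SDEBUG_OPT_TIMEOUT
 * bit set in opts, every 100th command is silently dropped so that the
 * mid-level timeout and abort paths can be exercised.
 */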
7429 
7430 /* Response to TUR or media access command when device stopped */
7431 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7432 {
7433 	int stopped_state;
7434 	u64 diff_ns = 0;
7435 	ktime_t now_ts = ktime_get_boottime();
7436 	struct scsi_device *sdp = scp->device;
7437 
7438 	stopped_state = atomic_read(&devip->stopped);
7439 	if (stopped_state == 2) {
7440 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7441 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7442 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7443 				/* tur_ms_to_ready timer has expired */
7444 				atomic_set(&devip->stopped, 0);
7445 				return 0;
7446 			}
7447 		}
7448 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7449 		if (sdebug_verbose)
7450 			sdev_printk(KERN_INFO, sdp,
7451 				    "%s: Not ready: in process of becoming ready\n", my_name);
7452 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7453 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7454 
7455 			if (diff_ns <= tur_nanosecs_to_ready)
7456 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7457 			else
7458 				diff_ns = tur_nanosecs_to_ready;
7459 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7460 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7461 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7462 						   diff_ns);
7463 			return check_condition_result;
7464 		}
7465 	}
7466 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7467 	if (sdebug_verbose)
7468 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7469 			    my_name);
7470 	return check_condition_result;
7471 }
7472 
7473 static void sdebug_map_queues(struct Scsi_Host *shost)
7474 {
7475 	int i, qoff;
7476 
7477 	if (shost->nr_hw_queues == 1)
7478 		return;
7479 
7480 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7481 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7482 
7483 		map->nr_queues  = 0;
7484 
7485 		if (i == HCTX_TYPE_DEFAULT)
7486 			map->nr_queues = submit_queues - poll_queues;
7487 		else if (i == HCTX_TYPE_POLL)
7488 			map->nr_queues = poll_queues;
7489 
7490 		if (!map->nr_queues) {
7491 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7492 			continue;
7493 		}
7494 
7495 		map->queue_offset = qoff;
7496 		blk_mq_map_queues(map);
7497 
7498 		qoff += map->nr_queues;
7499 	}
7500 }
7501 
7502 struct sdebug_blk_mq_poll_data {
7503 	unsigned int queue_num;
7504 	int *num_entries;
7505 };
7506 
7507 /*
7508  * We don't handle aborted commands here, but it does not seem possible for
7509  * schedule_resp() to produce an aborted polled command.
7510  */
7511 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7512 {
7513 	struct sdebug_blk_mq_poll_data *data = opaque;
7514 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7515 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7516 	struct sdebug_defer *sd_dp;
7517 	u32 unique_tag = blk_mq_unique_tag(rq);
7518 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7519 	struct sdebug_queued_cmd *sqcp;
7520 	struct sdebug_queue *sqp;
7521 	unsigned long flags;
7522 	int queue_num = data->queue_num;
7523 	bool retiring = false;
7524 	int qc_idx;
7525 	ktime_t time;
7526 
7527 	/* We're only interested in one queue for this iteration */
7528 	if (hwq != queue_num)
7529 		return true;
7530 
7531 	/* Subsequent checks would fail if this failed, but check anyway */
7532 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7533 		return true;
7534 
7535 	time = ktime_get_boottime();
7536 
7537 	spin_lock_irqsave(&sdsc->lock, flags);
7538 	sqcp = TO_QUEUED_CMD(cmd);
7539 	if (!sqcp) {
7540 		spin_unlock_irqrestore(&sdsc->lock, flags);
7541 		return true;
7542 	}
7543 
7544 	sqp = sdebug_q_arr + queue_num;
7545 	sd_dp = &sqcp->sd_dp;
7546 
7547 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7548 		spin_unlock_irqrestore(&sdsc->lock, flags);
7549 		return true;
7550 	}
7551 
7552 	if (time < sd_dp->cmpl_ts) {
7553 		spin_unlock_irqrestore(&sdsc->lock, flags);
7554 		return true;
7555 	}
7556 
7557 	if (unlikely(atomic_read(&retired_max_queue) > 0))
7558 		retiring = true;
7559 
7560 	qc_idx = sd_dp->sqa_idx;
7561 	sqp->qc_arr[qc_idx] = NULL;
7562 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7563 		spin_unlock_irqrestore(&sdsc->lock, flags);
7564 		pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u\n",
7565 			sqp, queue_num, qc_idx);
7566 		sdebug_free_queued_cmd(sqcp);
7567 		return true;
7568 	}
7569 
7570 	if (unlikely(retiring)) {	/* user has reduced max_queue */
7571 		int k, retval = atomic_read(&retired_max_queue);
7572 
7573 		if (qc_idx >= retval) {
7574 			pr_err("index %d too large\n", qc_idx);
7575 			spin_unlock_irqrestore(&sdsc->lock, flags);
7576 			sdebug_free_queued_cmd(sqcp);
7577 			return true;
7578 		}
7579 
7580 		k = find_last_bit(sqp->in_use_bm, retval);
7581 		if ((k < sdebug_max_queue) || (k == retval))
7582 			atomic_set(&retired_max_queue, 0);
7583 		else
7584 			atomic_set(&retired_max_queue, k + 1);
7585 	}
7586 
7587 	ASSIGN_QUEUED_CMD(cmd, NULL);
7588 	spin_unlock_irqrestore(&sdsc->lock, flags);
7589 
7590 	if (sdebug_statistics) {
7591 		atomic_inc(&sdebug_completions);
7592 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7593 			atomic_inc(&sdebug_miss_cpus);
7594 	}
7595 
7596 	sdebug_free_queued_cmd(sqcp);
7597 
7598 	scsi_done(cmd); /* callback to mid level */
7599 	(*data->num_entries)++;
7600 	return true;
7601 }
7602 
7603 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7604 {
7605 	int num_entries = 0;
7606 	unsigned long iflags;
7607 	struct sdebug_queue *sqp;
7608 	struct sdebug_blk_mq_poll_data data = {
7609 		.queue_num = queue_num,
7610 		.num_entries = &num_entries,
7611 	};
7612 	sqp = sdebug_q_arr + queue_num;
7613 
7614 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7615 
7616 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7617 				&data);
7618 
7619 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7620 	if (num_entries > 0)
7621 		atomic_add(num_entries, &sdeb_mq_poll_count);
7622 	return num_entries;
7623 }
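
/*
 * Usage sketch (illustrative): load with dedicated poll queues, e.g.
 *
 *   modprobe scsi_debug submit_queues=4 poll_queues=2
 *
 * then submit polled I/O (e.g. io_uring with IORING_SETUP_IOPOLL) so that
 * completions are reaped through this mq_poll hook; sdeb_mq_poll_count
 * tallies how many were reaped here.
 */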
7624 
7625 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7626 				   struct scsi_cmnd *scp)
7627 {
7628 	u8 sdeb_i;
7629 	struct scsi_device *sdp = scp->device;
7630 	const struct opcode_info_t *oip;
7631 	const struct opcode_info_t *r_oip;
7632 	struct sdebug_dev_info *devip;
7633 	u8 *cmd = scp->cmnd;
7634 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7635 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7636 	int k, na;
7637 	int errsts = 0;
7638 	u64 lun_index = sdp->lun & 0x3FFF;
7639 	u32 flags;
7640 	u16 sa;
7641 	u8 opcode = cmd[0];
7642 	bool has_wlun_rl;
7643 	bool inject_now;
7644 
7645 	scsi_set_resid(scp, 0);
7646 	if (sdebug_statistics) {
7647 		atomic_inc(&sdebug_cmnd_count);
7648 		inject_now = inject_on_this_cmd();
7649 	} else {
7650 		inject_now = false;
7651 	}
7652 	if (unlikely(sdebug_verbose &&
7653 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7654 		char b[120];
7655 		int n, len, sb;
7656 
7657 		len = scp->cmd_len;
7658 		sb = (int)sizeof(b);
7659 		if (len > 32)
7660 			strcpy(b, "too long, over 32 bytes");
7661 		else {
7662 			for (k = 0, n = 0; k < len && n < sb; ++k)
7663 				n += scnprintf(b + n, sb - n, "%02x ",
7664 					       (u32)cmd[k]);
7665 		}
7666 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7667 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7668 	}
7669 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7670 		return SCSI_MLQUEUE_HOST_BUSY;
7671 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7672 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7673 		goto err_out;
7674 
7675 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7676 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7677 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7678 	if (unlikely(!devip)) {
7679 		devip = find_build_dev_info(sdp);
7680 		if (NULL == devip)
7681 			goto err_out;
7682 	}
7683 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7684 		atomic_set(&sdeb_inject_pending, 1);
7685 
7686 	na = oip->num_attached;
7687 	r_pfp = oip->pfp;
7688 	if (na) {	/* multiple commands with this opcode */
7689 		r_oip = oip;
7690 		if (FF_SA & r_oip->flags) {
7691 			if (F_SA_LOW & oip->flags)
7692 				sa = 0x1f & cmd[1];
7693 			else
7694 				sa = get_unaligned_be16(cmd + 8);
7695 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7696 				if (opcode == oip->opcode && sa == oip->sa)
7697 					break;
7698 			}
7699 		} else {   /* since no service action only check opcode */
7700 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7701 				if (opcode == oip->opcode)
7702 					break;
7703 			}
7704 		}
7705 		if (k > na) {
7706 			if (F_SA_LOW & r_oip->flags)
7707 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7708 			else if (F_SA_HIGH & r_oip->flags)
7709 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7710 			else
7711 				mk_sense_invalid_opcode(scp);
7712 			goto check_cond;
7713 		}
7714 	}	/* else (when na==0) we assume the oip is a match */
7715 	flags = oip->flags;
7716 	if (unlikely(F_INV_OP & flags)) {
7717 		mk_sense_invalid_opcode(scp);
7718 		goto check_cond;
7719 	}
7720 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7721 		if (sdebug_verbose)
7722 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7723 				    my_name, opcode);
7724 		mk_sense_invalid_opcode(scp);
7725 		goto check_cond;
7726 	}
7727 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7728 		u8 rem;
7729 		int j;
7730 
7731 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7732 			rem = ~oip->len_mask[k] & cmd[k];
7733 			if (rem) {
7734 				for (j = 7; j >= 0; --j, rem <<= 1) {
7735 					if (0x80 & rem)
7736 						break;
7737 				}
7738 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7739 				goto check_cond;
7740 			}
7741 		}
7742 	}
7743 	if (unlikely(!(F_SKIP_UA & flags) &&
7744 		     find_first_bit(devip->uas_bm,
7745 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7746 		errsts = make_ua(scp, devip);
7747 		if (errsts)
7748 			goto check_cond;
7749 	}
7750 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7751 		     atomic_read(&devip->stopped))) {
7752 		errsts = resp_not_ready(scp, devip);
7753 		if (errsts)
7754 			goto fini;
7755 	}
7756 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7757 		goto fini;
7758 	if (unlikely(sdebug_every_nth)) {
7759 		if (fake_timeout(scp))
7760 			return 0;	/* ignore command: make trouble */
7761 	}
7762 	if (likely(oip->pfp))
7763 		pfp = oip->pfp;	/* calls a resp_* function */
7764 	else
7765 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7766 
7767 fini:
7768 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7769 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7770 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7771 					    sdebug_ndelay > 10000)) {
7772 		/*
7773 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7774 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7775 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7776 		 * For Synchronize Cache want 1/20 of SSU's delay.
7777 		 */
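		/*
		 * Worked example (assuming HZ=250): with sdebug_jdelay=2 the
		 * computation below gives 2 * 250 / 1 = 500 jiffies (2 s)
		 * for SSU and 2 * 250 / 20 = 25 jiffies (0.1 s) for
		 * SYNCHRONIZE CACHE.
		 */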
7778 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7779 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7780 
7781 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7782 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7783 	} else
7784 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7785 				     sdebug_ndelay);
7786 check_cond:
7787 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7788 err_out:
7789 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7790 }
7791 
7792 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7793 {
7794 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7795 
7796 	spin_lock_init(&sdsc->lock);
7797 
7798 	return 0;
7799 }
7800 
7801 
7802 static struct scsi_host_template sdebug_driver_template = {
7803 	.show_info =		scsi_debug_show_info,
7804 	.write_info =		scsi_debug_write_info,
7805 	.proc_name =		sdebug_proc_name,
7806 	.name =			"SCSI DEBUG",
7807 	.info =			scsi_debug_info,
7808 	.slave_alloc =		scsi_debug_slave_alloc,
7809 	.slave_configure =	scsi_debug_slave_configure,
7810 	.slave_destroy =	scsi_debug_slave_destroy,
7811 	.ioctl =		scsi_debug_ioctl,
7812 	.queuecommand =		scsi_debug_queuecommand,
7813 	.change_queue_depth =	sdebug_change_qdepth,
7814 	.map_queues =		sdebug_map_queues,
7815 	.mq_poll =		sdebug_blk_mq_poll,
7816 	.eh_abort_handler =	scsi_debug_abort,
7817 	.eh_device_reset_handler = scsi_debug_device_reset,
7818 	.eh_target_reset_handler = scsi_debug_target_reset,
7819 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7820 	.eh_host_reset_handler = scsi_debug_host_reset,
7821 	.can_queue =		SDEBUG_CANQUEUE,
7822 	.this_id =		7,
7823 	.sg_tablesize =		SG_MAX_SEGMENTS,
7824 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7825 	.max_sectors =		-1U,
7826 	.max_segment_size =	-1U,
7827 	.module =		THIS_MODULE,
7828 	.track_queue_depth =	1,
7829 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
7830 	.init_cmd_priv = sdebug_init_cmd_priv,
7831 };
7832 
7833 static int sdebug_driver_probe(struct device *dev)
7834 {
7835 	int error = 0;
7836 	struct sdebug_host_info *sdbg_host;
7837 	struct Scsi_Host *hpnt;
7838 	int hprot;
7839 
7840 	sdbg_host = dev_to_sdebug_host(dev);
7841 
7842 	sdebug_driver_template.can_queue = sdebug_max_queue;
7843 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7844 	if (!sdebug_clustering)
7845 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7846 
7847 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7848 	if (NULL == hpnt) {
7849 		pr_err("scsi_host_alloc failed\n");
7850 		error = -ENODEV;
7851 		return error;
7852 	}
7853 	if (submit_queues > nr_cpu_ids) {
7854 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7855 			my_name, submit_queues, nr_cpu_ids);
7856 		submit_queues = nr_cpu_ids;
7857 	}
7858 	/*
7859 	 * Decide whether to tell scsi subsystem that we want mq. The
7860 	 * following should give the same answer for each host.
7861 	 */
7862 	hpnt->nr_hw_queues = submit_queues;
7863 	if (sdebug_host_max_queue)
7864 		hpnt->host_tagset = 1;
7865 
7866 	/* poll queues are only possible when nr_hw_queues > 1 */
7867 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7868 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7869 			 my_name, poll_queues, hpnt->nr_hw_queues);
7870 		poll_queues = 0;
7871 	}
7872 
7873 	/*
7874 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7875 	 * left over for non-polled I/O.
7876 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7877 	 */
7878 	if (poll_queues >= submit_queues) {
7879 		if (submit_queues < 3)
7880 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7881 		else
7882 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7883 				my_name, submit_queues - 1);
7884 		poll_queues = 1;
7885 	}
7886 	if (poll_queues)
7887 		hpnt->nr_maps = 3;
7888 
7889 	sdbg_host->shost = hpnt;
7890 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7891 		hpnt->max_id = sdebug_num_tgts + 1;
7892 	else
7893 		hpnt->max_id = sdebug_num_tgts;
7894 	/* = sdebug_max_luns; */
7895 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7896 
7897 	hprot = 0;
7898 
7899 	switch (sdebug_dif) {
7900 
7901 	case T10_PI_TYPE1_PROTECTION:
7902 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7903 		if (sdebug_dix)
7904 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7905 		break;
7906 
7907 	case T10_PI_TYPE2_PROTECTION:
7908 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7909 		if (sdebug_dix)
7910 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7911 		break;
7912 
7913 	case T10_PI_TYPE3_PROTECTION:
7914 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7915 		if (sdebug_dix)
7916 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7917 		break;
7918 
7919 	default:
7920 		if (sdebug_dix)
7921 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7922 		break;
7923 	}
7924 
7925 	scsi_host_set_prot(hpnt, hprot);
7926 
7927 	if (have_dif_prot || sdebug_dix)
7928 		pr_info("host protection%s%s%s%s%s%s%s\n",
7929 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7930 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7931 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7932 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7933 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7934 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7935 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7936 
7937 	if (sdebug_guard == 1)
7938 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7939 	else
7940 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7941 
7942 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7943 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7944 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7945 		sdebug_statistics = true;
7946 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7947 	if (error) {
7948 		pr_err("scsi_add_host failed\n");
7949 		error = -ENODEV;
7950 		scsi_host_put(hpnt);
7951 	} else {
7952 		scsi_scan_host(hpnt);
7953 	}
7954 
7955 	return error;
7956 }
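
/*
 * Protection setup sketch (illustrative): loading with
 *
 *   modprobe scsi_debug dif=1 dix=1 guard=0
 *
 * takes the T10_PI_TYPE1_PROTECTION branch above, registering DIF1 and DIX1
 * with a CRC guard tag (guard=1 selects the IP checksum guard instead).
 */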
7957 
7958 static void sdebug_driver_remove(struct device *dev)
7959 {
7960 	struct sdebug_host_info *sdbg_host;
7961 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7962 
7963 	sdbg_host = dev_to_sdebug_host(dev);
7964 
7965 	scsi_remove_host(sdbg_host->shost);
7966 
7967 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7968 				 dev_list) {
7969 		list_del(&sdbg_devinfo->dev_list);
7970 		kfree(sdbg_devinfo->zstate);
7971 		kfree(sdbg_devinfo);
7972 	}
7973 
7974 	scsi_host_put(sdbg_host->shost);
7975 }
7976 
7977 static int pseudo_lld_bus_match(struct device *dev,
7978 				struct device_driver *dev_driver)
7979 {
7980 	return 1;
7981 }
7982 
7983 static struct bus_type pseudo_lld_bus = {
7984 	.name = "pseudo",
7985 	.match = pseudo_lld_bus_match,
7986 	.probe = sdebug_driver_probe,
7987 	.remove = sdebug_driver_remove,
7988 	.drv_groups = sdebug_drv_groups,
7989 };
7990