/* xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision a19226f8) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208 
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210  * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,		/* conventional zone */
	ZBC_ZTYPE_SWR	= 0x2,		/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,		/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,		/* gap zone (no read/write access) */
};
261 
/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,	/* conventional/gap zones only */
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
273 
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* ZBC "non sequential write resources active" */
	unsigned int z_size;		/* zone size, in logical blocks */
	sector_t z_start;		/* first LBA of the zone */
	sector_t z_wp;			/* current write pointer LBA */
};
282 
/* Per (pseudo) logical unit state; one instance per simulated LUN */
struct sdebug_dev_info {
	struct list_head dev_list;	/* linked on sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* logical unit name (UUID) */
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending unit attentions, SDEBUG_UA_* bits */
	atomic_t num_in_q;		/* commands currently queued to this LU */
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zcap;		/* zone capacity, presumably in logical blocks */
	unsigned int zsize;		/* zone size, presumably in logical blocks */
	unsigned int zsize_shift;	/* log2 of zsize (for LBA -> zone index) */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* leading conventional zones */
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;	/* implicitly open zone count */
	unsigned int nr_exp_open;	/* explicitly open zone count */
	unsigned int nr_closed;
	unsigned int max_open;		/* max concurrently open zones allowed */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* array of nr_zones entries */
};
310 
/* One instance per simulated SCSI host */
struct sdebug_host_info {
	struct list_head host_list;	/* linkage on driver-wide host list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;	/* associated mid-layer host */
	struct device dev;		/* pseudo device embedded in this struct */
	struct list_head dev_info_list;	/* sdebug_dev_info children */
};
318 
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
326 
/* Recover the sdebug_host_info that embeds the given struct device */
#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

/* The pseudo host's embedded device is stored in shost->dma_dev */
#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)
332 
/* Mechanism used to defer (delay) a queued command's completion */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
335 
/* Per queued command deferred-completion bookkeeping */
struct sdebug_defer {
	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;	/* hrt has been initialized */
	bool init_wq;	/* ew has been initialized */
	bool init_poll;	/* queued for mq_poll completion */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
350 
/* One slot of a sdebug_queue's in-flight command array */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state */
	struct scsi_cmnd *a_cmnd;	/* the mid-layer command occupying this slot */
};
358 
/* Per submit-queue state; qc_lock protects qc_arr[] and in_use_bm[] */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* slot allocation bitmap */
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
365 
/* Driver-wide statistics and injection state (all updated atomically) */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending; /* error injection armed by every_nth */
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
372 
/* Describes how one SCSI opcode (or opcode+service action) is handled */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
				/* response function; NULL -> no specific handler */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
384 
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * Each value indexes opcode_info_arr[]; see opcode_ind_arr[] for the map. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
421 
422 
/* Maps each of the 256 possible SCSI opcodes (cdb byte 0) onto a SDEB_I_*
 * index into opcode_info_arr[]; 0 means SDEB_I_INVALID_OPCODE. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
467 
468 /*
469  * The following "response" functions return the SCSI mid-level's 4 byte
470  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
471  * command completion, they can mask their return value with
472  * SDEG_RES_IMMED_MASK .
473  */
474 #define SDEG_RES_IMMED_MASK 0x40000000
475 
476 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
505 
506 static int sdebug_do_add_host(bool mk_new_store);
507 static int sdebug_add_host_helper(int per_host_idx);
508 static void sdebug_do_remove_host(bool the_end);
509 static int sdebug_add_store(void);
510 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
511 static void sdebug_erase_all_stores(bool apart_from_first);
512 
513 /*
514  * The following are overflow arrays for cdbs that "hit" the same index in
515  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
516  * should be placed in opcode_info_arr[], the others should be placed here.
517  */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
556 
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
608 
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	/* NOTE(review): NULL response function for this sa (0x6); the comment
	 * below is inherited — verify the intended service action */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
626 
627 
628 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
629  * plus the terminating elements for logic that scans this table such as
630  * REPORT SUPPORTED OPERATION CODES. */
631 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
632 /* 0 */
633 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
634 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
636 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
637 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
638 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
639 	     0, 0} },					/* REPORT LUNS */
640 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
641 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
643 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
644 /* 5 */
645 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
646 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
647 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
649 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
650 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
651 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
652 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
653 	     0, 0, 0} },
654 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
655 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
656 	     0, 0} },
657 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
658 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
659 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
660 /* 10 */
661 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
662 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
663 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
665 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
666 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
667 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
668 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
669 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
670 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
671 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
672 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
673 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
674 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
675 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
676 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
677 				0xff, 0, 0xc7, 0, 0, 0, 0} },
678 /* 15 */
679 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
680 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
681 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
682 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
683 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
684 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
685 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
686 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
687 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
688 	     0xff, 0xff} },
689 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
690 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
691 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
692 	     0} },
693 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
694 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
695 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
696 	     0} },
697 /* 20 */
698 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
699 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
700 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
701 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
702 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
703 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
704 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
705 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
706 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
707 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
708 /* 25 */
709 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
710 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
711 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
712 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
713 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
714 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
715 		 0, 0, 0, 0, 0} },
716 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
717 	    resp_sync_cache, sync_cache_iarr,
718 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
719 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
720 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
721 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
722 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
723 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
724 	    resp_pre_fetch, pre_fetch_iarr,
725 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
726 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
727 
728 /* 30 */
729 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
730 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
731 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
732 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
733 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
734 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
735 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
736 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
737 /* sentinel */
738 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
739 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
740 };
741 
/*
 * Driver-wide tunable state.  The DEF_* initializers are the compile-time
 * defaults; presumably most of these mirror a module/sysfs parameter of the
 * same suffix (TODO confirm against the module_param block elsewhere in
 * this file).
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning knobs (see scsi_debug_lbp() below) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
802 
/* SCSI (SAM) LUN addressing methods selectable for this driver */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;	/* int mirror of sdebug_lun_am */
809 
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);	/* protects sdebug_host_list */

/* per-store (ramdisk) state, indexed by store id */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
/* error-handling and DIF/DIX event counters */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];
853 
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-composed scsi_cmnd result values: (host byte << 16) | status byte */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
874 
875 
876 /* Only do the extra work involved in logical block provisioning if one or
877  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
878  * real reads and writes (i.e. not skipping them for speed).
879  */
880 static inline bool scsi_debug_lbp(void)
881 {
882 	return 0 == sdebug_fake_rw &&
883 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
884 }
885 
/*
 * Map @lba to its backing byte address inside a ramdisk store.  The LBA
 * is reduced modulo sdebug_store_sectors, so accesses past the end of the
 * store wrap around.  If @sip is NULL or has no data area, warn once and
 * fall back to the first store in per_store_arr.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	/* do_div() divides @lba in place and returns the remainder */
	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
898 
/*
 * Return the T10 protection information tuple backing @sector.  Like
 * lba2fake_store(), the sector is reduced modulo sdebug_store_sectors.
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	/* sector_div() divides @sector in place and yields the remainder */
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
906 
907 static void sdebug_max_tgts_luns(void)
908 {
909 	struct sdebug_host_info *sdbg_host;
910 	struct Scsi_Host *hpnt;
911 
912 	spin_lock(&sdebug_host_list_lock);
913 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
914 		hpnt = sdbg_host->shost;
915 		if ((hpnt->this_id >= 0) &&
916 		    (sdebug_num_tgts > hpnt->this_id))
917 			hpnt->max_id = sdebug_num_tgts + 1;
918 		else
919 			hpnt->max_id = sdebug_num_tgts;
920 		/* sdebug_max_luns; */
921 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
922 	}
923 	spin_unlock(&sdebug_host_list_lock);
924 }
925 
926 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
927 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data with a sense-key specific field that
 * points at the offending byte (and optionally bit) of either the CDB
 * (c_d == SDEB_IN_CDB) or the data-out parameter list (SDEB_IN_DATA).
 * Handles both descriptor (sdebug_dsense != 0) and fixed sense formats.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes: SKSV/C-D/BPV/bit + field pointer */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer (byte offset) */
	if (sdebug_dsense) {
		/* append a sense-key specific descriptor (type 2, length 6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
968 
969 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
970 {
971 	if (!scp->sense_buffer) {
972 		sdev_printk(KERN_ERR, scp->device,
973 			    "%s: sense_buffer is NULL\n", __func__);
974 		return;
975 	}
976 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
977 
978 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
979 
980 	if (sdebug_verbose)
981 		sdev_printk(KERN_INFO, scp->device,
982 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
983 			    my_name, key, asc, asq);
984 }
985 
/* Report the command's opcode as unsupported (ILLEGAL REQUEST, INVALID OPCODE). */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
990 
991 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
992 			    void __user *arg)
993 {
994 	if (sdebug_verbose) {
995 		if (0x1261 == cmd)
996 			sdev_printk(KERN_INFO, dev,
997 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
998 		else if (0x5331 == cmd)
999 			sdev_printk(KERN_INFO, dev,
1000 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1001 				    __func__);
1002 		else
1003 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1004 				    __func__, cmd);
1005 	}
1006 	return -EINVAL;
1007 	/* return -ENOTTY; // correct return but upsets fdisk */
1008 }
1009 
1010 static void config_cdb_len(struct scsi_device *sdev)
1011 {
1012 	switch (sdebug_cdb_len) {
1013 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1014 		sdev->use_10_for_rw = false;
1015 		sdev->use_16_for_rw = false;
1016 		sdev->use_10_for_ms = false;
1017 		break;
1018 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1019 		sdev->use_10_for_rw = true;
1020 		sdev->use_16_for_rw = false;
1021 		sdev->use_10_for_ms = false;
1022 		break;
1023 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1024 		sdev->use_10_for_rw = true;
1025 		sdev->use_16_for_rw = false;
1026 		sdev->use_10_for_ms = true;
1027 		break;
1028 	case 16:
1029 		sdev->use_10_for_rw = false;
1030 		sdev->use_16_for_rw = true;
1031 		sdev->use_10_for_ms = true;
1032 		break;
1033 	case 32: /* No knobs to suggest this so same as 16 for now */
1034 		sdev->use_10_for_rw = false;
1035 		sdev->use_16_for_rw = true;
1036 		sdev->use_10_for_ms = true;
1037 		break;
1038 	default:
1039 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1040 			sdebug_cdb_len);
1041 		sdev->use_10_for_rw = true;
1042 		sdev->use_16_for_rw = false;
1043 		sdev->use_10_for_ms = false;
1044 		sdebug_cdb_len = 10;
1045 		break;
1046 	}
1047 }
1048 
1049 static void all_config_cdb_len(void)
1050 {
1051 	struct sdebug_host_info *sdbg_host;
1052 	struct Scsi_Host *shost;
1053 	struct scsi_device *sdev;
1054 
1055 	spin_lock(&sdebug_host_list_lock);
1056 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1057 		shost = sdbg_host->shost;
1058 		shost_for_each_device(sdev, shost) {
1059 			config_cdb_len(sdev);
1060 		}
1061 	}
1062 	spin_unlock(&sdebug_host_list_lock);
1063 }
1064 
1065 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1066 {
1067 	struct sdebug_host_info *sdhp;
1068 	struct sdebug_dev_info *dp;
1069 
1070 	spin_lock(&sdebug_host_list_lock);
1071 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1072 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1073 			if ((devip->sdbg_host == dp->sdbg_host) &&
1074 			    (devip->target == dp->target))
1075 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1076 		}
1077 	}
1078 	spin_unlock(&sdebug_host_list_lock);
1079 }
1080 
1081 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1082 {
1083 	int k;
1084 
1085 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1086 	if (k != SDEBUG_NUM_UAS) {
1087 		const char *cp = NULL;
1088 
1089 		switch (k) {
1090 		case SDEBUG_UA_POR:
1091 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1092 					POWER_ON_RESET_ASCQ);
1093 			if (sdebug_verbose)
1094 				cp = "power on reset";
1095 			break;
1096 		case SDEBUG_UA_POOCCUR:
1097 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1098 					POWER_ON_OCCURRED_ASCQ);
1099 			if (sdebug_verbose)
1100 				cp = "power on occurred";
1101 			break;
1102 		case SDEBUG_UA_BUS_RESET:
1103 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1104 					BUS_RESET_ASCQ);
1105 			if (sdebug_verbose)
1106 				cp = "bus reset";
1107 			break;
1108 		case SDEBUG_UA_MODE_CHANGED:
1109 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1110 					MODE_CHANGED_ASCQ);
1111 			if (sdebug_verbose)
1112 				cp = "mode parameters changed";
1113 			break;
1114 		case SDEBUG_UA_CAPACITY_CHANGED:
1115 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1116 					CAPACITY_CHANGED_ASCQ);
1117 			if (sdebug_verbose)
1118 				cp = "capacity data changed";
1119 			break;
1120 		case SDEBUG_UA_MICROCODE_CHANGED:
1121 			mk_sense_buffer(scp, UNIT_ATTENTION,
1122 					TARGET_CHANGED_ASC,
1123 					MICROCODE_CHANGED_ASCQ);
1124 			if (sdebug_verbose)
1125 				cp = "microcode has been changed";
1126 			break;
1127 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1128 			mk_sense_buffer(scp, UNIT_ATTENTION,
1129 					TARGET_CHANGED_ASC,
1130 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1131 			if (sdebug_verbose)
1132 				cp = "microcode has been changed without reset";
1133 			break;
1134 		case SDEBUG_UA_LUNS_CHANGED:
1135 			/*
1136 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1137 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1138 			 * on the target, until a REPORT LUNS command is
1139 			 * received.  SPC-4 behavior is to report it only once.
1140 			 * NOTE:  sdebug_scsi_level does not use the same
1141 			 * values as struct scsi_device->scsi_level.
1142 			 */
1143 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1144 				clear_luns_changed_on_target(devip);
1145 			mk_sense_buffer(scp, UNIT_ATTENTION,
1146 					TARGET_CHANGED_ASC,
1147 					LUNS_CHANGED_ASCQ);
1148 			if (sdebug_verbose)
1149 				cp = "reported luns data has changed";
1150 			break;
1151 		default:
1152 			pr_warn("unexpected unit attention code=%d\n", k);
1153 			if (sdebug_verbose)
1154 				cp = "unknown";
1155 			break;
1156 		}
1157 		clear_bit(k, devip->uas_bm);
1158 		if (sdebug_verbose)
1159 			sdev_printk(KERN_INFO, scp->device,
1160 				   "%s reports: Unit attention: %s\n",
1161 				   my_name, cp);
1162 		return check_condition_result;
1163 	}
1164 	return 0;
1165 }
1166 
1167 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1168 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1169 				int arr_len)
1170 {
1171 	int act_len;
1172 	struct scsi_data_buffer *sdb = &scp->sdb;
1173 
1174 	if (!sdb->length)
1175 		return 0;
1176 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1177 		return DID_ERROR << 16;
1178 
1179 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1180 				      arr, arr_len);
1181 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1182 
1183 	return 0;
1184 }
1185 
1186 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1187  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1188  * calls, not required to write in ascending offset order. Assumes resid
1189  * set to scsi_bufflen() prior to any calls.
1190  */
1191 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1192 				  int arr_len, unsigned int off_dst)
1193 {
1194 	unsigned int act_len, n;
1195 	struct scsi_data_buffer *sdb = &scp->sdb;
1196 	off_t skip = off_dst;
1197 
1198 	if (sdb->length <= off_dst)
1199 		return 0;
1200 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1201 		return DID_ERROR << 16;
1202 
1203 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1204 				       arr, arr_len, skip);
1205 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1206 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1207 		 scsi_get_resid(scp));
1208 	n = scsi_bufflen(scp) - (off_dst + act_len);
1209 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1210 	return 0;
1211 }
1212 
1213 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1214  * 'arr' or -1 if error.
1215  */
1216 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1217 			       int arr_len)
1218 {
1219 	if (!scsi_bufflen(scp))
1220 		return 0;
1221 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1222 		return -1;
1223 
1224 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1225 }
1226 
1227 
/* INQUIRY identity strings: fixed-width, space padded, plus trailing NUL */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1235 
/*
 * Device identification VPD page. Returns number of bytes placed in arr.
 * Emits, in order: a T10 vendor-ID designator; for real LUs
 * (dev_id_num >= 0) a logical unit designator (locally assigned UUID or
 * NAA-3) plus a relative target port designator; then target port,
 * target port group and target device NAA-3 designators; and finally a
 * SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* NUL pad the name string */
	num += 4;
	return num;
}
1323 
/* Canned payload for VPD page 0x84: three 6-byte identifiers that differ
 * only in their final byte. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1329 
1330 /*  Software interface identification VPD page */
1331 static int inquiry_vpd_84(unsigned char *arr)
1332 {
1333 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1334 	return sizeof(vpd84_data);
1335 }
1336 
/*
 * Management network addresses VPD page: two URL descriptors (storage
 * configuration service and logging), each NUL terminated and padded to
 * a multiple of 4 bytes.  Returns number of bytes placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const struct {
		unsigned char assoc_svc;	/* association + service type byte */
		const char *url;
	} addrs[] = {
		{ 0x1, "https://www.kernel.org/config" },	/* lu, storage config */
		{ 0x4, "http://www.kernel.org/log" },		/* lu, logging */
	};
	int num = 0;
	size_t k;

	for (k = 0; k < sizeof(addrs) / sizeof(addrs[0]); ++k) {
		int olen = strlen(addrs[k].url);
		/* room for the NUL, rounded up to a multiple of 4 */
		int plen = (olen + 1 + 3) & ~3;

		arr[num++] = addrs[k].assoc_svc;
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, addrs[k].url, olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1371 
/*
 * SCSI ports VPD page: two relative target ports (1=primary,
 * 2=secondary), each with an NAA-3 target port identifier derived from
 * target_dev_id.  Returns number of bytes placed in arr.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* NAA-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1413 
1414 
1415 static unsigned char vpd89_data[] = {
1416 /* from 4th byte */ 0,0,0,0,
1417 'l','i','n','u','x',' ',' ',' ',
1418 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1419 '1','2','3','4',
1420 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1421 0xec,0,0,0,
1422 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1423 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1425 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1426 0x53,0x41,
1427 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1428 0x20,0x20,
1429 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1430 0x10,0x80,
1431 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1432 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1433 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1435 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1436 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1437 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1442 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1443 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1444 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1455 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1456 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1457 };
1458 
/* ATA Information VPD page: copy the canned vpd89_data payload. */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1465 
1466 
/* Block limits VPD page template; inquiry_vpd_b0() overwrites most fields. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1473 
/* Block limits VPD page (SBC-3). Fills arr from the current unmap/transfer
 * length settings and returns the mandatory 0x3c page length. */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1518 
1519 /* Block device characteristics VPD page (SBC-3) */
1520 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1521 {
1522 	memset(arr, 0, 0x3c);
1523 	arr[0] = 0;
1524 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1525 	arr[2] = 0;
1526 	arr[3] = 5;	/* less than 1.8" */
1527 	if (devip->zmodel == BLK_ZONED_HA)
1528 		arr[4] = 1 << 4;	/* zoned field = 01b */
1529 
1530 	return 0x3c;
1531 }
1532 
1533 /* Logical block provisioning VPD page (SBC-4) */
1534 static int inquiry_vpd_b2(unsigned char *arr)
1535 {
1536 	memset(arr, 0, 0x4);
1537 	arr[0] = 0;			/* threshold exponent */
1538 	if (sdebug_lbpu)
1539 		arr[1] = 1 << 7;
1540 	if (sdebug_lbpws)
1541 		arr[1] |= 1 << 6;
1542 	if (sdebug_lbpws10)
1543 		arr[1] |= 1 << 5;
1544 	if (sdebug_lbprz && scsi_debug_lbp())
1545 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1546 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1547 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1548 	/* threshold_percentage=0 */
1549 	return 0x4;
1550 }
1551 
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* max open zones only advertised for host-managed with a limit set */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);	/* not reported */
	if (devip->zcap < devip->zsize) {
		/* zone capacity smaller than zone size: flag constant offset */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1577 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to the INQUIRY command. Builds either the standard INQUIRY
 * data or, when the EVPD bit is set, one of the supported Vital Product
 * Data (VPD) pages. Returns 0 on success, DID_REQUEUE << 16 on
 * allocation failure, or check_condition_result with sense data set.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* byte 0: peripheral qualifier (PQ) + peripheral device type (PDT) */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* synthesize a LU id from host/target/lun; -1 for a wlun */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			/* serial number is the ASCII rendering of lu_id_num */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* page length (arr[2..3]) + 4 byte header, capped by CDB */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1744 
/*
 * Current (mutable) Informational Exceptions control mode page [0x1c].
 * Served by resp_iec_m_pg(); resp_requests() inspects its TEST and MRIE
 * fields to decide whether to fake a THRESHOLD_EXCEEDED condition.
 * See resp_iec_m_pg() for how this data is manipulated.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1748 
/*
 * Respond to REQUEST SENSE. While the unit is stopped some "pollable"
 * NOT_READY sense data is reported; when the Informational Exceptions
 * mode page has TEST=1 and MRIE=6, a fake THRESHOLD_EXCEEDED condition
 * is reported; otherwise an all-clear response is returned. Both fixed
 * (0x70) and descriptor (0x72) sense formats are supported, chosen by
 * the DESC bit in cmd[1].
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format */
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			/* ASCQ 0x1: in process of becoming ready;
			 * ASCQ 0x2: initializing command required */
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;   	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
1802 
1803 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1804 {
1805 	unsigned char *cmd = scp->cmnd;
1806 	int power_cond, want_stop, stopped_state;
1807 	bool changing;
1808 
1809 	power_cond = (cmd[4] & 0xf0) >> 4;
1810 	if (power_cond) {
1811 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1812 		return check_condition_result;
1813 	}
1814 	want_stop = !(cmd[4] & 1);
1815 	stopped_state = atomic_read(&devip->stopped);
1816 	if (stopped_state == 2) {
1817 		ktime_t now_ts = ktime_get_boottime();
1818 
1819 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1820 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1821 
1822 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1823 				/* tur_ms_to_ready timer extinguished */
1824 				atomic_set(&devip->stopped, 0);
1825 				stopped_state = 0;
1826 			}
1827 		}
1828 		if (stopped_state == 2) {
1829 			if (want_stop) {
1830 				stopped_state = 1;	/* dummy up success */
1831 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1832 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1833 				return check_condition_result;
1834 			}
1835 		}
1836 	}
1837 	changing = (stopped_state != want_stop);
1838 	if (changing)
1839 		atomic_xchg(&devip->stopped, want_stop);
1840 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1841 		return SDEG_RES_IMMED_MASK;
1842 	else
1843 		return 0;
1844 }
1845 
1846 static sector_t get_sdebug_capacity(void)
1847 {
1848 	static const unsigned int gibibyte = 1073741824;
1849 
1850 	if (sdebug_virtual_gb > 0)
1851 		return (sector_t)sdebug_virtual_gb *
1852 			(gibibyte / sdebug_sector_size);
1853 	else
1854 		return sdebug_store_sectors;
1855 }
1856 
1857 #define SDEBUG_READCAP_ARR_SZ 8
1858 static int resp_readcap(struct scsi_cmnd *scp,
1859 			struct sdebug_dev_info *devip)
1860 {
1861 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1862 	unsigned int capac;
1863 
1864 	/* following just in case virtual_gb changed */
1865 	sdebug_capacity = get_sdebug_capacity();
1866 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1867 	if (sdebug_capacity < 0xffffffff) {
1868 		capac = (unsigned int)sdebug_capacity - 1;
1869 		put_unaligned_be32(capac, arr + 0);
1870 	} else
1871 		put_unaligned_be32(0xffffffff, arr + 0);
1872 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1873 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1874 }
1875 
1876 #define SDEBUG_READCAP16_ARR_SZ 32
1877 static int resp_readcap16(struct scsi_cmnd *scp,
1878 			  struct sdebug_dev_info *devip)
1879 {
1880 	unsigned char *cmd = scp->cmnd;
1881 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1882 	u32 alloc_len;
1883 
1884 	alloc_len = get_unaligned_be32(cmd + 10);
1885 	/* following just in case virtual_gb changed */
1886 	sdebug_capacity = get_sdebug_capacity();
1887 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1888 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1889 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1890 	arr[13] = sdebug_physblk_exp & 0xf;
1891 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1892 
1893 	if (scsi_debug_lbp()) {
1894 		arr[14] |= 0x80; /* LBPME */
1895 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1896 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1897 		 * in the wider field maps to 0 in this field.
1898 		 */
1899 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1900 			arr[14] |= 0x40;
1901 	}
1902 
1903 	/*
1904 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1905 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1906 	 */
1907 	if (devip->zmodel == BLK_ZONED_HM)
1908 		arr[12] |= 1 << 4;
1909 
1910 	arr[15] = sdebug_lowest_aligned & 0xff;
1911 
1912 	if (have_dif_prot) {
1913 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1914 		arr[12] |= 1; /* PROT_EN */
1915 	}
1916 
1917 	return fill_from_dev_buffer(scp, arr,
1918 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1919 }
1920 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * Respond to REPORT TARGET PORT GROUPS (maintenance-in). Reports the two
 * port groups implied by VPD page 0x88, the second of which is always in
 * the "unavailable" asymmetric access state.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* skip the 4-byte return data length; filled in below */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1999 
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (maintenance-in). Handles
 * reporting options 0 (all commands) and 1..3 (one command, by opcode or
 * opcode+service action), optionally appending command timeout
 * descriptors when the RCTD bit is set.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return commands timeout descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)	/* cap the work buffer at 8 KiB */
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, or 20 with timeouts (RCTD) */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* then a descriptor for each attached command */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* field pointer 4: point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* echo the CDB usage bitmap for the command */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2150 
2151 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2152 			  struct sdebug_dev_info *devip)
2153 {
2154 	bool repd;
2155 	u32 alloc_len, len;
2156 	u8 arr[16];
2157 	u8 *cmd = scp->cmnd;
2158 
2159 	memset(arr, 0, sizeof(arr));
2160 	repd = !!(cmd[2] & 0x80);
2161 	alloc_len = get_unaligned_be32(cmd + 6);
2162 	if (alloc_len < 4) {
2163 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2164 		return check_condition_result;
2165 	}
2166 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2167 	arr[1] = 0x1;		/* ITNRS */
2168 	if (repd) {
2169 		arr[3] = 0xc;
2170 		len = 16;
2171 	} else
2172 		len = 4;
2173 
2174 	len = (len < alloc_len) ? len : alloc_len;
2175 	return fill_from_dev_buffer(scp, arr, len);
2176 }
2177 
2178 /* <<Following mode page info copied from ST318451LW>> */
2179 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none may be changed */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2190 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none may be changed */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2201 
2202 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2203 {       /* Format device page for mode_sense */
2204 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2205 				     0, 0, 0, 0, 0, 0, 0, 0,
2206 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2207 
2208 	memcpy(p, format_pg, sizeof(format_pg));
2209 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2210 	put_unaligned_be16(sdebug_sector_size, p + 12);
2211 	if (sdebug_removable)
2212 		p[20] |= 0x20; /* should agree with INQUIRY */
2213 	if (1 == pcontrol)
2214 		memset(p + 2, 0, sizeof(format_pg) - 2);
2215 	return sizeof(format_pg);
2216 }
2217 
/*
 * Current (mutable) Caching mode page [0x8]; served by resp_caching_pg()
 * and overwritten in place by MODE SELECT (see resp_mode_select()).
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2221 
2222 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2223 { 	/* Caching page for mode_sense */
2224 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2225 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2226 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2227 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2228 
2229 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2230 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2231 	memcpy(p, caching_pg, sizeof(caching_pg));
2232 	if (1 == pcontrol)
2233 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2234 	else if (2 == pcontrol)
2235 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2236 	return sizeof(caching_pg);
2237 }
2238 
/*
 * Current (mutable) Control mode page [0xa]; served by resp_ctrl_m_pg()
 * and overwritten in place by MODE SELECT (see resp_mode_select()).
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2241 
2242 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2243 { 	/* Control mode page for mode_sense */
2244 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2245 					0, 0, 0, 0};
2246 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2247 				     0, 0, 0x2, 0x4b};
2248 
2249 	if (sdebug_dsense)
2250 		ctrl_m_pg[2] |= 0x4;
2251 	else
2252 		ctrl_m_pg[2] &= ~0x4;
2253 
2254 	if (sdebug_ato)
2255 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2256 
2257 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2258 	if (1 == pcontrol)
2259 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2260 	else if (2 == pcontrol)
2261 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2262 	return sizeof(ctrl_m_pg);
2263 }
2264 
2265 
2266 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2267 {	/* Informational Exceptions control mode page for mode_sense */
2268 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2269 				       0, 0, 0x0, 0x0};
2270 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2271 				      0, 0, 0x0, 0x0};
2272 
2273 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2274 	if (1 == pcontrol)
2275 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2276 	else if (2 == pcontrol)
2277 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2278 	return sizeof(iec_m_pg);
2279 }
2280 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none may be changed */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2291 
2292 
2293 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2294 			      int target_dev_id)
2295 {	/* SAS phy control and discover mode page for mode_sense */
2296 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2297 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2298 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2299 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2300 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2301 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2302 		    0, 0, 0, 0, 0, 0, 0, 0,
2303 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2304 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2305 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2306 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2307 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2308 		    0, 0, 0, 0, 0, 0, 0, 0,
2309 		};
2310 	int port_a, port_b;
2311 
2312 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2313 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2314 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2315 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2316 	port_a = target_dev_id + 1;
2317 	port_b = port_a + 1;
2318 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2319 	put_unaligned_be32(port_a, p + 20);
2320 	put_unaligned_be32(port_b, p + 48 + 20);
2321 	if (1 == pcontrol)
2322 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2323 	return sizeof(sas_pcd_m_pg);
2324 }
2325 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable: zero everything past the header */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2337 
#define SDEBUG_MAX_MSENSE_SZ 256

/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, optionally a short (8 byte) or long-LBA (16 byte) block
 * descriptor, then the requested mode page(s).
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* mode page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short descriptor: 32-bit block count, 0xffffffff if too big */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages are only implemented for page 0x19 (and 0x3f below) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
2504 
2505 #define SDEBUG_MAX_MSELECT_SZ 512
2506 
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) mode pages may be
 * changed; anything else yields an ILLEGAL REQUEST sense.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format bit */
	sp = cmd[1] & 0x1;	/* Save Pages bit (not supported) */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* require PF=1, SP=0, and a parameter list that fits in arr */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	/* mode data length field is reserved for MODE SELECT; expect ~0 */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* PS must be 0 in MODE SELECT data */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SubPage Format bit */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* a page is only accepted when its length byte matches ours */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* SWP bit toggles the software write-protect state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			/* D_SENSE selects descriptor vs fixed sense format */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	/* raise MODE PARAMETERS CHANGED unit attention for this device */
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2588 
/*
 * Fill in the Temperature log page (0xd): the current temperature (38 C)
 * followed by the reference temperature (65 C), both 2-byte parameters.
 * Returns the number of bytes written.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* param 0x0000: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* param 0x0001: reference */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2598 
2599 static int resp_ie_l_pg(unsigned char *arr)
2600 {
2601 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2602 		};
2603 
2604 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2605 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2606 		arr[4] = THRESHOLD_EXCEEDED;
2607 		arr[5] = 0xff;
2608 	}
2609 	return sizeof(ie_l_pg);
2610 }
2611 
/*
 * Fill in the Environmental Reporting log subpage (0xd/0x1): a
 * temperature parameter followed by a relative-humidity parameter.
 * Returns the number of bytes written.
 */
static int resp_env_rep_l_spg(unsigned char *arr)
{
	static const unsigned char env_rep_l_spg[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,	/* temperature */
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,		/* humidity */
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2623 
2624 #define SDEBUG_MAX_LSENSE_SZ 512
2625 
2626 static int resp_log_sense(struct scsi_cmnd *scp,
2627 			  struct sdebug_dev_info *devip)
2628 {
2629 	int ppc, sp, pcode, subpcode;
2630 	u32 alloc_len, len, n;
2631 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2632 	unsigned char *cmd = scp->cmnd;
2633 
2634 	memset(arr, 0, sizeof(arr));
2635 	ppc = cmd[1] & 0x2;
2636 	sp = cmd[1] & 0x1;
2637 	if (ppc || sp) {
2638 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2639 		return check_condition_result;
2640 	}
2641 	pcode = cmd[2] & 0x3f;
2642 	subpcode = cmd[3] & 0xff;
2643 	alloc_len = get_unaligned_be16(cmd + 7);
2644 	arr[0] = pcode;
2645 	if (0 == subpcode) {
2646 		switch (pcode) {
2647 		case 0x0:	/* Supported log pages log page */
2648 			n = 4;
2649 			arr[n++] = 0x0;		/* this page */
2650 			arr[n++] = 0xd;		/* Temperature */
2651 			arr[n++] = 0x2f;	/* Informational exceptions */
2652 			arr[3] = n - 4;
2653 			break;
2654 		case 0xd:	/* Temperature log page */
2655 			arr[3] = resp_temp_l_pg(arr + 4);
2656 			break;
2657 		case 0x2f:	/* Informational exceptions log page */
2658 			arr[3] = resp_ie_l_pg(arr + 4);
2659 			break;
2660 		default:
2661 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2662 			return check_condition_result;
2663 		}
2664 	} else if (0xff == subpcode) {
2665 		arr[0] |= 0x40;
2666 		arr[1] = subpcode;
2667 		switch (pcode) {
2668 		case 0x0:	/* Supported log pages and subpages log page */
2669 			n = 4;
2670 			arr[n++] = 0x0;
2671 			arr[n++] = 0x0;		/* 0,0 page */
2672 			arr[n++] = 0x0;
2673 			arr[n++] = 0xff;	/* this page */
2674 			arr[n++] = 0xd;
2675 			arr[n++] = 0x0;		/* Temperature */
2676 			arr[n++] = 0xd;
2677 			arr[n++] = 0x1;		/* Environment reporting */
2678 			arr[n++] = 0xd;
2679 			arr[n++] = 0xff;	/* all 0xd subpages */
2680 			arr[n++] = 0x2f;
2681 			arr[n++] = 0x0;	/* Informational exceptions */
2682 			arr[n++] = 0x2f;
2683 			arr[n++] = 0xff;	/* all 0x2f subpages */
2684 			arr[3] = n - 4;
2685 			break;
2686 		case 0xd:	/* Temperature subpages */
2687 			n = 4;
2688 			arr[n++] = 0xd;
2689 			arr[n++] = 0x0;		/* Temperature */
2690 			arr[n++] = 0xd;
2691 			arr[n++] = 0x1;		/* Environment reporting */
2692 			arr[n++] = 0xd;
2693 			arr[n++] = 0xff;	/* these subpages */
2694 			arr[3] = n - 4;
2695 			break;
2696 		case 0x2f:	/* Informational exceptions subpages */
2697 			n = 4;
2698 			arr[n++] = 0x2f;
2699 			arr[n++] = 0x0;		/* Informational exceptions */
2700 			arr[n++] = 0x2f;
2701 			arr[n++] = 0xff;	/* these subpages */
2702 			arr[3] = n - 4;
2703 			break;
2704 		default:
2705 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2706 			return check_condition_result;
2707 		}
2708 	} else if (subpcode > 0) {
2709 		arr[0] |= 0x40;
2710 		arr[1] = subpcode;
2711 		if (pcode == 0xd && subpcode == 1)
2712 			arr[3] = resp_env_rep_l_spg(arr + 4);
2713 		else {
2714 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2715 			return check_condition_result;
2716 		}
2717 	} else {
2718 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2719 		return check_condition_result;
2720 	}
2721 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2722 	return fill_from_dev_buffer(scp, arr,
2723 		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2724 }
2725 
2726 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2727 {
2728 	return devip->nr_zones != 0;
2729 }
2730 
/*
 * Return the zone state for the zone containing @lba. When the zone
 * capacity is smaller than the zone size, each sequential zone in the
 * zstate[] array is followed by a gap zone, so the array index must be
 * adjusted accordingly.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* Fast path: no gap zones, or @lba falls in a conventional zone */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* @lba may land in the gap zone following the sequential zone */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
2752 
2753 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2754 {
2755 	return zsp->z_type == ZBC_ZTYPE_CNV;
2756 }
2757 
2758 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2759 {
2760 	return zsp->z_type == ZBC_ZTYPE_GAP;
2761 }
2762 
2763 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2764 {
2765 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2766 }
2767 
2768 static void zbc_close_zone(struct sdebug_dev_info *devip,
2769 			   struct sdeb_zone_state *zsp)
2770 {
2771 	enum sdebug_z_cond zc;
2772 
2773 	if (!zbc_zone_is_seq(zsp))
2774 		return;
2775 
2776 	zc = zsp->z_cond;
2777 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2778 		return;
2779 
2780 	if (zc == ZC2_IMPLICIT_OPEN)
2781 		devip->nr_imp_open--;
2782 	else
2783 		devip->nr_exp_open--;
2784 
2785 	if (zsp->z_wp == zsp->z_start) {
2786 		zsp->z_cond = ZC1_EMPTY;
2787 	} else {
2788 		zsp->z_cond = ZC4_CLOSED;
2789 		devip->nr_closed++;
2790 	}
2791 }
2792 
2793 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2794 {
2795 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2796 	unsigned int i;
2797 
2798 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2799 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2800 			zbc_close_zone(devip, zsp);
2801 			return;
2802 		}
2803 	}
2804 }
2805 
/*
 * Transition a sequential zone to the explicitly or implicitly open
 * condition, first closing another implicitly open zone if the device's
 * open-zone limit would otherwise be exceeded.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	/* zsp may be CLOSED on entry or via zbc_close_zone() just above */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2836 
2837 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2838 				     struct sdeb_zone_state *zsp)
2839 {
2840 	switch (zsp->z_cond) {
2841 	case ZC2_IMPLICIT_OPEN:
2842 		devip->nr_imp_open--;
2843 		break;
2844 	case ZC3_EXPLICIT_OPEN:
2845 		devip->nr_exp_open--;
2846 		break;
2847 	default:
2848 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2849 			  zsp->z_start, zsp->z_cond);
2850 		break;
2851 	}
2852 	zsp->z_cond = ZC5_FULL;
2853 }
2854 
/*
 * Advance the write pointer(s) for a write of @num blocks at @lba. For
 * sequential-write-required zones the WP simply moves forward (callers
 * have already validated alignment and boundaries); for other sequential
 * zones the write may be out of order and may span several zones, which
 * are then flagged as non-sequential resources.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		/* A write not starting at the WP makes the zone non-seq */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write crosses the zone end: consume up to zend */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: the WP does not move */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* Continue in the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2896 
/*
 * Validate a read or write of @num blocks at @lba against the ZBC zone
 * rules (zone types, boundaries, write pointer alignment, open-zone
 * limits). Returns 0 when the access is allowed; otherwise sets up a
 * sense buffer and returns check_condition_result.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2973 
2974 static inline int check_device_access_params
2975 			(struct scsi_cmnd *scp, unsigned long long lba,
2976 			 unsigned int num, bool write)
2977 {
2978 	struct scsi_device *sdp = scp->device;
2979 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2980 
2981 	if (lba + num > sdebug_capacity) {
2982 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2983 		return check_condition_result;
2984 	}
2985 	/* transfer length excessive (tie in to block limits VPD page) */
2986 	if (num > sdebug_store_sectors) {
2987 		/* needs work to find which cdb byte 'num' comes from */
2988 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2989 		return check_condition_result;
2990 	}
2991 	if (write && unlikely(sdebug_wp)) {
2992 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2993 		return check_condition_result;
2994 	}
2995 	if (sdebug_dev_is_zoned(devip))
2996 		return check_zbc_access_params(scp, lba, num, write);
2997 
2998 	return 0;
2999 }
3000 
3001 /*
3002  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3003  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3004  * that access any of the "stores" in struct sdeb_store_info should call this
3005  * function with bug_if_fake_rw set to true.
3006  */
3007 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3008 						bool bug_if_fake_rw)
3009 {
3010 	if (sdebug_fake_rw) {
3011 		BUG_ON(bug_if_fake_rw);	/* See note above */
3012 		return NULL;
3013 	}
3014 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3015 }
3016 
/*
 * Returns number of bytes copied or -1 if error. Copies between the
 * command's scatter/gather list and the backing store. The store is
 * treated as circular: an access running past sdebug_store_sectors
 * wraps around to the start of the store.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* do_div() divides lba in place; remainder is the store offset */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;	/* wrapped part */

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Copy the wrapped tail from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3059 
3060 /* Returns number of bytes copied or -1 if error. */
3061 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3062 {
3063 	struct scsi_data_buffer *sdb = &scp->sdb;
3064 
3065 	if (!sdb->length)
3066 		return 0;
3067 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3068 		return -1;
3069 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3070 			      num * sdebug_sector_size, 0, true);
3071 }
3072 
3073 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3074  * arr into sip->storep+lba and return true. If comparison fails then
3075  * return false. */
3076 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3077 			      const u8 *arr, bool compare_only)
3078 {
3079 	bool res;
3080 	u64 block, rest = 0;
3081 	u32 store_blks = sdebug_store_sectors;
3082 	u32 lb_size = sdebug_sector_size;
3083 	u8 *fsp = sip->storep;
3084 
3085 	block = do_div(lba, store_blks);
3086 	if (block + num > store_blks)
3087 		rest = block + num - store_blks;
3088 
3089 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3090 	if (!res)
3091 		return res;
3092 	if (rest)
3093 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3094 			     rest * lb_size);
3095 	if (!res)
3096 		return res;
3097 	if (compare_only)
3098 		return true;
3099 	arr += num * lb_size;
3100 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3101 	if (rest)
3102 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3103 	return res;
3104 }
3105 
3106 static __be16 dif_compute_csum(const void *buf, int len)
3107 {
3108 	__be16 csum;
3109 
3110 	if (sdebug_guard)
3111 		csum = (__force __be16)ip_compute_csum(buf, len);
3112 	else
3113 		csum = cpu_to_be16(crc_t10dif(buf, len));
3114 
3115 	return csum;
3116 }
3117 
/*
 * Check one protection tuple against the corresponding sector data.
 * Returns 0 when all enabled checks pass, 0x01 on a guard (checksum)
 * mismatch, 0x03 on a reference tag mismatch.
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must match the expected initial LBA tag */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3144 
/*
 * Copy protection information between the command's protection
 * scatterlist and the PI store: into the sgl when @read is true, out of
 * it otherwise. The PI store is circular, so a run that crosses its end
 * wraps back to the beginning.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* Portion of this chunk that extends past the store's end */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped part lives at the start of the PI store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3190 
/*
 * Verify the stored protection information for @sectors blocks starting
 * at @start_sec, then copy it into the command's protection scatterlist.
 * Returns 0 or a dif_verify() error code (0x01 guard / 0x03 ref tag).
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* App tag of 0xffff means "do not check this block" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* Hand the PI to the "initiator" side regardless of verify result */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3230 
3231 static inline void
3232 sdeb_read_lock(struct sdeb_store_info *sip)
3233 {
3234 	if (sdebug_no_rwlock) {
3235 		if (sip)
3236 			__acquire(&sip->macc_lck);
3237 		else
3238 			__acquire(&sdeb_fake_rw_lck);
3239 	} else {
3240 		if (sip)
3241 			read_lock(&sip->macc_lck);
3242 		else
3243 			read_lock(&sdeb_fake_rw_lck);
3244 	}
3245 }
3246 
3247 static inline void
3248 sdeb_read_unlock(struct sdeb_store_info *sip)
3249 {
3250 	if (sdebug_no_rwlock) {
3251 		if (sip)
3252 			__release(&sip->macc_lck);
3253 		else
3254 			__release(&sdeb_fake_rw_lck);
3255 	} else {
3256 		if (sip)
3257 			read_unlock(&sip->macc_lck);
3258 		else
3259 			read_unlock(&sdeb_fake_rw_lck);
3260 	}
3261 }
3262 
3263 static inline void
3264 sdeb_write_lock(struct sdeb_store_info *sip)
3265 {
3266 	if (sdebug_no_rwlock) {
3267 		if (sip)
3268 			__acquire(&sip->macc_lck);
3269 		else
3270 			__acquire(&sdeb_fake_rw_lck);
3271 	} else {
3272 		if (sip)
3273 			write_lock(&sip->macc_lck);
3274 		else
3275 			write_lock(&sdeb_fake_rw_lck);
3276 	}
3277 }
3278 
3279 static inline void
3280 sdeb_write_unlock(struct sdeb_store_info *sip)
3281 {
3282 	if (sdebug_no_rwlock) {
3283 		if (sip)
3284 			__release(&sip->macc_lck);
3285 		else
3286 			__release(&sdeb_fake_rw_lck);
3287 	} else {
3288 		if (sip)
3289 			write_unlock(&sip->macc_lck);
3290 		else
3291 			write_unlock(&sdeb_fake_rw_lck);
3292 	}
3293 }
3294 
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Validates protection-information usage, performs configured error
 * injection, and copies data from the backing store into the command
 * buffer.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and (for PI) expected initial LBA tag */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* for type 2 protection, a nonzero RDPROTECT is rejected */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Error injection: pretend only half the blocks were transferred */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Error injection: medium error within the configured LBA range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Error injection: recovered error, or DIF/DIX failures */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3437 
/*
 * Verify the protection information accompanying a write and, on
 * success, store it in the PI store. Walks the data and protection
 * scatterlists in lockstep, one t10_pi_tuple per logical block.
 * Returns 0 on success or a dif_verify() error code (0x01/0x03).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT == 3 skips the PI check for this write */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: persist them in the PI store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3509 
/* Convert an LBA to a bit index in the provisioning (mapped) bitmap. */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);	/* lba /= granularity */
	return lba;
}
3517 
/* Inverse of lba_to_map_index(): first LBA covered by bitmap bit @index. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3526 
3527 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3528 			      unsigned int *num)
3529 {
3530 	sector_t end;
3531 	unsigned int mapped;
3532 	unsigned long index;
3533 	unsigned long next;
3534 
3535 	index = lba_to_map_index(lba);
3536 	mapped = test_bit(index, sip->map_storep);
3537 
3538 	if (mapped)
3539 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3540 	else
3541 		next = find_next_bit(sip->map_storep, map_size, index);
3542 
3543 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3544 	*num = end - lba;
3545 	return mapped;
3546 }
3547 
3548 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3549 		       unsigned int len)
3550 {
3551 	sector_t end = lba + len;
3552 
3553 	while (lba < end) {
3554 		unsigned long index = lba_to_map_index(lba);
3555 
3556 		if (index < map_size)
3557 			set_bit(index, sip->map_storep);
3558 
3559 		lba = map_index_to_lba(index + 1);
3560 	}
3561 }
3562 
3563 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3564 			 unsigned int len)
3565 {
3566 	sector_t end = lba + len;
3567 	u8 *fsp = sip->storep;
3568 
3569 	while (lba < end) {
3570 		unsigned long index = lba_to_map_index(lba);
3571 
3572 		if (lba == map_index_to_lba(index) &&
3573 		    lba + sdebug_unmap_granularity <= end &&
3574 		    index < map_size) {
3575 			clear_bit(index, sip->map_storep);
3576 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3577 				memset(fsp + lba * sdebug_sector_size,
3578 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3579 				       sdebug_sector_size *
3580 				       sdebug_unmap_granularity);
3581 			}
3582 			if (sip->dif_storep) {
3583 				memset(sip->dif_storep + lba, 0xff,
3584 				       sizeof(*sip->dif_storep) *
3585 				       sdebug_unmap_granularity);
3586 			}
3587 		}
3588 		lba = map_index_to_lba(index + 1);
3589 	}
3590 }
3591 
/*
 * Respond to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE(32) and
 * XDWRITEREAD(10): decode the cdb, optionally verify T10 protection
 * information, write to the fake store, then apply any armed error
 * injections.  Returns 0 on success or a SCSI/host result code.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to write */
	u32 ei_lba;	/* expected initial LBA (WRITE(32) only) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode starting LBA and transfer length per cdb variant */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks for WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* for PI type 2, a non-zero WRPROTECT field is rejected */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	/* record the blocks as provisioned when LBP is active */
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* one-shot error injection, armed via sdeb_inject_pending */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3723 
3724 /*
3725  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3726  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3727  */
/*
 * Respond to WRITE SCATTERED(16/32).  The data-out buffer starts with a
 * header plus num_lrd LBA range descriptors (lbdof blocks total), followed
 * by the data for each range.  Each range is checked and written in turn
 * under the store's write lock.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* copy of header + LBA range descriptors */
	u8 *up;			/* walks the range descriptors */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header + all descriptors must fit in the lbdof-sized region */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;	/* data for first range starts after the list */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* first descriptor starts one lrd_size past the list header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* one-shot error injection, armed via sdeb_inject_pending */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
3889 
3890 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3891 			   u32 ei_lba, bool unmap, bool ndob)
3892 {
3893 	struct scsi_device *sdp = scp->device;
3894 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3895 	unsigned long long i;
3896 	u64 block, lbaa;
3897 	u32 lb_size = sdebug_sector_size;
3898 	int ret;
3899 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3900 						scp->device->hostdata, true);
3901 	u8 *fs1p;
3902 	u8 *fsp;
3903 
3904 	sdeb_write_lock(sip);
3905 
3906 	ret = check_device_access_params(scp, lba, num, true);
3907 	if (ret) {
3908 		sdeb_write_unlock(sip);
3909 		return ret;
3910 	}
3911 
3912 	if (unmap && scsi_debug_lbp()) {
3913 		unmap_region(sip, lba, num);
3914 		goto out;
3915 	}
3916 	lbaa = lba;
3917 	block = do_div(lbaa, sdebug_store_sectors);
3918 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3919 	fsp = sip->storep;
3920 	fs1p = fsp + (block * lb_size);
3921 	if (ndob) {
3922 		memset(fs1p, 0, lb_size);
3923 		ret = 0;
3924 	} else
3925 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3926 
3927 	if (-1 == ret) {
3928 		sdeb_write_unlock(sip);
3929 		return DID_ERROR << 16;
3930 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3931 		sdev_printk(KERN_INFO, scp->device,
3932 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3933 			    my_name, "write same", lb_size, ret);
3934 
3935 	/* Copy first sector to remaining blocks */
3936 	for (i = 1 ; i < num ; i++) {
3937 		lbaa = lba + i;
3938 		block = do_div(lbaa, sdebug_store_sectors);
3939 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3940 	}
3941 	if (scsi_debug_lbp())
3942 		map_region(sip, lba, num);
3943 	/* If ZBC zone then bump its write pointer */
3944 	if (sdebug_dev_is_zoned(devip))
3945 		zbc_inc_wp(devip, lba, num);
3946 out:
3947 	sdeb_write_unlock(sip);
3948 
3949 	return 0;
3950 }
3951 
3952 static int resp_write_same_10(struct scsi_cmnd *scp,
3953 			      struct sdebug_dev_info *devip)
3954 {
3955 	u8 *cmd = scp->cmnd;
3956 	u32 lba;
3957 	u16 num;
3958 	u32 ei_lba = 0;
3959 	bool unmap = false;
3960 
3961 	if (cmd[1] & 0x8) {
3962 		if (sdebug_lbpws10 == 0) {
3963 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3964 			return check_condition_result;
3965 		} else
3966 			unmap = true;
3967 	}
3968 	lba = get_unaligned_be32(cmd + 2);
3969 	num = get_unaligned_be16(cmd + 7);
3970 	if (num > sdebug_write_same_length) {
3971 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3972 		return check_condition_result;
3973 	}
3974 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3975 }
3976 
3977 static int resp_write_same_16(struct scsi_cmnd *scp,
3978 			      struct sdebug_dev_info *devip)
3979 {
3980 	u8 *cmd = scp->cmnd;
3981 	u64 lba;
3982 	u32 num;
3983 	u32 ei_lba = 0;
3984 	bool unmap = false;
3985 	bool ndob = false;
3986 
3987 	if (cmd[1] & 0x8) {	/* UNMAP */
3988 		if (sdebug_lbpws == 0) {
3989 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3990 			return check_condition_result;
3991 		} else
3992 			unmap = true;
3993 	}
3994 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3995 		ndob = true;
3996 	lba = get_unaligned_be64(cmd + 2);
3997 	num = get_unaligned_be32(cmd + 10);
3998 	if (num > sdebug_write_same_length) {
3999 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4000 		return check_condition_result;
4001 	}
4002 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4003 }
4004 
4005 /* Note the mode field is in the same position as the (lower) service action
4006  * field. For the Report supported operation codes command, SPC-4 suggests
4007  * each mode of this command should be reported separately; for future. */
4008 static int resp_write_buffer(struct scsi_cmnd *scp,
4009 			     struct sdebug_dev_info *devip)
4010 {
4011 	u8 *cmd = scp->cmnd;
4012 	struct scsi_device *sdp = scp->device;
4013 	struct sdebug_dev_info *dp;
4014 	u8 mode;
4015 
4016 	mode = cmd[1] & 0x1f;
4017 	switch (mode) {
4018 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4019 		/* set UAs on this device only */
4020 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4021 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4022 		break;
4023 	case 0x5:	/* download MC, save and ACT */
4024 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4025 		break;
4026 	case 0x6:	/* download MC with offsets and ACT */
4027 		/* set UAs on most devices (LUs) in this target */
4028 		list_for_each_entry(dp,
4029 				    &devip->sdbg_host->dev_info_list,
4030 				    dev_list)
4031 			if (dp->target == sdp->id) {
4032 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4033 				if (devip != dp)
4034 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4035 						dp->uas_bm);
4036 			}
4037 		break;
4038 	case 0x7:	/* download MC with offsets, save, and ACT */
4039 		/* set UA on all devices (LUs) in this target */
4040 		list_for_each_entry(dp,
4041 				    &devip->sdbg_host->dev_info_list,
4042 				    dev_list)
4043 			if (dp->target == sdp->id)
4044 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4045 					dp->uas_bm);
4046 		break;
4047 	default:
4048 		/* do nothing for this command for other mode values */
4049 		break;
4050 	}
4051 	return 0;
4052 }
4053 
/*
 * COMPARE AND WRITE: fetches 2 * num blocks of data-out and hands both
 * halves to comp_write_worker() (presumably compare-then-write semantics;
 * see that helper).  A miscompare yields MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds verify data followed by write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* PI type 2 forbids a non-zero WRPROTECT field here */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;		/* data-out carries compare + write halves */
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* the write half landed, so note the blocks as provisioned */
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4114 
/* On-the-wire UNMAP block descriptor; all fields big-endian per SBC */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of the range to unmap */
	__be32	blocks;		/* number of logical blocks in the range */
	__be32	__reserved;
};
4120 
4121 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4122 {
4123 	unsigned char *buf;
4124 	struct unmap_block_desc *desc;
4125 	struct sdeb_store_info *sip = devip2sip(devip, true);
4126 	unsigned int i, payload_len, descriptors;
4127 	int ret;
4128 
4129 	if (!scsi_debug_lbp())
4130 		return 0;	/* fib and say its done */
4131 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4132 	BUG_ON(scsi_bufflen(scp) != payload_len);
4133 
4134 	descriptors = (payload_len - 8) / 16;
4135 	if (descriptors > sdebug_unmap_max_desc) {
4136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4137 		return check_condition_result;
4138 	}
4139 
4140 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4141 	if (!buf) {
4142 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4143 				INSUFF_RES_ASCQ);
4144 		return check_condition_result;
4145 	}
4146 
4147 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4148 
4149 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4150 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4151 
4152 	desc = (void *)&buf[8];
4153 
4154 	sdeb_write_lock(sip);
4155 
4156 	for (i = 0 ; i < descriptors ; i++) {
4157 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4158 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4159 
4160 		ret = check_device_access_params(scp, lba, num, true);
4161 		if (ret)
4162 			goto out;
4163 
4164 		unmap_region(sip, lba, num);
4165 	}
4166 
4167 	ret = 0;
4168 
4169 out:
4170 	sdeb_write_unlock(sip);
4171 	kfree(buf);
4172 
4173 	return ret;
4174 }
4175 
4176 #define SDEBUG_GET_LBA_STATUS_LEN 32
4177 
4178 static int resp_get_lba_status(struct scsi_cmnd *scp,
4179 			       struct sdebug_dev_info *devip)
4180 {
4181 	u8 *cmd = scp->cmnd;
4182 	u64 lba;
4183 	u32 alloc_len, mapped, num;
4184 	int ret;
4185 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4186 
4187 	lba = get_unaligned_be64(cmd + 2);
4188 	alloc_len = get_unaligned_be32(cmd + 10);
4189 
4190 	if (alloc_len < 24)
4191 		return 0;
4192 
4193 	ret = check_device_access_params(scp, lba, 1, false);
4194 	if (ret)
4195 		return ret;
4196 
4197 	if (scsi_debug_lbp()) {
4198 		struct sdeb_store_info *sip = devip2sip(devip, true);
4199 
4200 		mapped = map_state(sip, lba, &num);
4201 	} else {
4202 		mapped = 1;
4203 		/* following just in case virtual_gb changed */
4204 		sdebug_capacity = get_sdebug_capacity();
4205 		if (sdebug_capacity - lba <= 0xffffffff)
4206 			num = sdebug_capacity - lba;
4207 		else
4208 			num = 0xffffffff;
4209 	}
4210 
4211 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4212 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4213 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4214 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4215 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4216 
4217 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4218 }
4219 
4220 static int resp_sync_cache(struct scsi_cmnd *scp,
4221 			   struct sdebug_dev_info *devip)
4222 {
4223 	int res = 0;
4224 	u64 lba;
4225 	u32 num_blocks;
4226 	u8 *cmd = scp->cmnd;
4227 
4228 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4229 		lba = get_unaligned_be32(cmd + 2);
4230 		num_blocks = get_unaligned_be16(cmd + 7);
4231 	} else {				/* SYNCHRONIZE_CACHE(16) */
4232 		lba = get_unaligned_be64(cmd + 2);
4233 		num_blocks = get_unaligned_be32(cmd + 10);
4234 	}
4235 	if (lba + num_blocks > sdebug_capacity) {
4236 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4237 		return check_condition_result;
4238 	}
4239 	if (!write_since_sync || (cmd[1] & 0x2))
4240 		res = SDEG_RES_IMMED_MASK;
4241 	else		/* delay if write_since_sync and IMMED clear */
4242 		write_since_sync = false;
4243 	return res;
4244 }
4245 
4246 /*
4247  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4248  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4249  * a GOOD status otherwise. Model a disk with a big cache and yield
4250  * CONDITION MET. Actually tries to bring range in main memory into the
4251  * cache associated with the CPU(s).
4252  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks wrapping past end of store */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() modifies lba in place and returns the remainder */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4294 
4295 #define RL_BUCKET_ELEMS 8
4296 
4297 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4298  * (W-LUN), the normal Linux scanning logic does not associate it with a
4299  * device (e.g. /dev/sg7). The following magic will make that association:
4300  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4301  * where <n> is a host number. If there are multiple targets in a host then
4302  * the above will associate a W-LUN to each target. To only get a W-LUN
4303  * for target 2, then use "echo '- 2 49409' > scan" .
4304  */
/*
 * REPORT LUNS: emit the 8 byte header followed by one 8 byte LUN entry
 * per reported LUN, produced in buckets of RL_BUCKET_ELEMS entries that
 * are copied out incrementally via p_fill_from_dev_buffer().
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* LUN 0 is suppressed when sdebug_no_lun_0 is set */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 carries the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		/* partially filled bucket means we are done generating */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS W-LUN in the next free slot */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4397 
/*
 * VERIFY(10/16).  BYTCHK=0 is reported as success without checking;
 * BYTCHK=1 compares the fetched data-out blocks against the store;
 * BYTCHK=3 sends one block which is replicated and compared against every
 * block in the range; BYTCHK=2 is rejected as reserved.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* a_num: blocks actually fetched (1 for BYTCHK=3, else vnum) */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* arr sized for vnum blocks even when only a_num are fetched */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single sent block across the whole range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4473 
4474 #define RZONES_DESC_HD 64
4475 
4476 /* Report zones depending on start LBA and reporting options */
4477 static int resp_report_zones(struct scsi_cmnd *scp,
4478 			     struct sdebug_dev_info *devip)
4479 {
4480 	unsigned int rep_max_zones, nrz = 0;
4481 	int ret = 0;
4482 	u32 alloc_len, rep_opts, rep_len;
4483 	bool partial;
4484 	u64 lba, zs_lba;
4485 	u8 *arr = NULL, *desc;
4486 	u8 *cmd = scp->cmnd;
4487 	struct sdeb_zone_state *zsp = NULL;
4488 	struct sdeb_store_info *sip = devip2sip(devip, false);
4489 
4490 	if (!sdebug_dev_is_zoned(devip)) {
4491 		mk_sense_invalid_opcode(scp);
4492 		return check_condition_result;
4493 	}
4494 	zs_lba = get_unaligned_be64(cmd + 2);
4495 	alloc_len = get_unaligned_be32(cmd + 10);
4496 	if (alloc_len == 0)
4497 		return 0;	/* not an error */
4498 	rep_opts = cmd[14] & 0x3f;
4499 	partial = cmd[14] & 0x80;
4500 
4501 	if (zs_lba >= sdebug_capacity) {
4502 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4503 		return check_condition_result;
4504 	}
4505 
4506 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4507 
4508 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4509 	if (!arr) {
4510 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4511 				INSUFF_RES_ASCQ);
4512 		return check_condition_result;
4513 	}
4514 
4515 	sdeb_read_lock(sip);
4516 
4517 	desc = arr + 64;
4518 	for (lba = zs_lba; lba < sdebug_capacity;
4519 	     lba = zsp->z_start + zsp->z_size) {
4520 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4521 			break;
4522 		zsp = zbc_zone(devip, lba);
4523 		switch (rep_opts) {
4524 		case 0x00:
4525 			/* All zones */
4526 			break;
4527 		case 0x01:
4528 			/* Empty zones */
4529 			if (zsp->z_cond != ZC1_EMPTY)
4530 				continue;
4531 			break;
4532 		case 0x02:
4533 			/* Implicit open zones */
4534 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4535 				continue;
4536 			break;
4537 		case 0x03:
4538 			/* Explicit open zones */
4539 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4540 				continue;
4541 			break;
4542 		case 0x04:
4543 			/* Closed zones */
4544 			if (zsp->z_cond != ZC4_CLOSED)
4545 				continue;
4546 			break;
4547 		case 0x05:
4548 			/* Full zones */
4549 			if (zsp->z_cond != ZC5_FULL)
4550 				continue;
4551 			break;
4552 		case 0x06:
4553 		case 0x07:
4554 		case 0x10:
4555 			/*
4556 			 * Read-only, offline, reset WP recommended are
4557 			 * not emulated: no zones to report;
4558 			 */
4559 			continue;
4560 		case 0x11:
4561 			/* non-seq-resource set */
4562 			if (!zsp->z_non_seq_resource)
4563 				continue;
4564 			break;
4565 		case 0x3e:
4566 			/* All zones except gap zones. */
4567 			if (zbc_zone_is_gap(zsp))
4568 				continue;
4569 			break;
4570 		case 0x3f:
4571 			/* Not write pointer (conventional) zones */
4572 			if (zbc_zone_is_seq(zsp))
4573 				continue;
4574 			break;
4575 		default:
4576 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4577 					INVALID_FIELD_IN_CDB, 0);
4578 			ret = check_condition_result;
4579 			goto fini;
4580 		}
4581 
4582 		if (nrz < rep_max_zones) {
4583 			/* Fill zone descriptor */
4584 			desc[0] = zsp->z_type;
4585 			desc[1] = zsp->z_cond << 4;
4586 			if (zsp->z_non_seq_resource)
4587 				desc[1] |= 1 << 1;
4588 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4589 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4590 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4591 			desc += 64;
4592 		}
4593 
4594 		if (partial && nrz >= rep_max_zones)
4595 			break;
4596 
4597 		nrz++;
4598 	}
4599 
4600 	/* Report header */
4601 	/* Zone list length. */
4602 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4603 	/* Maximum LBA */
4604 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4605 	/* Zone starting LBA granularity. */
4606 	if (devip->zcap < devip->zsize)
4607 		put_unaligned_be64(devip->zsize, arr + 16);
4608 
4609 	rep_len = (unsigned long)desc - (unsigned long)arr;
4610 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4611 
4612 fini:
4613 	sdeb_read_unlock(sip);
4614 	kfree(arr);
4615 	return ret;
4616 }
4617 
4618 /* Logic transplanted from tcmu-runner, file_zbc.c */
4619 static void zbc_open_all(struct sdebug_dev_info *devip)
4620 {
4621 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4622 	unsigned int i;
4623 
4624 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4625 		if (zsp->z_cond == ZC4_CLOSED)
4626 			zbc_open_zone(devip, &devip->zstate[i], true);
4627 	}
4628 }
4629 
/*
 * OPEN ZONE: explicitly open the zone whose start LBA is in CDB bytes
 * 2..9, or every closed zone when the ALL bit (CDB byte 14 bit 0) is
 * set.  Returns DATA PROTECT sense when opening would exceed max_open.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	/* Zone commands are only valid on ZBC devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* The zone ID must be the first LBA of a sequential zone */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* Already explicitly open or full: nothing to do, not an error */
	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* Enforce the limit on explicitly open zones */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4697 
4698 static void zbc_close_all(struct sdebug_dev_info *devip)
4699 {
4700 	unsigned int i;
4701 
4702 	for (i = 0; i < devip->nr_zones; i++)
4703 		zbc_close_zone(devip, &devip->zstate[i]);
4704 }
4705 
4706 static int resp_close_zone(struct scsi_cmnd *scp,
4707 			   struct sdebug_dev_info *devip)
4708 {
4709 	int res = 0;
4710 	u64 z_id;
4711 	u8 *cmd = scp->cmnd;
4712 	struct sdeb_zone_state *zsp;
4713 	bool all = cmd[14] & 0x01;
4714 	struct sdeb_store_info *sip = devip2sip(devip, false);
4715 
4716 	if (!sdebug_dev_is_zoned(devip)) {
4717 		mk_sense_invalid_opcode(scp);
4718 		return check_condition_result;
4719 	}
4720 
4721 	sdeb_write_lock(sip);
4722 
4723 	if (all) {
4724 		zbc_close_all(devip);
4725 		goto fini;
4726 	}
4727 
4728 	/* Close specified zone */
4729 	z_id = get_unaligned_be64(cmd + 2);
4730 	if (z_id >= sdebug_capacity) {
4731 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4732 		res = check_condition_result;
4733 		goto fini;
4734 	}
4735 
4736 	zsp = zbc_zone(devip, z_id);
4737 	if (z_id != zsp->z_start) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 	if (zbc_zone_is_conv(zsp)) {
4743 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4744 		res = check_condition_result;
4745 		goto fini;
4746 	}
4747 
4748 	zbc_close_zone(devip, zsp);
4749 fini:
4750 	sdeb_write_unlock(sip);
4751 	return res;
4752 }
4753 
4754 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4755 			    struct sdeb_zone_state *zsp, bool empty)
4756 {
4757 	enum sdebug_z_cond zc = zsp->z_cond;
4758 
4759 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4760 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4761 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4762 			zbc_close_zone(devip, zsp);
4763 		if (zsp->z_cond == ZC4_CLOSED)
4764 			devip->nr_closed--;
4765 		zsp->z_wp = zsp->z_start + zsp->z_size;
4766 		zsp->z_cond = ZC5_FULL;
4767 	}
4768 }
4769 
4770 static void zbc_finish_all(struct sdebug_dev_info *devip)
4771 {
4772 	unsigned int i;
4773 
4774 	for (i = 0; i < devip->nr_zones; i++)
4775 		zbc_finish_zone(devip, &devip->zstate[i], false);
4776 }
4777 
/*
 * FINISH ZONE: move the zone whose start LBA is in CDB bytes 2..9 (or
 * all zones when the ALL bit is set) to the FULL condition.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	/* Zone commands are only valid on ZBC devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	/* Zone ID must be the start LBA of a sequential zone */
	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty=true: an EMPTY zone may be finished directly */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4825 
/*
 * Reset the write pointer of @zsp to the zone start, zeroing the backing
 * store for any sectors written so far, and leave the zone EMPTY.
 * Conventional and gap zones are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	/* Close an open zone first so nr_exp_open/nr_closed stay consistent */
	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* Zero the written part of the zone in the ramdisk store.
	 * NOTE(review): assumes sip->storep is non-NULL here — confirm
	 * against devip2sip() behavior for fake_store configurations. */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4850 
4851 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4852 {
4853 	unsigned int i;
4854 
4855 	for (i = 0; i < devip->nr_zones; i++)
4856 		zbc_rwp_zone(devip, &devip->zstate[i]);
4857 }
4858 
4859 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4860 {
4861 	struct sdeb_zone_state *zsp;
4862 	int res = 0;
4863 	u64 z_id;
4864 	u8 *cmd = scp->cmnd;
4865 	bool all = cmd[14] & 0x01;
4866 	struct sdeb_store_info *sip = devip2sip(devip, false);
4867 
4868 	if (!sdebug_dev_is_zoned(devip)) {
4869 		mk_sense_invalid_opcode(scp);
4870 		return check_condition_result;
4871 	}
4872 
4873 	sdeb_write_lock(sip);
4874 
4875 	if (all) {
4876 		zbc_rwp_all(devip);
4877 		goto fini;
4878 	}
4879 
4880 	z_id = get_unaligned_be64(cmd + 2);
4881 	if (z_id >= sdebug_capacity) {
4882 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4883 		res = check_condition_result;
4884 		goto fini;
4885 	}
4886 
4887 	zsp = zbc_zone(devip, z_id);
4888 	if (z_id != zsp->z_start) {
4889 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4890 		res = check_condition_result;
4891 		goto fini;
4892 	}
4893 	if (zbc_zone_is_conv(zsp)) {
4894 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4895 		res = check_condition_result;
4896 		goto fini;
4897 	}
4898 
4899 	zbc_rwp_zone(devip, zsp);
4900 fini:
4901 	sdeb_write_unlock(sip);
4902 	return res;
4903 }
4904 
4905 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4906 {
4907 	u16 hwq;
4908 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4909 
4910 	hwq = blk_mq_unique_tag_to_hwq(tag);
4911 
4912 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4913 	if (WARN_ON_ONCE(hwq >= submit_queues))
4914 		hwq = 0;
4915 
4916 	return sdebug_q_arr + hwq;
4917 }
4918 
4919 static u32 get_tag(struct scsi_cmnd *cmnd)
4920 {
4921 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4922 }
4923 
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* Consume the abort flag: this completion will be suppressed below */
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* Count completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* Validate the slot index before using it to index qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	/* Slot may already have been torn down (e.g. by stop_queued_cmnd) */
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* A positive retired_max_queue means max_queue was reduced by the
	 * user while commands were still in flight */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* When the highest in-use slot drops inside the new limit,
		 * the retirement phase is over */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scsi_done(scp); /* callback to mid level */
}
4997 
4998 /* When high resolution timer goes off this function is called. */
4999 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5000 {
5001 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5002 						  hrt);
5003 	sdebug_q_cmd_complete(sd_dp);
5004 	return HRTIMER_NORESTART;
5005 }
5006 
5007 /* When work queue schedules work, it calls this function. */
5008 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5009 {
5010 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5011 						  ew.work);
5012 	sdebug_q_cmd_complete(sd_dp);
5013 }
5014 
/* State for sdebug_uuid_ctl == 2: all LUs share one lazily generated UUID
 * (see sdebug_device_create()). */
static bool got_shared_uuid;
static uuid_t shared_uuid;
5017 
/*
 * Build the per-device zone table (devip->zstate) for a ZBC device:
 * leading conventional zones, then sequential zones, with a gap zone
 * after each sequential zone when zone capacity < zone size.
 * Returns 0 on success, -EINVAL on bad geometry, -ENOMEM on allocation
 * failure.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* Halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* Zone capacity defaults to the full zone size */
	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	/* Lay out the zones in LBA order */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* Zone-size-aligned start: a sequential zone */
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* Unaligned start: the gap after a short-capacity
			 * sequential zone */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
5129 
/*
 * Allocate and initialize a pseudo device, assigning its LU name UUID
 * per sdebug_uuid_ctl (1 = unique per LU, 2 = shared across LUs) and
 * building zone state when ZBC emulation is enabled.  The new entry is
 * appended to the host's dev_info_list.  Returns NULL on failure.
 */
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			/* Generate the shared UUID once, on first use */
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zmodel = sdeb_zbc_model;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zmodel = BLK_ZONED_NONE;
		}
		devip->create_ts = ktime_get_boottime();
		/* stopped == 2: wait for TUR-based ready transition */
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
5164 
5165 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5166 {
5167 	struct sdebug_host_info *sdbg_host;
5168 	struct sdebug_dev_info *open_devip = NULL;
5169 	struct sdebug_dev_info *devip;
5170 
5171 	sdbg_host = shost_to_sdebug_host(sdev->host);
5172 
5173 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5174 		if ((devip->used) && (devip->channel == sdev->channel) &&
5175 		    (devip->target == sdev->id) &&
5176 		    (devip->lun == sdev->lun))
5177 			return devip;
5178 		else {
5179 			if ((!devip->used) && (!open_devip))
5180 				open_devip = devip;
5181 		}
5182 	}
5183 	if (!open_devip) { /* try and make a new one */
5184 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5185 		if (!open_devip) {
5186 			pr_err("out of memory at line %d\n", __LINE__);
5187 			return NULL;
5188 		}
5189 	}
5190 
5191 	open_devip->channel = sdev->channel;
5192 	open_devip->target = sdev->id;
5193 	open_devip->lun = sdev->lun;
5194 	open_devip->sdbg_host = sdbg_host;
5195 	atomic_set(&open_devip->num_in_q, 0);
5196 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5197 	open_devip->used = true;
5198 	return open_devip;
5199 }
5200 
5201 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5202 {
5203 	if (sdebug_verbose)
5204 		pr_info("slave_alloc <%u %u %u %llu>\n",
5205 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5206 	return 0;
5207 }
5208 
5209 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5210 {
5211 	struct sdebug_dev_info *devip =
5212 			(struct sdebug_dev_info *)sdp->hostdata;
5213 
5214 	if (sdebug_verbose)
5215 		pr_info("slave_configure <%u %u %u %llu>\n",
5216 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5217 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5218 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5219 	if (devip == NULL) {
5220 		devip = find_build_dev_info(sdp);
5221 		if (devip == NULL)
5222 			return 1;  /* no resources, will be marked offline */
5223 	}
5224 	sdp->hostdata = devip;
5225 	if (sdebug_no_uld)
5226 		sdp->no_uld_attach = 1;
5227 	config_cdb_len(sdp);
5228 	return 0;
5229 }
5230 
5231 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5232 {
5233 	struct sdebug_dev_info *devip =
5234 		(struct sdebug_dev_info *)sdp->hostdata;
5235 
5236 	if (sdebug_verbose)
5237 		pr_info("slave_destroy <%u %u %u %llu>\n",
5238 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5239 	if (devip) {
5240 		/* make this slot available for re-use */
5241 		devip->used = false;
5242 		sdp->hostdata = NULL;
5243 	}
5244 }
5245 
5246 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5247 			   enum sdeb_defer_type defer_t)
5248 {
5249 	if (!sd_dp)
5250 		return;
5251 	if (defer_t == SDEB_DEFER_HRT)
5252 		hrtimer_cancel(&sd_dp->hrt);
5253 	else if (defer_t == SDEB_DEFER_WQ)
5254 		cancel_work_sync(&sd_dp->ew.work);
5255 }
5256 
/* If @cmnd is found in any submission queue, deletes its timer or work
 * queue entry and returns true; else returns false. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* Scan up to the retired limit too: slots above the reduced
		 * max_queue may still hold in-flight commands */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* Atomically claim the deferred completion so
				 * it cannot also fire via timer/workqueue */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = READ_ONCE(sd_dp->defer_t);
					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* NOTE(review): unlock before cancelling —
				 * presumably because hrtimer_cancel()/
				 * cancel_work_sync() can wait on the handler,
				 * which takes qc_lock; confirm */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5302 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* Claim the deferred completion so the timer/
				 * work handler will not also complete it */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = READ_ONCE(sd_dp->defer_t);
					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* Drop the lock while cancelling (the cancel
				 * may wait on a handler that takes qc_lock),
				 * then re-take it to continue the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5341 
5342 /* Free queued command memory on heap */
5343 static void free_all_queued(void)
5344 {
5345 	int j, k;
5346 	struct sdebug_queue *sqp;
5347 	struct sdebug_queued_cmd *sqcp;
5348 
5349 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5350 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5351 			sqcp = &sqp->qc_arr[k];
5352 			kfree(sqcp->sd_dp);
5353 			sqcp->sd_dp = NULL;
5354 		}
5355 	}
5356 }
5357 
5358 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5359 {
5360 	bool ok;
5361 
5362 	++num_aborts;
5363 
5364 	ok = stop_queued_cmnd(SCpnt);
5365 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5366 		sdev_printk(KERN_INFO, SCpnt->device,
5367 			    "%s: command%s found\n", __func__,
5368 			    ok ? "" : " not");
5369 
5370 	return SUCCESS;
5371 }
5372 
5373 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5374 {
5375 	struct scsi_device *sdp = SCpnt->device;
5376 	struct sdebug_dev_info *devip = sdp->hostdata;
5377 
5378 	++num_dev_resets;
5379 
5380 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5381 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5382 	if (devip)
5383 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5384 
5385 	return SUCCESS;
5386 }
5387 
5388 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5389 {
5390 	struct sdebug_host_info *sdbg_host;
5391 	struct sdebug_dev_info *devip;
5392 	struct scsi_device *sdp;
5393 	struct Scsi_Host *hp;
5394 	int k = 0;
5395 
5396 	++num_target_resets;
5397 	if (!SCpnt)
5398 		goto lie;
5399 	sdp = SCpnt->device;
5400 	if (!sdp)
5401 		goto lie;
5402 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5403 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5404 	hp = sdp->host;
5405 	if (!hp)
5406 		goto lie;
5407 	sdbg_host = shost_to_sdebug_host(hp);
5408 	if (sdbg_host) {
5409 		list_for_each_entry(devip,
5410 				    &sdbg_host->dev_info_list,
5411 				    dev_list)
5412 			if (devip->target == sdp->id) {
5413 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5414 				++k;
5415 			}
5416 	}
5417 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5418 		sdev_printk(KERN_INFO, sdp,
5419 			    "%s: %d device(s) found in target\n", __func__, k);
5420 lie:
5421 	return SUCCESS;
5422 }
5423 
5424 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5425 {
5426 	struct sdebug_host_info *sdbg_host;
5427 	struct sdebug_dev_info *devip;
5428 	struct scsi_device *sdp;
5429 	struct Scsi_Host *hp;
5430 	int k = 0;
5431 
5432 	++num_bus_resets;
5433 	if (!(SCpnt && SCpnt->device))
5434 		goto lie;
5435 	sdp = SCpnt->device;
5436 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5437 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5438 	hp = sdp->host;
5439 	if (hp) {
5440 		sdbg_host = shost_to_sdebug_host(hp);
5441 		if (sdbg_host) {
5442 			list_for_each_entry(devip,
5443 					    &sdbg_host->dev_info_list,
5444 					    dev_list) {
5445 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5446 				++k;
5447 			}
5448 		}
5449 	}
5450 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5451 		sdev_printk(KERN_INFO, sdp,
5452 			    "%s: %d device(s) found in host\n", __func__, k);
5453 lie:
5454 	return SUCCESS;
5455 }
5456 
5457 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5458 {
5459 	struct sdebug_host_info *sdbg_host;
5460 	struct sdebug_dev_info *devip;
5461 	int k = 0;
5462 
5463 	++num_host_resets;
5464 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5465 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5466 	spin_lock(&sdebug_host_list_lock);
5467 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5468 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5469 				    dev_list) {
5470 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5471 			++k;
5472 		}
5473 	}
5474 	spin_unlock(&sdebug_host_list_lock);
5475 	stop_all_queued();
5476 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5477 		sdev_printk(KERN_INFO, SCpnt->device,
5478 			    "%s: %d device(s) found\n", __func__, k);
5479 	return SUCCESS;
5480 }
5481 
/*
 * Write an MS-DOS style partition table into the first sector of the
 * ramdisk store @ramp, dividing the device into sdebug_num_parts
 * cylinder-aligned Linux (0x83) partitions.  Does nothing for stores
 * smaller than 1 MiB or when no partitions are requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* Reserve one track at the front for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* Round each start down to a cylinder boundary and size every
	 * partition by the smallest resulting gap */
	max_part_secs = sectors_per_part;
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends fill loop */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* CHS address of the first sector (sector field is 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5534 
5535 static void block_unblock_all_queues(bool block)
5536 {
5537 	int j;
5538 	struct sdebug_queue *sqp;
5539 
5540 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5541 		atomic_set(&sqp->blocked, (int)block);
5542 }
5543 
5544 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5545  * commands will be processed normally before triggers occur.
5546  */
5547 static void tweak_cmnd_count(void)
5548 {
5549 	int count, modulo;
5550 
5551 	modulo = abs(sdebug_every_nth);
5552 	if (modulo < 2)
5553 		return;
5554 	block_unblock_all_queues(true);
5555 	count = atomic_read(&sdebug_cmnd_count);
5556 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5557 	block_unblock_all_queues(false);
5558 }
5559 
5560 static void clear_queue_stats(void)
5561 {
5562 	atomic_set(&sdebug_cmnd_count, 0);
5563 	atomic_set(&sdebug_completions, 0);
5564 	atomic_set(&sdebug_miss_cpus, 0);
5565 	atomic_set(&sdebug_a_tsf, 0);
5566 }
5567 
5568 static bool inject_on_this_cmd(void)
5569 {
5570 	if (sdebug_every_nth == 0)
5571 		return false;
5572 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5573 }
5574 
5575 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5576 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling scsi_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * delta_jiff == 0 (or devip == NULL) means respond from the submitting
 * thread. Otherwise the completion is deferred: hrtimer for positive
 * delays, work queue for delta_jiff < 0, or left for the block layer's
 * poll path when the request carries REQ_POLLED.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		/* no device info: answer in-thread, default DID_NO_CONNECT */
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		/* queues blocked (e.g. parameter change in progress) */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		/* device queue full: report TASK SET FULL (or caller's
		 * pre-set error) from the submitting thread */
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* rare TASK SET FULL injection when one below queue_depth
		 * and abs(every_nth) commands have accumulated */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this submit queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
				    __func__, sdebug_max_queue);
		goto respond_in_thread;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	/* lazily allocate deferred-completion state for this slot */
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			/* allocation failed: give the slot back and retry */
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function requested an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		/* one-shot transport error injection */
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* optionally randomize the delay in [0, ns) */
			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* delay already consumed by command
					 * processing: complete synchronously
					 * after releasing the slot */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (polled) {
			/* leave completion to the mq poll path */
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			spin_lock_irqsave(&sqp->qc_lock, iflags);
			if (!sd_dp->init_poll) {
				sd_dp->init_poll = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		} else {
			if (!sd_dp->init_hrt) {
				sd_dp->init_hrt = true;
				sqcp->sd_dp = sd_dp;
				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
					     HRTIMER_MODE_REL_PINNED);
				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			/* schedule the invocation of scsi_done() for a later time */
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		if (polled) {
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			spin_lock_irqsave(&sqp->qc_lock, iflags);
			if (!sd_dp->init_poll) {
				sd_dp->init_poll = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		} else {
			if (!sd_dp->init_wq) {
				sd_dp->init_wq = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (unlikely(sd_dp->aborted)) {
			/* injected abort: let the block layer time it out */
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    scsi_cmd_to_rq(cmnd)->tag);
			blk_abort_request(scsi_cmd_to_rq(cmnd));
			atomic_set(&sdeb_inject_pending, 0);
			sd_dp->aborted = false;
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
5806 
5807 /* Note: The following macros create attribute files in the
5808    /sys/module/scsi_debug/parameters directory. Unfortunately this
5809    driver is unaware of a change and cannot trigger auxiliary actions
5810    as it can when the corresponding attribute in the
5811    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5812  */
5813 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5814 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5815 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5816 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5817 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5818 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5819 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5820 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5821 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5822 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5823 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5824 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5825 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5826 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5827 module_param_string(inq_product, sdebug_inq_product_id,
5828 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5829 module_param_string(inq_rev, sdebug_inq_product_rev,
5830 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5831 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5832 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5833 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5834 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5835 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5836 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5837 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5838 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5839 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5840 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5841 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5842 		   S_IRUGO | S_IWUSR);
5843 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5844 		   S_IRUGO | S_IWUSR);
5845 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5846 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5847 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5848 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5849 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5850 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5851 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5852 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5853 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5854 module_param_named(per_host_store, sdebug_per_host_store, bool,
5855 		   S_IRUGO | S_IWUSR);
5856 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5857 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5858 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5859 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5860 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5861 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5862 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5863 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5864 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5865 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5866 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5867 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5868 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5869 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5870 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5871 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5872 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5873 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5874 		   S_IRUGO | S_IWUSR);
5875 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5876 module_param_named(write_same_length, sdebug_write_same_length, int,
5877 		   S_IRUGO | S_IWUSR);
5878 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5879 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5880 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5881 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5882 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5883 
5884 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5885 MODULE_DESCRIPTION("SCSI debug adapter driver");
5886 MODULE_LICENSE("GPL");
5887 MODULE_VERSION(SDEBUG_VERSION);
5888 
5889 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5890 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5891 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5892 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5893 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5894 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5895 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5896 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5897 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5898 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5899 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5900 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5901 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5902 MODULE_PARM_DESC(host_max_queue,
5903 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5904 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5905 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5906 		 SDEBUG_VERSION "\")");
5907 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5908 MODULE_PARM_DESC(lbprz,
5909 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5910 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5911 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5912 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5913 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5914 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5915 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5916 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5917 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5918 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5919 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5920 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5921 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5922 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5923 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5924 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5925 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5926 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5927 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5928 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5929 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5930 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5931 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5932 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5933 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5934 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5935 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5936 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5937 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5938 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5939 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5940 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5941 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5942 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5943 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5944 MODULE_PARM_DESC(uuid_ctl,
5945 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5946 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5947 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5948 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5949 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5950 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5951 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5952 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5953 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5954 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5955 
5956 #define SDEBUG_INFO_LEN 256
5957 static char sdebug_info[SDEBUG_INFO_LEN];
5958 
5959 static const char *scsi_debug_info(struct Scsi_Host *shp)
5960 {
5961 	int k;
5962 
5963 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5964 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5965 	if (k >= (SDEBUG_INFO_LEN - 1))
5966 		return sdebug_info;
5967 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5968 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5969 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5970 		  "statistics", (int)sdebug_statistics);
5971 	return sdebug_info;
5972 }
5973 
5974 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5975 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5976 				 int length)
5977 {
5978 	char arr[16];
5979 	int opts;
5980 	int minLen = length > 15 ? 15 : length;
5981 
5982 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5983 		return -EACCES;
5984 	memcpy(arr, buffer, minLen);
5985 	arr[minLen] = '\0';
5986 	if (1 != sscanf(arr, "%d", &opts))
5987 		return -EINVAL;
5988 	sdebug_opts = opts;
5989 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5990 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5991 	if (sdebug_every_nth != 0)
5992 		tweak_cmnd_count();
5993 	return length;
5994 }
5995 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* module-wide settings */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	/* command/completion counters (atomics) */
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per-submit-queue occupancy from each queue's in_use bitmap */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		/* list every host and every per-host backing store entry */
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6070 
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	/* current response delay in jiffies (set via delay_store()) */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
6075 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6076  * of delay is jiffies.
6077  */
6078 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6079 			   size_t count)
6080 {
6081 	int jdelay, res;
6082 
6083 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6084 		res = count;
6085 		if (sdebug_jdelay != jdelay) {
6086 			int j, k;
6087 			struct sdebug_queue *sqp;
6088 
6089 			block_unblock_all_queues(true);
6090 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6091 			     ++j, ++sqp) {
6092 				k = find_first_bit(sqp->in_use_bm,
6093 						   sdebug_max_queue);
6094 				if (k != sdebug_max_queue) {
6095 					res = -EBUSY;   /* queued commands */
6096 					break;
6097 				}
6098 			}
6099 			if (res > 0) {
6100 				sdebug_jdelay = jdelay;
6101 				sdebug_ndelay = 0;
6102 			}
6103 			block_unblock_all_queues(false);
6104 		}
6105 		return res;
6106 	}
6107 	return -EINVAL;
6108 }
6109 static DRIVER_ATTR_RW(delay);
6110 
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	/* current response delay in nanoseconds (set via ndelay_store()) */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
6115 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6116 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6117 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6118 			    size_t count)
6119 {
6120 	int ndelay, res;
6121 
6122 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6123 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6124 		res = count;
6125 		if (sdebug_ndelay != ndelay) {
6126 			int j, k;
6127 			struct sdebug_queue *sqp;
6128 
6129 			block_unblock_all_queues(true);
6130 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6131 			     ++j, ++sqp) {
6132 				k = find_first_bit(sqp->in_use_bm,
6133 						   sdebug_max_queue);
6134 				if (k != sdebug_max_queue) {
6135 					res = -EBUSY;   /* queued commands */
6136 					break;
6137 				}
6138 			}
6139 			if (res > 0) {
6140 				sdebug_ndelay = ndelay;
6141 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6142 							: DEF_JDELAY;
6143 			}
6144 			block_unblock_all_queues(false);
6145 		}
6146 		return res;
6147 	}
6148 	return -EINVAL;
6149 }
6150 static DRIVER_ATTR_RW(ndelay);
6151 
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	/* option flags are shown in hex to match the bit-mask semantics */
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}
6156 
6157 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6158 			  size_t count)
6159 {
6160 	int opts;
6161 	char work[20];
6162 
6163 	if (sscanf(buf, "%10s", work) == 1) {
6164 		if (strncasecmp(work, "0x", 2) == 0) {
6165 			if (kstrtoint(work + 2, 16, &opts) == 0)
6166 				goto opts_done;
6167 		} else {
6168 			if (kstrtoint(work, 10, &opts) == 0)
6169 				goto opts_done;
6170 		}
6171 	}
6172 	return -EINVAL;
6173 opts_done:
6174 	sdebug_opts = opts;
6175 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6176 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6177 	tweak_cmnd_count();
6178 	return count;
6179 }
6180 static DRIVER_ATTR_RW(opts);
6181 
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	/* simulated SCSI peripheral device type */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
6186 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6187 			   size_t count)
6188 {
6189 	int n;
6190 
6191 	/* Cannot change from or to TYPE_ZBC with sysfs */
6192 	if (sdebug_ptype == TYPE_ZBC)
6193 		return -EINVAL;
6194 
6195 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6196 		if (n == TYPE_ZBC)
6197 			return -EINVAL;
6198 		sdebug_ptype = n;
6199 		return count;
6200 	}
6201 	return -EINVAL;
6202 }
6203 static DRIVER_ATTR_RW(ptype);
6204 
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	/* non-zero -> descriptor sense format, 0 -> fixed format */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
6209 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6210 			    size_t count)
6211 {
6212 	int n;
6213 
6214 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6215 		sdebug_dsense = n;
6216 		return count;
6217 	}
6218 	return -EINVAL;
6219 }
6220 static DRIVER_ATTR_RW(dsense);
6221 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	/* non-zero -> reads/writes are faked (no backing-store copy) */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggle faked reads/writes. Transitioning 1 -> 0 (re-enabling real I/O)
 * sets up a single backing store shared by all hosts; transitioning
 * 0 -> 1 erases all stores except the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both new and current values to 0 or 1 */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the first store; mark it in-use */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
6268 static DRIVER_ATTR_RW(fake_rw);
6269 
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	/* non-zero -> LUN 0 is not presented */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
6274 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6275 			      size_t count)
6276 {
6277 	int n;
6278 
6279 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6280 		sdebug_no_lun_0 = n;
6281 		return count;
6282 	}
6283 	return -EINVAL;
6284 }
6285 static DRIVER_ATTR_RW(no_lun_0);
6286 
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	/* number of simulated targets per host */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
6291 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6292 			      size_t count)
6293 {
6294 	int n;
6295 
6296 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6297 		sdebug_num_tgts = n;
6298 		sdebug_max_tgts_luns();
6299 		return count;
6300 	}
6301 	return -EINVAL;
6302 }
6303 static DRIVER_ATTR_RW(num_tgts);
6304 
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	/* read-only: size of the shared ram store in MiB */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
6309 static DRIVER_ATTR_RO(dev_size_mb);
6310 
static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	/* non-zero -> each newly added host gets its own backing store */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}
6315 
6316 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6317 				    size_t count)
6318 {
6319 	bool v;
6320 
6321 	if (kstrtobool(buf, &v))
6322 		return -EINVAL;
6323 
6324 	sdebug_per_host_store = v;
6325 	return count;
6326 }
6327 static DRIVER_ATTR_RW(per_host_store);
6328 
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	/* read-only: number of fake partitions built at load time */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
6333 static DRIVER_ATTR_RO(num_parts);
6334 
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	/* error/injection trigger period; 0 disables */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
6339 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6340 			       size_t count)
6341 {
6342 	int nth;
6343 	char work[20];
6344 
6345 	if (sscanf(buf, "%10s", work) == 1) {
6346 		if (strncasecmp(work, "0x", 2) == 0) {
6347 			if (kstrtoint(work + 2, 16, &nth) == 0)
6348 				goto every_nth_done;
6349 		} else {
6350 			if (kstrtoint(work, 10, &nth) == 0)
6351 				goto every_nth_done;
6352 		}
6353 	}
6354 	return -EINVAL;
6355 
6356 every_nth_done:
6357 	sdebug_every_nth = nth;
6358 	if (nth && !sdebug_statistics) {
6359 		pr_info("every_nth needs statistics=1, set it\n");
6360 		sdebug_statistics = true;
6361 	}
6362 	tweak_cmnd_count();
6363 	return count;
6364 }
6365 static DRIVER_ATTR_RW(every_nth);
6366 
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	/* LUN address method: 0 peripheral, 1 flat */
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
6371 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6372 				size_t count)
6373 {
6374 	int n;
6375 	bool changed;
6376 
6377 	if (kstrtoint(buf, 0, &n))
6378 		return -EINVAL;
6379 	if (n >= 0) {
6380 		if (n > (int)SAM_LUN_AM_FLAT) {
6381 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6382 			return -EINVAL;
6383 		}
6384 		changed = ((int)sdebug_lun_am != n);
6385 		sdebug_lun_am = n;
6386 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6387 			struct sdebug_host_info *sdhp;
6388 			struct sdebug_dev_info *dp;
6389 
6390 			spin_lock(&sdebug_host_list_lock);
6391 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6392 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6393 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6394 				}
6395 			}
6396 			spin_unlock(&sdebug_host_list_lock);
6397 		}
6398 		return count;
6399 	}
6400 	return -EINVAL;
6401 }
6402 static DRIVER_ATTR_RW(lun_format);
6403 
/* Show the maximum number of LUNs per target. */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/*
 * Set max_luns (0..256). On a real change with SCSI level SPC-3 or
 * later, queue a LUNS CHANGED unit attention on every device.
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6442 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* rejected when host_max_queue fixes the depth (see DRIVER_ATTR_RO) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k := highest in-use slot index across all submit queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)	/* find_last_bit: no bits set */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)	/* in-flight commands above new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6479 
6480 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6481 {
6482 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6483 }
6484 
6485 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6486 {
6487 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6488 }
6489 
6490 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6491 {
6492 	bool v;
6493 
6494 	if (kstrtobool(buf, &v))
6495 		return -EINVAL;
6496 
6497 	sdebug_no_rwlock = v;
6498 	return count;
6499 }
6500 static DRIVER_ATTR_RW(no_rwlock);
6501 
6502 /*
6503  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6504  * in range [0, sdebug_host_max_queue), we can't change it.
6505  */
6506 static DRIVER_ATTR_RO(host_max_queue);
6507 
6508 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6509 {
6510 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6511 }
6512 static DRIVER_ATTR_RO(no_uld);
6513 
6514 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6515 {
6516 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6517 }
6518 static DRIVER_ATTR_RO(scsi_level);
6519 
/* Show the virtual_gb setting. */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/*
 * Set virtual_gb and recompute the reported capacity. On a real change,
 * queue a CAPACITY DATA HAS CHANGED unit attention on every device.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6558 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Add (positive value) or remove (negative value) that many simulated
 * hosts. With per-host stores enabled, prefer re-using a store marked
 * SDEB_XA_NOT_IN_USE over allocating a fresh one.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* take the first store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6602 
6603 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6604 {
6605 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6606 }
6607 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6608 				    size_t count)
6609 {
6610 	int n;
6611 
6612 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6613 		sdebug_vpd_use_hostno = n;
6614 		return count;
6615 	}
6616 	return -EINVAL;
6617 }
6618 static DRIVER_ATTR_RW(vpd_use_hostno);
6619 
6620 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6621 {
6622 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6623 }
6624 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6625 				size_t count)
6626 {
6627 	int n;
6628 
6629 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6630 		if (n > 0)
6631 			sdebug_statistics = true;
6632 		else {
6633 			clear_queue_stats();
6634 			sdebug_statistics = false;
6635 		}
6636 		return count;
6637 	}
6638 	return -EINVAL;
6639 }
6640 static DRIVER_ATTR_RW(statistics);
6641 
6642 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6643 {
6644 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6645 }
6646 static DRIVER_ATTR_RO(sector_size);
6647 
6648 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6649 {
6650 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6651 }
6652 static DRIVER_ATTR_RO(submit_queues);
6653 
6654 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6655 {
6656 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6657 }
6658 static DRIVER_ATTR_RO(dix);
6659 
6660 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6661 {
6662 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6663 }
6664 static DRIVER_ATTR_RO(dif);
6665 
6666 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6667 {
6668 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6669 }
6670 static DRIVER_ATTR_RO(guard);
6671 
6672 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6673 {
6674 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6675 }
6676 static DRIVER_ATTR_RO(ato);
6677 
/*
 * Show the provisioning bitmap of store 0 as a "%pbl" range list.
 * Without LBP support, report the whole store as one mapped range.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above leaves room for this newline + NUL */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6699 
6700 static ssize_t random_show(struct device_driver *ddp, char *buf)
6701 {
6702 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6703 }
6704 
6705 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6706 			    size_t count)
6707 {
6708 	bool v;
6709 
6710 	if (kstrtobool(buf, &v))
6711 		return -EINVAL;
6712 
6713 	sdebug_random = v;
6714 	return count;
6715 }
6716 static DRIVER_ATTR_RW(random);
6717 
6718 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6719 {
6720 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6721 }
6722 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6723 			       size_t count)
6724 {
6725 	int n;
6726 
6727 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6728 		sdebug_removable = (n > 0);
6729 		return count;
6730 	}
6731 	return -EINVAL;
6732 }
6733 static DRIVER_ATTR_RW(removable);
6734 
6735 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6736 {
6737 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6738 }
6739 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6740 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6741 			       size_t count)
6742 {
6743 	int n;
6744 
6745 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6746 		sdebug_host_lock = (n > 0);
6747 		return count;
6748 	}
6749 	return -EINVAL;
6750 }
6751 static DRIVER_ATTR_RW(host_lock);
6752 
6753 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6754 {
6755 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6756 }
6757 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6758 			    size_t count)
6759 {
6760 	int n;
6761 
6762 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6763 		sdebug_strict = (n > 0);
6764 		return count;
6765 	}
6766 	return -EINVAL;
6767 }
6768 static DRIVER_ATTR_RW(strict);
6769 
6770 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6771 {
6772 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6773 }
6774 static DRIVER_ATTR_RO(uuid_ctl);
6775 
6776 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6777 {
6778 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6779 }
6780 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6781 			     size_t count)
6782 {
6783 	int ret, n;
6784 
6785 	ret = kstrtoint(buf, 0, &n);
6786 	if (ret)
6787 		return ret;
6788 	sdebug_cdb_len = n;
6789 	all_config_cdb_len();
6790 	return count;
6791 }
6792 static DRIVER_ATTR_RW(cdb_len);
6793 
/* Long-form zoned model names, indexed by BLK_ZONED_* value. */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Short-form alternatives accepted by sdeb_zbc_model_str(). */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric alternatives accepted by sdeb_zbc_model_str(). */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6811 
6812 static int sdeb_zbc_model_str(const char *cp)
6813 {
6814 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6815 
6816 	if (res < 0) {
6817 		res = sysfs_match_string(zbc_model_strs_b, cp);
6818 		if (res < 0) {
6819 			res = sysfs_match_string(zbc_model_strs_c, cp);
6820 			if (res < 0)
6821 				return -EINVAL;
6822 		}
6823 	}
6824 	return res;
6825 }
6826 
6827 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6828 {
6829 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6830 			 zbc_model_strs_a[sdeb_zbc_model]);
6831 }
6832 static DRIVER_ATTR_RO(zbc);
6833 
6834 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6835 {
6836 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6837 }
6838 static DRIVER_ATTR_RO(tur_ms_to_ready);
6839 
6840 /* Note: The following array creates attribute files in the
6841    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6842    files (over those found in the /sys/module/scsi_debug/parameters
6843    directory) is that auxiliary actions can be triggered when an attribute
6844    is changed. For example see: add_host_store() above.
6845  */
6846 
/* Every driver attribute exposed under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
6889 
6890 static struct device *pseudo_primary;
6891 
/*
 * Module initialization: validate module parameters, allocate per-queue
 * state, optionally create the first backing store, register the pseudo
 * bus and driver, then add the requested number of hosts.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* --- module parameter validation --- */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		/* more than 256 LUNs forces the flat address method */
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/* --- per-submit-queue state --- */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	/* --- capacity and geometry --- */
	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	/* --- logical block provisioning limits --- */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	/* --- register pseudo device, bus and driver --- */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			/* the first host uses the store created above */
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

	/* error unwinding, in reverse order of setup */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* handles idx < 0 as a no-op */
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
7140 
7141 static void __exit scsi_debug_exit(void)
7142 {
7143 	int k = sdebug_num_hosts;
7144 
7145 	stop_all_queued();
7146 	for (; k; k--)
7147 		sdebug_do_remove_host(true);
7148 	free_all_queued();
7149 	driver_unregister(&sdebug_driverfs_driver);
7150 	bus_unregister(&pseudo_lld_bus);
7151 	root_device_unregister(pseudo_primary);
7152 
7153 	sdebug_erase_all_stores(false);
7154 	xa_destroy(per_store_ap);
7155 	kfree(sdebug_q_arr);
7156 }
7157 
7158 device_initcall(scsi_debug_init);
7159 module_exit(scsi_debug_exit);
7160 
/* .release callback for a simulated adapter's struct device. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
7168 
7169 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7170 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7171 {
7172 	if (idx < 0)
7173 		return;
7174 	if (!sip) {
7175 		if (xa_empty(per_store_ap))
7176 			return;
7177 		sip = xa_load(per_store_ap, idx);
7178 		if (!sip)
7179 			return;
7180 	}
7181 	vfree(sip->map_storep);
7182 	vfree(sip->dif_storep);
7183 	vfree(sip->storep);
7184 	xa_erase(per_store_ap, idx);
7185 	kfree(sip);
7186 }
7187 
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* keep the first store */
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): the flag is cleared on the first loop iteration,
	 * so this reset only fires when the xarray was empty — confirm
	 * that is the intended semantics.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
7203 
7204 /*
7205  * Returns store xarray new element index (idx) if >=0 else negated errno.
7206  * Limit the number of stores to 65536.
7207  */
7208 static int sdebug_add_store(void)
7209 {
7210 	int res;
7211 	u32 n_idx;
7212 	unsigned long iflags;
7213 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7214 	struct sdeb_store_info *sip = NULL;
7215 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7216 
7217 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7218 	if (!sip)
7219 		return -ENOMEM;
7220 
7221 	xa_lock_irqsave(per_store_ap, iflags);
7222 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7223 	if (unlikely(res < 0)) {
7224 		xa_unlock_irqrestore(per_store_ap, iflags);
7225 		kfree(sip);
7226 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7227 		return res;
7228 	}
7229 	sdeb_most_recent_idx = n_idx;
7230 	if (sdeb_first_idx < 0)
7231 		sdeb_first_idx = n_idx;
7232 	xa_unlock_irqrestore(per_store_ap, iflags);
7233 
7234 	res = -ENOMEM;
7235 	sip->storep = vzalloc(sz);
7236 	if (!sip->storep) {
7237 		pr_err("user data oom\n");
7238 		goto err;
7239 	}
7240 	if (sdebug_num_parts > 0)
7241 		sdebug_build_parts(sip->storep, sz);
7242 
7243 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7244 	if (sdebug_dix) {
7245 		int dif_size;
7246 
7247 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7248 		sip->dif_storep = vmalloc(dif_size);
7249 
7250 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7251 			sip->dif_storep);
7252 
7253 		if (!sip->dif_storep) {
7254 			pr_err("DIX oom\n");
7255 			goto err;
7256 		}
7257 		memset(sip->dif_storep, 0xff, dif_size);
7258 	}
7259 	/* Logical Block Provisioning */
7260 	if (scsi_debug_lbp()) {
7261 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7262 		sip->map_storep = vmalloc(array_size(sizeof(long),
7263 						     BITS_TO_LONGS(map_size)));
7264 
7265 		pr_info("%lu provisioning blocks\n", map_size);
7266 
7267 		if (!sip->map_storep) {
7268 			pr_err("LBP map oom\n");
7269 			goto err;
7270 		}
7271 
7272 		bitmap_zero(sip->map_storep, map_size);
7273 
7274 		/* Map first 1KB for partition table */
7275 		if (sdebug_num_parts)
7276 			map_region(sip, 0, 2);
7277 	}
7278 
7279 	rwlock_init(&sip->macc_lck);
7280 	return (int)n_idx;
7281 err:
7282 	sdebug_erase_store((int)n_idx, sip);
7283 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7284 	return res;
7285 }
7286 
/*
 * Create one simulated host bound to the store at per_host_idx (or the
 * first store when per_host_idx < 0), populate its target/LUN device
 * list and register it on the pseudo bus. Returns 0 or negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* the chosen store is now in use */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* undo the list insertion before dropping the device */
		spin_lock(&sdebug_host_list_lock);
		list_del(&sdbg_host->host_list);
		spin_unlock(&sdebug_host_list_lock);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * Once .release is set, a failed device_register() must be
	 * balanced with put_device() (which calls release and frees
	 * sdbg_host); before that point a plain kfree() is correct.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7345 
7346 static int sdebug_do_add_host(bool mk_new_store)
7347 {
7348 	int ph_idx = sdeb_most_recent_idx;
7349 
7350 	if (mk_new_store) {
7351 		ph_idx = sdebug_add_store();
7352 		if (ph_idx < 0)
7353 			return ph_idx;
7354 	}
7355 	return sdebug_add_host_helper(ph_idx);
7356 }
7357 
/*
 * Remove the most recently added host. Unless this is final teardown
 * (the_end), mark the host's store not-in-use when no surviving host
 * shares it, so a later add can re-use the store.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* last entry == most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host share this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7397 
7398 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7399 {
7400 	int num_in_q = 0;
7401 	struct sdebug_dev_info *devip;
7402 
7403 	block_unblock_all_queues(true);
7404 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7405 	if (NULL == devip) {
7406 		block_unblock_all_queues(false);
7407 		return	-ENODEV;
7408 	}
7409 	num_in_q = atomic_read(&devip->num_in_q);
7410 
7411 	if (qdepth > SDEBUG_CANQUEUE) {
7412 		qdepth = SDEBUG_CANQUEUE;
7413 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7414 			qdepth, SDEBUG_CANQUEUE);
7415 	}
7416 	if (qdepth < 1)
7417 		qdepth = 1;
7418 	if (qdepth != sdev->queue_depth)
7419 		scsi_change_queue_depth(sdev, qdepth);
7420 
7421 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7422 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7423 			    __func__, qdepth, num_in_q);
7424 	}
7425 	block_unblock_all_queues(false);
7426 	return sdev->queue_depth;
7427 }
7428 
7429 static bool fake_timeout(struct scsi_cmnd *scp)
7430 {
7431 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7432 		if (sdebug_every_nth < -1)
7433 			sdebug_every_nth = -1;
7434 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7435 			return true; /* ignore command causing timeout */
7436 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7437 			 scsi_medium_access_command(scp))
7438 			return true; /* time out reads and writes */
7439 	}
7440 	return false;
7441 }
7442 
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* 2: in process of becoming ready */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* report the remaining delay in the sense data */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7485 
7486 static void sdebug_map_queues(struct Scsi_Host *shost)
7487 {
7488 	int i, qoff;
7489 
7490 	if (shost->nr_hw_queues == 1)
7491 		return;
7492 
7493 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7494 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7495 
7496 		map->nr_queues  = 0;
7497 
7498 		if (i == HCTX_TYPE_DEFAULT)
7499 			map->nr_queues = submit_queues - poll_queues;
7500 		else if (i == HCTX_TYPE_POLL)
7501 			map->nr_queues = poll_queues;
7502 
7503 		if (!map->nr_queues) {
7504 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7505 			continue;
7506 		}
7507 
7508 		map->queue_offset = qoff;
7509 		blk_mq_map_queues(map);
7510 
7511 		qoff += map->nr_queues;
7512 	}
7513 }
7514 
/*
 * ->mq_poll callback: walk this queue's in_use bitmap and complete every
 * command that was deferred with SDEB_DEFER_POLL and whose completion
 * timestamp (cmpl_ts) has passed.  Returns the number of commands
 * completed on this invocation (also summed into sdeb_mq_poll_count).
 */
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;

	spin_lock_irqsave(&sqp->qc_lock, iflags);

	/* Nothing queued on this hw queue? */
	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
	if (qc_idx >= sdebug_max_queue)
		goto unlock;

	/*
	 * First iteration re-checks the bit found above (the lock was held
	 * throughout, but the bitmap may be rescanned after scsi_done below);
	 * subsequent iterations advance with find_next_bit().
	 */
	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
		if (first) {
			first = false;
			if (!test_bit(qc_idx, sqp->in_use_bm))
				continue;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (qc_idx >= sdebug_max_queue)
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		/* Only complete polled commands whose deadline has passed. */
		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;

		} else		/* ignoring non REQ_POLLED requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
				sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			/* shrink retired_max_queue down to the highest in-use slot */
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		/* drop the lock around the mid-level callback */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		num_entries++;
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
			break;
	}

unlock:
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
7608 
/*
 * ->queuecommand entry point: decode the CDB through opcode_ind_arr /
 * opcode_info_arr, apply the configured injection/strictness options, and
 * hand the selected resp_* handler to schedule_resp() which arranges the
 * (possibly delayed) completion.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	/* statistics gate both the command counter and error injection */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally hex-dump the CDB (up to 32 bytes) when verbose */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/*
			 * Disambiguate via the service action: low 5 bits of
			 * byte 1 (F_SA_LOW) or bytes 8-9 (the "high" form).
			 */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no attached entry matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for the sense
				 * field pointer */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* pending unit attentions pre-empt the command unless F_SKIP_UA */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7775 
/*
 * Host template for the simulated adapter.  Note that can_queue and
 * cmd_per_lun are overwritten from sdebug_max_queue in
 * sdebug_driver_probe() before each host is allocated.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7804 
7805 static int sdebug_driver_probe(struct device *dev)
7806 {
7807 	int error = 0;
7808 	struct sdebug_host_info *sdbg_host;
7809 	struct Scsi_Host *hpnt;
7810 	int hprot;
7811 
7812 	sdbg_host = dev_to_sdebug_host(dev);
7813 
7814 	sdebug_driver_template.can_queue = sdebug_max_queue;
7815 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7816 	if (!sdebug_clustering)
7817 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7818 
7819 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7820 	if (NULL == hpnt) {
7821 		pr_err("scsi_host_alloc failed\n");
7822 		error = -ENODEV;
7823 		return error;
7824 	}
7825 	if (submit_queues > nr_cpu_ids) {
7826 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7827 			my_name, submit_queues, nr_cpu_ids);
7828 		submit_queues = nr_cpu_ids;
7829 	}
7830 	/*
7831 	 * Decide whether to tell scsi subsystem that we want mq. The
7832 	 * following should give the same answer for each host.
7833 	 */
7834 	hpnt->nr_hw_queues = submit_queues;
7835 	if (sdebug_host_max_queue)
7836 		hpnt->host_tagset = 1;
7837 
7838 	/* poll queues are possible for nr_hw_queues > 1 */
7839 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7840 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7841 			 my_name, poll_queues, hpnt->nr_hw_queues);
7842 		poll_queues = 0;
7843 	}
7844 
7845 	/*
7846 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7847 	 * left over for non-polled I/O.
7848 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7849 	 */
7850 	if (poll_queues >= submit_queues) {
7851 		if (submit_queues < 3)
7852 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7853 		else
7854 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7855 				my_name, submit_queues - 1);
7856 		poll_queues = 1;
7857 	}
7858 	if (poll_queues)
7859 		hpnt->nr_maps = 3;
7860 
7861 	sdbg_host->shost = hpnt;
7862 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7863 		hpnt->max_id = sdebug_num_tgts + 1;
7864 	else
7865 		hpnt->max_id = sdebug_num_tgts;
7866 	/* = sdebug_max_luns; */
7867 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7868 
7869 	hprot = 0;
7870 
7871 	switch (sdebug_dif) {
7872 
7873 	case T10_PI_TYPE1_PROTECTION:
7874 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7875 		if (sdebug_dix)
7876 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7877 		break;
7878 
7879 	case T10_PI_TYPE2_PROTECTION:
7880 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7881 		if (sdebug_dix)
7882 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7883 		break;
7884 
7885 	case T10_PI_TYPE3_PROTECTION:
7886 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7887 		if (sdebug_dix)
7888 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7889 		break;
7890 
7891 	default:
7892 		if (sdebug_dix)
7893 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7894 		break;
7895 	}
7896 
7897 	scsi_host_set_prot(hpnt, hprot);
7898 
7899 	if (have_dif_prot || sdebug_dix)
7900 		pr_info("host protection%s%s%s%s%s%s%s\n",
7901 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7902 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7903 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7904 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7905 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7906 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7907 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7908 
7909 	if (sdebug_guard == 1)
7910 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7911 	else
7912 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7913 
7914 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7915 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7916 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7917 		sdebug_statistics = true;
7918 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7919 	if (error) {
7920 		pr_err("scsi_add_host failed\n");
7921 		error = -ENODEV;
7922 		scsi_host_put(hpnt);
7923 	} else {
7924 		scsi_scan_host(hpnt);
7925 	}
7926 
7927 	return error;
7928 }
7929 
7930 static void sdebug_driver_remove(struct device *dev)
7931 {
7932 	struct sdebug_host_info *sdbg_host;
7933 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7934 
7935 	sdbg_host = dev_to_sdebug_host(dev);
7936 
7937 	scsi_remove_host(sdbg_host->shost);
7938 
7939 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7940 				 dev_list) {
7941 		list_del(&sdbg_devinfo->dev_list);
7942 		kfree(sdbg_devinfo->zstate);
7943 		kfree(sdbg_devinfo);
7944 	}
7945 
7946 	scsi_host_put(sdbg_host->shost);
7947 }
7948 
/*
 * Bus ->match callback: every device on the pseudo bus matches every
 * driver, so unconditionally report a match (non-zero).
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7954 
/*
 * Pseudo bus that the simulated adapters hang off; probe/remove route to
 * the sdebug driver callbacks above.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
7962