xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 62257638)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
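/* Illustrative usage (an assumption, not from this file): loading the module
 * with no parameters, e.g.
 *	modprobe scsi_debug
 * should therefore yield 1 pseudo host with 1 target (id 0) holding 1
 * logical unit (lun 0): a single ram disk of DEF_DEV_SIZE_MB (8 MiB).
 */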
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
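/* Sketch of how this priority ordering is consumed (see make_ua() further
 * below): since lower numbered UAs have higher priority, the next UA to
 * report is simply the lowest set bit in the per-device bitmap:
 *
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	if (k != SDEBUG_NUM_UAS)
 *		... build sense for UA k, then clear_bit(k, devip->uas_bm);
 */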
208 
209 /* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
210  * is simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD holds BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
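/* Worked example (assuming a 64-bit build, so BITS_PER_LONG == 64):
 * SDEBUG_CANQUEUE = 3 * 64 = 192, meaning in_use_bm[] spans three longs and
 * at most 192 responses can be pending per submit queue at any one time.
 */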
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t num_in_q;
292 	atomic_t stopped;	/* 1: by SSU, 2: device start */
293 	bool used;
294 
295 	/* For ZBC devices */
296 	enum blk_zoned_model zmodel;
297 	unsigned int zcap;
298 	unsigned int zsize;
299 	unsigned int zsize_shift;
300 	unsigned int nr_zones;
301 	unsigned int nr_conv_zones;
302 	unsigned int nr_seq_zones;
303 	unsigned int nr_imp_open;
304 	unsigned int nr_exp_open;
305 	unsigned int nr_closed;
306 	unsigned int max_open;
307 	ktime_t create_ts;	/* time after bootup when this device was created */
308 	struct sdeb_zone_state *zstate;
309 };
310 
311 struct sdebug_host_info {
312 	struct list_head host_list;
313 	int si_idx;	/* sdeb_store_info (per host) xarray index */
314 	struct Scsi_Host *shost;
315 	struct device dev;
316 	struct list_head dev_info_list;
317 };
318 
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 	rwlock_t macc_lck;	/* for atomic media access on this store */
322 	u8 *storep;		/* user data storage (ram) */
323 	struct t10_pi_tuple *dif_storep; /* protection info */
324 	void *map_storep;	/* provisioning map */
325 };
326 
327 #define to_sdebug_host(d)	\
328 	container_of(d, struct sdebug_host_info, dev)
329 
330 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
331 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
332 
333 struct sdebug_defer {
334 	struct hrtimer hrt;
335 	struct execute_work ew;
336 	ktime_t cmpl_ts;	/* time (since boot) at which to complete this cmd */
337 	int sqa_idx;	/* index of sdebug_queue array */
338 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
339 	int hc_idx;	/* hostwide tag index */
340 	int issuing_cpu;
341 	bool init_hrt;
342 	bool init_wq;
343 	bool init_poll;
344 	bool aborted;	/* true when blk_abort_request() already called */
345 	enum sdeb_defer_type defer_t;
346 };
347 
348 struct sdebug_queued_cmd {
349 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
350 	 * instance indicates this slot is in use.
351 	 */
352 	struct sdebug_defer *sd_dp;
353 	struct scsi_cmnd *a_cmnd;
354 };
355 
356 struct sdebug_queue {
357 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
359 	spinlock_t qc_lock;
360 	atomic_t blocked;	/* to temporarily stop more being queued */
361 };
362 
363 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
364 static atomic_t sdebug_completions;  /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending;
368 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
369 
370 struct opcode_info_t {
371 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
372 				/* for terminating element */
373 	u8 opcode;		/* if num_attached > 0, preferred */
374 	u16 sa;			/* service action */
375 	u32 flags;		/* OR-ed set of SDEB_F_* */
376 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
378 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
379 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
380 };
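/* len_mask worked example (illustrative): the INQUIRY entry in
 * opcode_info_arr[] below is
 *	{6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}
 * len_mask[0] == 6 gives the cdb length; each later byte is a mask of the
 * bits allowed in the corresponding cdb byte. With the strict parameter
 * set, a test of the form (cdb[k] & ~len_mask[k]) catches any reserved bit,
 * e.g. bits 4..2 of INQUIRY's cdb[1], since 0xe3 leaves them cleared.
 */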
381 
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
384 	SDEB_I_INVALID_OPCODE =	0,
385 	SDEB_I_INQUIRY = 1,
386 	SDEB_I_REPORT_LUNS = 2,
387 	SDEB_I_REQUEST_SENSE = 3,
388 	SDEB_I_TEST_UNIT_READY = 4,
389 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
390 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
391 	SDEB_I_LOG_SENSE = 7,
392 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
393 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
394 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
395 	SDEB_I_START_STOP = 11,
396 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
397 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
398 	SDEB_I_MAINT_IN = 14,
399 	SDEB_I_MAINT_OUT = 15,
400 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
401 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
402 	SDEB_I_RESERVE = 18,		/* 6, 10 */
403 	SDEB_I_RELEASE = 19,		/* 6, 10 */
404 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
405 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
406 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
407 	SDEB_I_SEND_DIAG = 23,
408 	SDEB_I_UNMAP = 24,
409 	SDEB_I_WRITE_BUFFER = 25,
410 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
411 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
412 	SDEB_I_COMP_WRITE = 28,
413 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
414 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
415 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
416 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
417 };
418 
419 
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
423 	    0, 0, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
426 	    SDEB_I_RELEASE,
427 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 	    SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
438 	    SDEB_I_RELEASE,
439 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 	0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 	0, 0, 0, SDEB_I_VERIFY,
448 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 	     SDEB_I_MAINT_OUT, 0, 0, 0,
454 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 };
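/* Dispatch sketch (illustrative; the real lookup lives in the command entry
 * point): the first cdb byte picks an SDEB_I_* value from this array, which
 * then indexes opcode_info_arr[] defined further below:
 *
 *	const struct opcode_info_t *oip =
 *		&opcode_info_arr[opcode_ind_arr[cmd[0]]];
 */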
464 
465 /*
466  * The following "response" functions return the SCSI mid-level's 4 byte
467  * tuple-in-an-int. To handle commands with an IMMED bit, which ask for
468  * faster command completion, they can OR their return value with
469  * SDEG_RES_IMMED_MASK.
470  */
471 #define SDEG_RES_IMMED_MASK 0x40000000
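/* Sketch of the convention: a response function that honours an IMMED bit
 * can return
 *	res | SDEG_RES_IMMED_MASK;
 * to ask the deferral logic to complete that command without the usual
 * jdelay/ndelay wait.
 */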
472 
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
509 
510 /*
511  * The following are overflow arrays for cdbs that "hit" the same index in
512  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513  * should be placed in opcode_info_arr[], the others should be placed here.
514  */
515 static const struct opcode_info_t msense_iarr[] = {
516 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
517 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t mselect_iarr[] = {
521 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
522 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 };
524 
525 static const struct opcode_info_t read_iarr[] = {
526 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
528 	     0, 0, 0, 0} },
529 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 	     0xc7, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t write_iarr[] = {
537 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
538 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
539 		   0, 0, 0, 0, 0, 0} },
540 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
541 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
542 		   0, 0, 0} },
543 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
544 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 		   0xbf, 0xc7, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t verify_iarr[] = {
549 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 };
553 
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
558 };
559 
560 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
561 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
564 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
567 };
568 
569 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
570 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
576 };
577 
578 static const struct opcode_info_t write_same_iarr[] = {
579 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
582 };
583 
584 static const struct opcode_info_t reserve_iarr[] = {
585 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
586 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t release_iarr[] = {
590 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
591 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 };
593 
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
598 };
599 
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
604 };
605 
606 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
607 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
610 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
613 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
616 };
617 
618 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
619 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 };
623 
624 
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626  * plus the terminating elements for logic that scans this table such as
627  * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
631 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 	     0, 0} },					/* REPORT LUNS */
637 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
643 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
644 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
646 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
647 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
649 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650 	     0, 0, 0} },
651 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
652 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653 	     0, 0} },
654 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
656 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
660 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
671 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
673 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 				0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
680 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
684 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685 	     0xff, 0xff} },
686 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
688 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 	     0} },
690 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
692 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693 	     0} },
694 /* 20 */
695 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
702 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
709 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
711 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712 		 0, 0, 0, 0, 0} },
713 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 	    resp_sync_cache, sync_cache_iarr,
715 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
717 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
720 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 	    resp_pre_fetch, pre_fetch_iarr,
722 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
724 
725 /* 30 */
726 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
728 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
732 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
736 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
738 
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue;	/* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
758 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
759 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
760 static int sdebug_no_uld;
761 static int sdebug_num_parts = DEF_NUM_PARTS;
762 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
763 static int sdebug_opt_blks = DEF_OPT_BLKS;
764 static int sdebug_opts = DEF_OPTS;
765 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
766 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
767 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
768 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
769 static int sdebug_sector_size = DEF_SECTOR_SIZE;
770 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
771 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
772 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
773 static unsigned int sdebug_lbpu = DEF_LBPU;
774 static unsigned int sdebug_lbpws = DEF_LBPWS;
775 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
776 static unsigned int sdebug_lbprz = DEF_LBPRZ;
777 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
778 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
779 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
780 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
781 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
782 static int sdebug_uuid_ctl = DEF_UUID_CTL;
783 static bool sdebug_random = DEF_RANDOM;
784 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
785 static bool sdebug_removable = DEF_REMOVABLE;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
799 
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 			  SAM_LUN_AM_FLAT = 0x1,
802 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 			  SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity;	/* in sectors */
809 
810 /* old BIOS stuff; the kernel may get rid of these, but some mode sense
811    pages may still need them */
812 static int sdebug_heads;		/* heads per disk */
813 static int sdebug_cylinders_per;	/* cylinders per surface */
814 static int sdebug_sectors_per;		/* sectors per cylinder */
815 
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
818 
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
824 
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
834 
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_cap_mb;
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 
842 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
843 static int poll_queues; /* iouring iopoll interface.*/
844 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
845 
846 static DEFINE_RWLOCK(atomic_rw);
847 static DEFINE_RWLOCK(atomic_rw2);
848 
849 static rwlock_t *ramdisk_lck_a[2];
850 
851 static char sdebug_proc_name[] = MY_NAME;
852 static const char *my_name = MY_NAME;
853 
854 static struct bus_type pseudo_lld_bus;
855 
856 static struct device_driver sdebug_driverfs_driver = {
857 	.name 		= sdebug_proc_name,
858 	.bus		= &pseudo_lld_bus,
859 };
860 
861 static const int check_condition_result =
862 	SAM_STAT_CHECK_CONDITION;
863 
864 static const int illegal_condition_result =
865 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
866 
867 static const int device_qfull_result =
868 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
869 
870 static const int condition_met_result = SAM_STAT_CONDITION_MET;
871 
872 
873 /* Only do the extra work involved in logical block provisioning if one or
874  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
875  * real reads and writes (i.e. not skipping them for speed).
876  */
877 static inline bool scsi_debug_lbp(void)
878 {
879 	return 0 == sdebug_fake_rw &&
880 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
881 }
882 
883 static void *lba2fake_store(struct sdeb_store_info *sip,
884 			    unsigned long long lba)
885 {
886 	struct sdeb_store_info *lsip = sip;
887 
888 	lba = do_div(lba, sdebug_store_sectors);
889 	if (!sip || !sip->storep) {
890 		WARN_ON_ONCE(true);
891 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
892 	}
893 	return lsip->storep + lba * sdebug_sector_size;
894 }
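/* Example: with sdebug_store_sectors == 16384, an LBA of 20000 maps to
 * fake-store sector 20000 % 16384 == 3616; a virtual_gb capacity larger
 * than the backing ram thus wraps around onto the same storage.
 */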
895 
896 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
897 				      sector_t sector)
898 {
899 	sector = sector_div(sector, sdebug_store_sectors);
900 
901 	return sip->dif_storep + sector;
902 }
903 
904 static void sdebug_max_tgts_luns(void)
905 {
906 	struct sdebug_host_info *sdbg_host;
907 	struct Scsi_Host *hpnt;
908 
909 	spin_lock(&sdebug_host_list_lock);
910 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
911 		hpnt = sdbg_host->shost;
912 		if ((hpnt->this_id >= 0) &&
913 		    (sdebug_num_tgts > hpnt->this_id))
914 			hpnt->max_id = sdebug_num_tgts + 1;
915 		else
916 			hpnt->max_id = sdebug_num_tgts;
917 		/* sdebug_max_luns; */
918 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
919 	}
920 	spin_unlock(&sdebug_host_list_lock);
921 }
922 
923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
924 
925 /* Set in_bit to -1 to indicate no bit position of invalid field */
926 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
927 				 enum sdeb_cmd_data c_d,
928 				 int in_byte, int in_bit)
929 {
930 	unsigned char *sbuff;
931 	u8 sks[4];
932 	int sl, asc;
933 
934 	sbuff = scp->sense_buffer;
935 	if (!sbuff) {
936 		sdev_printk(KERN_ERR, scp->device,
937 			    "%s: sense_buffer is NULL\n", __func__);
938 		return;
939 	}
940 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
941 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
942 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
943 	memset(sks, 0, sizeof(sks));
944 	sks[0] = 0x80;
945 	if (c_d)
946 		sks[0] |= 0x40;
947 	if (in_bit >= 0) {
948 		sks[0] |= 0x8;
949 		sks[0] |= 0x7 & in_bit;
950 	}
951 	put_unaligned_be16(in_byte, sks + 1);
952 	if (sdebug_dsense) {
953 		sl = sbuff[7] + 8;
954 		sbuff[7] = sl;
955 		sbuff[sl] = 0x2;
956 		sbuff[sl + 1] = 0x6;
957 		memcpy(sbuff + sl + 4, sks, 3);
958 	} else
959 		memcpy(sbuff + 15, sks, 3);
960 	if (sdebug_verbose)
961 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
962 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
963 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
964 }
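/* Worked sense-key-specific (SKS) encoding for the function above: with
 * c_d == SDEB_IN_CDB, in_byte == 1 and in_bit == 5, sks[0] becomes
 * 0x80 | 0x40 | 0x8 | 5 == 0xcd (SKSV, C/D, BPV, bit pointer 5) and
 * sks[1..2] == 0x0001 (field pointer): "illegal bit 5 in cdb byte 1".
 */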
965 
966 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
967 {
968 	if (!scp->sense_buffer) {
969 		sdev_printk(KERN_ERR, scp->device,
970 			    "%s: sense_buffer is NULL\n", __func__);
971 		return;
972 	}
973 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
974 
975 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
976 
977 	if (sdebug_verbose)
978 		sdev_printk(KERN_INFO, scp->device,
979 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980 			    my_name, key, asc, asq);
981 }
982 
983 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
984 {
985 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
986 }
987 
988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 			    void __user *arg)
990 {
991 	if (sdebug_verbose) {
992 		if (0x1261 == cmd)
993 			sdev_printk(KERN_INFO, dev,
994 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
995 		else if (0x5331 == cmd)
996 			sdev_printk(KERN_INFO, dev,
997 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 				    __func__);
999 		else
1000 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001 				    __func__, cmd);
1002 	}
1003 	return -EINVAL;
1004 	/* return -ENOTTY; // correct return but upsets fdisk */
1005 }
1006 
1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009 	switch (sdebug_cdb_len) {
1010 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = false;
1013 		sdev->use_10_for_ms = false;
1014 		break;
1015 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016 		sdev->use_10_for_rw = true;
1017 		sdev->use_16_for_rw = false;
1018 		sdev->use_10_for_ms = false;
1019 		break;
1020 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021 		sdev->use_10_for_rw = true;
1022 		sdev->use_16_for_rw = false;
1023 		sdev->use_10_for_ms = true;
1024 		break;
1025 	case 16:
1026 		sdev->use_10_for_rw = false;
1027 		sdev->use_16_for_rw = true;
1028 		sdev->use_10_for_ms = true;
1029 		break;
1030 	case 32: /* No knobs to suggest this so same as 16 for now */
1031 		sdev->use_10_for_rw = false;
1032 		sdev->use_16_for_rw = true;
1033 		sdev->use_10_for_ms = true;
1034 		break;
1035 	default:
1036 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1037 			sdebug_cdb_len);
1038 		sdev->use_10_for_rw = true;
1039 		sdev->use_16_for_rw = false;
1040 		sdev->use_10_for_ms = false;
1041 		sdebug_cdb_len = 10;
1042 		break;
1043 	}
1044 }
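/* Illustrative effect (assuming the module is loaded with cdb_len=16): sd
 * would then favour READ(16)/WRITE(16) and MODE SENSE(10)/MODE SELECT(10)
 * for these devices, per the use_16_for_rw and use_10_for_ms flags above.
 */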
1045 
1046 static void all_config_cdb_len(void)
1047 {
1048 	struct sdebug_host_info *sdbg_host;
1049 	struct Scsi_Host *shost;
1050 	struct scsi_device *sdev;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 		shost = sdbg_host->shost;
1055 		shost_for_each_device(sdev, shost) {
1056 			config_cdb_len(sdev);
1057 		}
1058 	}
1059 	spin_unlock(&sdebug_host_list_lock);
1060 }
1061 
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064 	struct sdebug_host_info *sdhp;
1065 	struct sdebug_dev_info *dp;
1066 
1067 	spin_lock(&sdebug_host_list_lock);
1068 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 			if ((devip->sdbg_host == dp->sdbg_host) &&
1071 			    (devip->target == dp->target))
1072 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 		}
1074 	}
1075 	spin_unlock(&sdebug_host_list_lock);
1076 }
1077 
1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1079 {
1080 	int k;
1081 
1082 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 	if (k != SDEBUG_NUM_UAS) {
1084 		const char *cp = NULL;
1085 
1086 		switch (k) {
1087 		case SDEBUG_UA_POR:
1088 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 					POWER_ON_RESET_ASCQ);
1090 			if (sdebug_verbose)
1091 				cp = "power on reset";
1092 			break;
1093 		case SDEBUG_UA_POOCCUR:
1094 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 					POWER_ON_OCCURRED_ASCQ);
1096 			if (sdebug_verbose)
1097 				cp = "power on occurred";
1098 			break;
1099 		case SDEBUG_UA_BUS_RESET:
1100 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1101 					BUS_RESET_ASCQ);
1102 			if (sdebug_verbose)
1103 				cp = "bus reset";
1104 			break;
1105 		case SDEBUG_UA_MODE_CHANGED:
1106 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1107 					MODE_CHANGED_ASCQ);
1108 			if (sdebug_verbose)
1109 				cp = "mode parameters changed";
1110 			break;
1111 		case SDEBUG_UA_CAPACITY_CHANGED:
1112 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1113 					CAPACITY_CHANGED_ASCQ);
1114 			if (sdebug_verbose)
1115 				cp = "capacity data changed";
1116 			break;
1117 		case SDEBUG_UA_MICROCODE_CHANGED:
1118 			mk_sense_buffer(scp, UNIT_ATTENTION,
1119 					TARGET_CHANGED_ASC,
1120 					MICROCODE_CHANGED_ASCQ);
1121 			if (sdebug_verbose)
1122 				cp = "microcode has been changed";
1123 			break;
1124 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1125 			mk_sense_buffer(scp, UNIT_ATTENTION,
1126 					TARGET_CHANGED_ASC,
1127 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1128 			if (sdebug_verbose)
1129 				cp = "microcode has been changed without reset";
1130 			break;
1131 		case SDEBUG_UA_LUNS_CHANGED:
1132 			/*
1133 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1134 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1135 			 * on the target, until a REPORT LUNS command is
1136 			 * received.  SPC-4 behavior is to report it only once.
1137 			 * NOTE:  sdebug_scsi_level does not use the same
1138 			 * values as struct scsi_device->scsi_level.
1139 			 */
1140 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1141 				clear_luns_changed_on_target(devip);
1142 			mk_sense_buffer(scp, UNIT_ATTENTION,
1143 					TARGET_CHANGED_ASC,
1144 					LUNS_CHANGED_ASCQ);
1145 			if (sdebug_verbose)
1146 				cp = "reported luns data has changed";
1147 			break;
1148 		default:
1149 			pr_warn("unexpected unit attention code=%d\n", k);
1150 			if (sdebug_verbose)
1151 				cp = "unknown";
1152 			break;
1153 		}
1154 		clear_bit(k, devip->uas_bm);
1155 		if (sdebug_verbose)
1156 			sdev_printk(KERN_INFO, scp->device,
1157 				   "%s reports: Unit attention: %s\n",
1158 				   my_name, cp);
1159 		return check_condition_result;
1160 	}
1161 	return 0;
1162 }
1163 
1164 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1165 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1166 				int arr_len)
1167 {
1168 	int act_len;
1169 	struct scsi_data_buffer *sdb = &scp->sdb;
1170 
1171 	if (!sdb->length)
1172 		return 0;
1173 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1174 		return DID_ERROR << 16;
1175 
1176 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1177 				      arr, arr_len);
1178 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1179 
1180 	return 0;
1181 }
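/* Example: if the initiator allocated 4096 bytes (scsi_bufflen) and a
 * 36 byte standard INQUIRY response is copied in, act_len == 36 and the
 * resid is set to 4096 - 36 == 4060.
 */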
1182 
1183 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1184  * (DID_ERROR << 16). Can write to an offset in the data-in buffer. Multiple
1185  * calls need not write in ascending offset order. Assumes resid
1186  * set to scsi_bufflen() prior to any calls.
1187  */
1188 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1189 				  int arr_len, unsigned int off_dst)
1190 {
1191 	unsigned int act_len, n;
1192 	struct scsi_data_buffer *sdb = &scp->sdb;
1193 	off_t skip = off_dst;
1194 
1195 	if (sdb->length <= off_dst)
1196 		return 0;
1197 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1198 		return DID_ERROR << 16;
1199 
1200 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1201 				       arr, arr_len, skip);
1202 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1203 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1204 		 scsi_get_resid(scp));
1205 	n = scsi_bufflen(scp) - (off_dst + act_len);
1206 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1207 	return 0;
1208 }
1209 
1210 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1211  * 'arr' or -1 if error.
1212  */
1213 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1214 			       int arr_len)
1215 {
1216 	if (!scsi_bufflen(scp))
1217 		return 0;
1218 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1219 		return -1;
1220 
1221 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1222 }
1223 
1224 
1225 static char sdebug_inq_vendor_id[9] = "Linux   ";
1226 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1227 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1228 /* Use some locally assigned NAAs for SAS addresses. */
1229 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1230 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1231 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1232 
1233 /* Device identification VPD page. Returns number of bytes placed in arr */
1234 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1235 			  int target_dev_id, int dev_id_num,
1236 			  const char *dev_id_str, int dev_id_str_len,
1237 			  const uuid_t *lu_name)
1238 {
1239 	int num, port_a;
1240 	char b[32];
1241 
1242 	port_a = target_dev_id + 1;
1243 	/* T10 vendor identifier field format (faked) */
1244 	arr[0] = 0x2;	/* ASCII */
1245 	arr[1] = 0x1;
1246 	arr[2] = 0x0;
1247 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1248 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1249 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1250 	num = 8 + 16 + dev_id_str_len;
1251 	arr[3] = num;
1252 	num += 4;
1253 	if (dev_id_num >= 0) {
1254 		if (sdebug_uuid_ctl) {
1255 			/* Locally assigned UUID */
1256 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1257 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1258 			arr[num++] = 0x0;
1259 			arr[num++] = 0x12;
1260 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1261 			arr[num++] = 0x0;
1262 			memcpy(arr + num, lu_name, 16);
1263 			num += 16;
1264 		} else {
1265 			/* NAA-3, Logical unit identifier (binary) */
1266 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1267 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1268 			arr[num++] = 0x0;
1269 			arr[num++] = 0x8;
1270 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1271 			num += 8;
1272 		}
1273 		/* Target relative port number */
1274 		arr[num++] = 0x61;	/* proto=sas, binary */
1275 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x4;	/* length */
1278 		arr[num++] = 0x0;	/* reserved */
1279 		arr[num++] = 0x0;	/* reserved */
1280 		arr[num++] = 0x0;
1281 		arr[num++] = 0x1;	/* relative port A */
1282 	}
1283 	/* NAA-3, Target port identifier */
1284 	arr[num++] = 0x61;	/* proto=sas, binary */
1285 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1286 	arr[num++] = 0x0;
1287 	arr[num++] = 0x8;
1288 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1289 	num += 8;
1290 	/* NAA-3, Target port group identifier */
1291 	arr[num++] = 0x61;	/* proto=sas, binary */
1292 	arr[num++] = 0x95;	/* piv=1, target port group id */
1293 	arr[num++] = 0x0;
1294 	arr[num++] = 0x4;
1295 	arr[num++] = 0;
1296 	arr[num++] = 0;
1297 	put_unaligned_be16(port_group_id, arr + num);
1298 	num += 2;
1299 	/* NAA-3, Target device identifier */
1300 	arr[num++] = 0x61;	/* proto=sas, binary */
1301 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1302 	arr[num++] = 0x0;
1303 	arr[num++] = 0x8;
1304 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1305 	num += 8;
1306 	/* SCSI name string: Target device identifier */
1307 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1308 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1309 	arr[num++] = 0x0;
1310 	arr[num++] = 24;
1311 	memcpy(arr + num, "naa.32222220", 12);
1312 	num += 12;
1313 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1314 	memcpy(arr + num, b, 8);
1315 	num += 8;
1316 	memset(arr + num, 0, 4);
1317 	num += 4;
1318 	return num;
1319 }
1320 
1321 static unsigned char vpd84_data[] = {
1322 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1323     0x22,0x22,0x22,0x0,0xbb,0x1,
1324     0x22,0x22,0x22,0x0,0xbb,0x2,
1325 };
1326 
1327 /*  Software interface identification VPD page */
1328 static int inquiry_vpd_84(unsigned char *arr)
1329 {
1330 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1331 	return sizeof(vpd84_data);
1332 }
1333 
1334 /* Management network addresses VPD page */
1335 static int inquiry_vpd_85(unsigned char *arr)
1336 {
1337 	int num = 0;
1338 	const char *na1 = "https://www.kernel.org/config";
1339 	const char *na2 = "http://www.kernel.org/log";
1340 	int plen, olen;
1341 
1342 	arr[num++] = 0x1;	/* lu, storage config */
1343 	arr[num++] = 0x0;	/* reserved */
1344 	arr[num++] = 0x0;
1345 	olen = strlen(na1);
1346 	plen = olen + 1;
1347 	if (plen % 4)
1348 		plen = ((plen / 4) + 1) * 4;
1349 	arr[num++] = plen;	/* length, null terminated, padded */
1350 	memcpy(arr + num, na1, olen);
1351 	memset(arr + num + olen, 0, plen - olen);
1352 	num += plen;
1353 
1354 	arr[num++] = 0x4;	/* lu, logging */
1355 	arr[num++] = 0x0;	/* reserved */
1356 	arr[num++] = 0x0;
1357 	olen = strlen(na2);
1358 	plen = olen + 1;
1359 	if (plen % 4)
1360 		plen = ((plen / 4) + 1) * 4;
1361 	arr[num++] = plen;	/* length, null terminated, padded */
1362 	memcpy(arr + num, na2, olen);
1363 	memset(arr + num + olen, 0, plen - olen);
1364 	num += plen;
1365 
1366 	return num;
1367 }
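/* Padding arithmetic for the page above: na1 is 29 characters, so plen is
 * 30 before rounding and 32 after rounding up to a multiple of 4; the
 * memset() zeroes the null terminator plus two pad bytes.
 */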
1368 
1369 /* SCSI ports VPD page */
1370 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1371 {
1372 	int num = 0;
1373 	int port_a, port_b;
1374 
1375 	port_a = target_dev_id + 1;
1376 	port_b = port_a + 1;
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1381 	memset(arr + num, 0, 6);
1382 	num += 6;
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 12;	/* length tp descriptor */
1385 	/* naa-5 target port identifier (A) */
1386 	arr[num++] = 0x61;	/* proto=sas, binary */
1387 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x8;	/* length */
1390 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1391 	num += 8;
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x0;
1395 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1396 	memset(arr + num, 0, 6);
1397 	num += 6;
1398 	arr[num++] = 0x0;
1399 	arr[num++] = 12;	/* length tp descriptor */
1400 	/* naa-5 target port identifier (B) */
1401 	arr[num++] = 0x61;	/* proto=sas, binary */
1402 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1403 	arr[num++] = 0x0;	/* reserved */
1404 	arr[num++] = 0x8;	/* length */
1405 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1406 	num += 8;
1407 
1408 	return num;
1409 }
1410 
1411 
1412 static unsigned char vpd89_data[] = {
1413 /* from 4th byte */ 0,0,0,0,
1414 'l','i','n','u','x',' ',' ',' ',
1415 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1416 '1','2','3','4',
1417 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1418 0xec,0,0,0,
1419 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1420 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1422 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1423 0x53,0x41,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x20,0x20,
1426 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1427 0x10,0x80,
1428 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1429 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1430 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1432 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1433 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1434 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1439 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1440 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1441 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1454 };
1455 
1456 /* ATA Information VPD page */
1457 static int inquiry_vpd_89(unsigned char *arr)
1458 {
1459 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 	return sizeof(vpd89_data);
1461 }
1462 
1463 
1464 static unsigned char vpdb0_data[] = {
1465 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1469 };
1470 
1471 /* Block limits VPD page (SBC-3) */
1472 static int inquiry_vpd_b0(unsigned char *arr)
1473 {
1474 	unsigned int gran;
1475 
1476 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1477 
1478 	/* Optimal transfer length granularity */
1479 	if (sdebug_opt_xferlen_exp != 0 &&
1480 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 		gran = 1 << sdebug_opt_xferlen_exp;
1482 	else
1483 		gran = 1 << sdebug_physblk_exp;
1484 	put_unaligned_be16(gran, arr + 2);
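	/* e.g. sdebug_physblk_exp = 3 (opt_xferlen_exp unset) gives gran = 8 */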
1485 
1486 	/* Maximum Transfer Length */
1487 	if (sdebug_store_sectors > 0x400)
1488 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1489 
1490 	/* Optimal Transfer Length */
1491 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1492 
1493 	if (sdebug_lbpu) {
1494 		/* Maximum Unmap LBA Count */
1495 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1496 
1497 		/* Maximum Unmap Block Descriptor Count */
1498 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1499 	}
1500 
1501 	/* Unmap Granularity Alignment */
1502 	if (sdebug_unmap_alignment) {
1503 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 		arr[28] |= 0x80; /* UGAVALID */
1505 	}
1506 
1507 	/* Optimal Unmap Granularity */
1508 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1509 
1510 	/* Maximum WRITE SAME Length */
1511 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1512 
	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1516 }
1517 
1518 /* Block device characteristics VPD page (SBC-3) */
1519 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1520 {
1521 	memset(arr, 0, 0x3c);
1522 	arr[0] = 0;
1523 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1524 	arr[2] = 0;
1525 	arr[3] = 5;	/* less than 1.8" */
1526 	if (devip->zmodel == BLK_ZONED_HA)
1527 		arr[4] = 1 << 4;	/* zoned field = 01b */
1528 
1529 	return 0x3c;
1530 }
1531 
1532 /* Logical block provisioning VPD page (SBC-4) */
1533 static int inquiry_vpd_b2(unsigned char *arr)
1534 {
1535 	memset(arr, 0, 0x4);
1536 	arr[0] = 0;			/* threshold exponent */
1537 	if (sdebug_lbpu)
1538 		arr[1] = 1 << 7;
1539 	if (sdebug_lbpws)
1540 		arr[1] |= 1 << 6;
1541 	if (sdebug_lbpws10)
1542 		arr[1] |= 1 << 5;
1543 	if (sdebug_lbprz && scsi_debug_lbp())
1544 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1545 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1546 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1547 	/* threshold_percentage=0 */
1548 	return 0x4;
1549 }
1550 
1551 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1552 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1553 {
1554 	memset(arr, 0, 0x3c);
1555 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1556 	/*
1557 	 * Set Optimal number of open sequential write preferred zones and
1558 	 * Optimal number of non-sequentially written sequential write
1559 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1560 	 * fields set to zero, apart from Max. number of open swrz_s field.
1561 	 */
1562 	put_unaligned_be32(0xffffffff, &arr[4]);
1563 	put_unaligned_be32(0xffffffff, &arr[8]);
1564 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1565 		put_unaligned_be32(devip->max_open, &arr[12]);
1566 	else
1567 		put_unaligned_be32(0xffffffff, &arr[12]);
1568 	if (devip->zcap < devip->zsize) {
1569 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1570 		put_unaligned_be64(devip->zsize, &arr[20]);
1571 	} else {
1572 		arr[19] = 0;
1573 	}
1574 	return 0x3c;
1575 }
1576 
1577 #define SDEBUG_LONG_INQ_SZ 96
1578 #define SDEBUG_MAX_INQ_ARR_SZ 584
1579 
1580 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1581 {
1582 	unsigned char pq_pdt;
1583 	unsigned char *arr;
1584 	unsigned char *cmd = scp->cmnd;
1585 	u32 alloc_len, n;
1586 	int ret;
1587 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1588 
1589 	alloc_len = get_unaligned_be16(cmd + 3);
1590 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
1592 		return DID_REQUEUE << 16;
1593 	is_disk = (sdebug_ptype == TYPE_DISK);
1594 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1595 	is_disk_zbc = (is_disk || is_zbc);
1596 	have_wlun = scsi_is_wlun(scp->device->lun);
1597 	if (have_wlun)
1598 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1599 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1600 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1601 	else
1602 		pq_pdt = (sdebug_ptype & 0x1f);
1603 	arr[0] = pq_pdt;
1604 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1605 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1606 		kfree(arr);
1607 		return check_condition_result;
1608 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1609 		int lu_id_num, port_group_id, target_dev_id;
1610 		u32 len;
1611 		char lu_id_str[6];
1612 		int host_no = devip->sdbg_host->shost->host_no;
1613 
1614 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1615 		    (devip->channel & 0x7f);
1616 		if (sdebug_vpd_use_hostno == 0)
1617 			host_no = 0;
1618 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1619 			    (devip->target * 1000) + devip->lun);
1620 		target_dev_id = ((host_no + 1) * 2000) +
1621 				 (devip->target * 1000) - 3;
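		/*
		 * Worked example: host_no = 0, target = 1, lun = 2 gives
		 * lu_id_num = 2000 + 1000 + 2 = 3002 and
		 * target_dev_id = 2000 + 1000 - 3 = 2997.
		 */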
1622 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1623 		if (0 == cmd[2]) { /* supported vital product data pages */
1624 			arr[1] = cmd[2];	/*sanity */
1625 			n = 4;
1626 			arr[n++] = 0x0;   /* this page */
1627 			arr[n++] = 0x80;  /* unit serial number */
1628 			arr[n++] = 0x83;  /* device identification */
1629 			arr[n++] = 0x84;  /* software interface ident. */
1630 			arr[n++] = 0x85;  /* management network addresses */
1631 			arr[n++] = 0x86;  /* extended inquiry */
1632 			arr[n++] = 0x87;  /* mode page policy */
1633 			arr[n++] = 0x88;  /* SCSI ports */
1634 			if (is_disk_zbc) {	  /* SBC or ZBC */
1635 				arr[n++] = 0x89;  /* ATA information */
1636 				arr[n++] = 0xb0;  /* Block limits */
1637 				arr[n++] = 0xb1;  /* Block characteristics */
1638 				if (is_disk)
1639 					arr[n++] = 0xb2;  /* LB Provisioning */
1640 				if (is_zbc)
1641 					arr[n++] = 0xb6;  /* ZB dev. char. */
1642 			}
1643 			arr[3] = n - 4;	  /* number of supported VPD pages */
1644 		} else if (0x80 == cmd[2]) { /* unit serial number */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = len;
1647 			memcpy(&arr[4], lu_id_str, len);
1648 		} else if (0x83 == cmd[2]) { /* device identification */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1651 						target_dev_id, lu_id_num,
1652 						lu_id_str, len,
1653 						&devip->lu_name);
1654 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = inquiry_vpd_84(&arr[4]);
1657 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1658 			arr[1] = cmd[2];	/*sanity */
1659 			arr[3] = inquiry_vpd_85(&arr[4]);
1660 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1661 			arr[1] = cmd[2];	/*sanity */
1662 			arr[3] = 0x3c;	/* number of following entries */
1663 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1664 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1665 			else if (have_dif_prot)
1666 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1667 			else
1668 				arr[4] = 0x0;   /* no protection stuff */
1669 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1670 		} else if (0x87 == cmd[2]) { /* mode page policy */
1671 			arr[1] = cmd[2];	/*sanity */
1672 			arr[3] = 0x8;	/* number of following entries */
1673 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1674 			arr[6] = 0x80;	/* mlus, shared */
1675 			arr[8] = 0x18;	 /* protocol specific lu */
1676 			arr[10] = 0x82;	 /* mlus, per initiator port */
1677 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1678 			arr[1] = cmd[2];	/*sanity */
1679 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1680 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			n = inquiry_vpd_89(&arr[4]);
1683 			put_unaligned_be16(n, arr + 2);
1684 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1685 			arr[1] = cmd[2];        /*sanity */
1686 			arr[3] = inquiry_vpd_b0(&arr[4]);
1687 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1688 			arr[1] = cmd[2];        /*sanity */
1689 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1690 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1691 			arr[1] = cmd[2];        /*sanity */
1692 			arr[3] = inquiry_vpd_b2(&arr[4]);
1693 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1694 			arr[1] = cmd[2];        /*sanity */
1695 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1696 		} else {
1697 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1698 			kfree(arr);
1699 			return check_condition_result;
1700 		}
1701 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1702 		ret = fill_from_dev_buffer(scp, arr,
1703 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1704 		kfree(arr);
1705 		return ret;
1706 	}
1707 	/* drops through here for a standard inquiry */
1708 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1709 	arr[2] = sdebug_scsi_level;
1710 	arr[3] = 2;    /* response_data_format==2 */
1711 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1712 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1713 	if (sdebug_vpd_use_hostno == 0)
1714 		arr[5] |= 0x10; /* claim: implicit TPGS */
1715 	arr[6] = 0x10; /* claim: MultiP */
1716 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1717 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1718 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1719 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1720 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place the driver date in ASCII */
1722 	memcpy(&arr[36], sdebug_version_date, 8);
1723 	/* version descriptors (2 bytes each) follow */
1724 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1725 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1726 	n = 62;
1727 	if (is_disk) {		/* SBC-4 no version claimed */
1728 		put_unaligned_be16(0x600, arr + n);
1729 		n += 2;
1730 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1731 		put_unaligned_be16(0x525, arr + n);
1732 		n += 2;
1733 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1734 		put_unaligned_be16(0x624, arr + n);
1735 		n += 2;
1736 	}
1737 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1738 	ret = fill_from_dev_buffer(scp, arr,
1739 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1740 	kfree(arr);
1741 	return ret;
1742 }
1743 
1744 /* See resp_iec_m_pg() for how this data is manipulated */
1745 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1746 				   0, 0, 0x0, 0x0};
1747 
1748 static int resp_requests(struct scsi_cmnd *scp,
1749 			 struct sdebug_dev_info *devip)
1750 {
1751 	unsigned char *cmd = scp->cmnd;
1752 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1753 	bool dsense = !!(cmd[1] & 1);
1754 	u32 alloc_len = cmd[4];
1755 	u32 len = 18;
1756 	int stopped_state = atomic_read(&devip->stopped);
1757 
1758 	memset(arr, 0, sizeof(arr));
1759 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1760 		if (dsense) {
1761 			arr[0] = 0x72;
1762 			arr[1] = NOT_READY;
1763 			arr[2] = LOGICAL_UNIT_NOT_READY;
1764 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1765 			len = 8;
1766 		} else {
1767 			arr[0] = 0x70;
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1769 			arr[7] = 0xa;			/* 18 byte sense buffer */
1770 			arr[12] = LOGICAL_UNIT_NOT_READY;
1771 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1772 		}
1773 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1774 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1775 		if (dsense) {
1776 			arr[0] = 0x72;
1777 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1778 			arr[2] = THRESHOLD_EXCEEDED;
1779 			arr[3] = 0xff;		/* Failure prediction(false) */
1780 			len = 8;
1781 		} else {
1782 			arr[0] = 0x70;
1783 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1784 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1785 			arr[12] = THRESHOLD_EXCEEDED;
1786 			arr[13] = 0xff;		/* Failure prediction(false) */
1787 		}
1788 	} else {	/* nothing to report */
1789 		if (dsense) {
1790 			len = 8;
1791 			memset(arr, 0, len);
1792 			arr[0] = 0x72;
1793 		} else {
1794 			memset(arr, 0, len);
1795 			arr[0] = 0x70;
1796 			arr[7] = 0xa;
1797 		}
1798 	}
1799 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1800 }
1801 
1802 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1803 {
1804 	unsigned char *cmd = scp->cmnd;
1805 	int power_cond, want_stop, stopped_state;
1806 	bool changing;
1807 
1808 	power_cond = (cmd[4] & 0xf0) >> 4;
1809 	if (power_cond) {
1810 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1811 		return check_condition_result;
1812 	}
1813 	want_stop = !(cmd[4] & 1);
1814 	stopped_state = atomic_read(&devip->stopped);
1815 	if (stopped_state == 2) {
1816 		ktime_t now_ts = ktime_get_boottime();
1817 
1818 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1819 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1820 
1821 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1822 				/* tur_ms_to_ready timer extinguished */
1823 				atomic_set(&devip->stopped, 0);
1824 				stopped_state = 0;
1825 			}
1826 		}
1827 		if (stopped_state == 2) {
1828 			if (want_stop) {
1829 				stopped_state = 1;	/* dummy up success */
1830 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1831 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1832 				return check_condition_result;
1833 			}
1834 		}
1835 	}
1836 	changing = (stopped_state != want_stop);
1837 	if (changing)
1838 		atomic_xchg(&devip->stopped, want_stop);
1839 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1840 		return SDEG_RES_IMMED_MASK;
1841 	else
1842 		return 0;
1843 }
1844 
1845 static sector_t get_sdebug_capacity(void)
1846 {
1847 	static const unsigned int gibibyte = 1073741824;
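	/* e.g. sdebug_virtual_gb = 4 with 512-byte sectors -> 8388608 sectors */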
1848 
1849 	if (sdebug_virtual_gb > 0)
1850 		return (sector_t)sdebug_virtual_gb *
1851 			(gibibyte / sdebug_sector_size);
1852 	else
1853 		return sdebug_store_sectors;
1854 }
1855 
1856 #define SDEBUG_READCAP_ARR_SZ 8
1857 static int resp_readcap(struct scsi_cmnd *scp,
1858 			struct sdebug_dev_info *devip)
1859 {
1860 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1861 	unsigned int capac;
1862 
1863 	/* following just in case virtual_gb changed */
1864 	sdebug_capacity = get_sdebug_capacity();
1865 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
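	/* READ CAPACITY(10) reports the address of the last logical block */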
1866 	if (sdebug_capacity < 0xffffffff) {
1867 		capac = (unsigned int)sdebug_capacity - 1;
1868 		put_unaligned_be32(capac, arr + 0);
1869 	} else
1870 		put_unaligned_be32(0xffffffff, arr + 0);
1871 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1872 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1873 }
1874 
1875 #define SDEBUG_READCAP16_ARR_SZ 32
1876 static int resp_readcap16(struct scsi_cmnd *scp,
1877 			  struct sdebug_dev_info *devip)
1878 {
1879 	unsigned char *cmd = scp->cmnd;
1880 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1881 	u32 alloc_len;
1882 
1883 	alloc_len = get_unaligned_be32(cmd + 10);
1884 	/* following just in case virtual_gb changed */
1885 	sdebug_capacity = get_sdebug_capacity();
1886 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1887 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1888 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1889 	arr[13] = sdebug_physblk_exp & 0xf;
1890 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1891 
1892 	if (scsi_debug_lbp()) {
1893 		arr[14] |= 0x80; /* LBPME */
1894 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1895 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1896 		 * in the wider field maps to 0 in this field.
1897 		 */
1898 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1899 			arr[14] |= 0x40;
1900 	}
1901 
1902 	arr[15] = sdebug_lowest_aligned & 0xff;
1903 
1904 	if (have_dif_prot) {
1905 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1906 		arr[12] |= 1; /* PROT_EN */
1907 	}
1908 
1909 	return fill_from_dev_buffer(scp, arr,
1910 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1911 }
1912 
1913 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1914 
1915 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1916 			      struct sdebug_dev_info *devip)
1917 {
1918 	unsigned char *cmd = scp->cmnd;
1919 	unsigned char *arr;
1920 	int host_no = devip->sdbg_host->shost->host_no;
1921 	int port_group_a, port_group_b, port_a, port_b;
1922 	u32 alen, n, rlen;
1923 	int ret;
1924 
1925 	alen = get_unaligned_be32(cmd + 6);
1926 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
1928 		return DID_REQUEUE << 16;
1929 	/*
1930 	 * EVPD page 0x88 states we have two ports, one
1931 	 * real and a fake port with no device connected.
1932 	 * So we create two port groups with one port each
1933 	 * and set the group with port B to unavailable.
1934 	 */
1935 	port_a = 0x1; /* relative port A */
1936 	port_b = 0x2; /* relative port B */
1937 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1938 			(devip->channel & 0x7f);
1939 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1940 			(devip->channel & 0x7f) + 0x80;
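	/*
	 * Illustrative values: host_no = 0 and channel = 0 yield
	 * port_group_a = 0x100 and port_group_b = 0x180.
	 */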
1941 
1942 	/*
1943 	 * The asymmetric access state is cycled according to the host_id.
1944 	 */
1945 	n = 4;
1946 	if (sdebug_vpd_use_hostno == 0) {
1947 		arr[n++] = host_no % 3; /* Asymm access state */
1948 		arr[n++] = 0x0F; /* claim: all states are supported */
1949 	} else {
1950 		arr[n++] = 0x0; /* Active/Optimized path */
1951 		arr[n++] = 0x01; /* only support active/optimized paths */
1952 	}
1953 	put_unaligned_be16(port_group_a, arr + n);
1954 	n += 2;
1955 	arr[n++] = 0;    /* Reserved */
1956 	arr[n++] = 0;    /* Status code */
1957 	arr[n++] = 0;    /* Vendor unique */
1958 	arr[n++] = 0x1;  /* One port per group */
1959 	arr[n++] = 0;    /* Reserved */
1960 	arr[n++] = 0;    /* Reserved */
1961 	put_unaligned_be16(port_a, arr + n);
1962 	n += 2;
1963 	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1965 	put_unaligned_be16(port_group_b, arr + n);
1966 	n += 2;
1967 	arr[n++] = 0;    /* Reserved */
1968 	arr[n++] = 0;    /* Status code */
1969 	arr[n++] = 0;    /* Vendor unique */
1970 	arr[n++] = 0x1;  /* One port per group */
1971 	arr[n++] = 0;    /* Reserved */
1972 	arr[n++] = 0;    /* Reserved */
1973 	put_unaligned_be16(port_b, arr + n);
1974 	n += 2;
1975 
1976 	rlen = n - 4;
1977 	put_unaligned_be32(rlen, arr + 0);
1978 
1979 	/*
1980 	 * Return the smallest value of either
1981 	 * - The allocated length
1982 	 * - The constructed command length
1983 	 * - The maximum array size
1984 	 */
1985 	rlen = min(alen, n);
1986 	ret = fill_from_dev_buffer(scp, arr,
1987 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1988 	kfree(arr);
1989 	return ret;
1990 }
1991 
1992 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1993 			     struct sdebug_dev_info *devip)
1994 {
1995 	bool rctd;
1996 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1997 	u16 req_sa, u;
1998 	u32 alloc_len, a_len;
1999 	int k, offset, len, errsts, count, bump, na;
2000 	const struct opcode_info_t *oip;
2001 	const struct opcode_info_t *r_oip;
2002 	u8 *arr;
2003 	u8 *cmd = scp->cmnd;
2004 
2005 	rctd = !!(cmd[2] & 0x80);
2006 	reporting_opts = cmd[2] & 0x7;
2007 	req_opcode = cmd[3];
2008 	req_sa = get_unaligned_be16(cmd + 4);
2009 	alloc_len = get_unaligned_be32(cmd + 6);
2010 	if (alloc_len < 4 || alloc_len > 0xffff) {
2011 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2012 		return check_condition_result;
2013 	}
2014 	if (alloc_len > 8192)
2015 		a_len = 8192;
2016 	else
2017 		a_len = alloc_len;
2018 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2019 	if (NULL == arr) {
2020 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2021 				INSUFF_RES_ASCQ);
2022 		return check_condition_result;
2023 	}
2024 	switch (reporting_opts) {
2025 	case 0:	/* all commands */
2026 		/* count number of commands */
2027 		for (count = 0, oip = opcode_info_arr;
2028 		     oip->num_attached != 0xff; ++oip) {
2029 			if (F_INV_OP & oip->flags)
2030 				continue;
2031 			count += (oip->num_attached + 1);
2032 		}
2033 		bump = rctd ? 20 : 8;
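		/* 8 byte descriptor, plus a 12 byte timeouts descriptor if RCTD */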
2034 		put_unaligned_be32(count * bump, arr);
2035 		for (offset = 4, oip = opcode_info_arr;
2036 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2037 			if (F_INV_OP & oip->flags)
2038 				continue;
2039 			na = oip->num_attached;
2040 			arr[offset] = oip->opcode;
2041 			put_unaligned_be16(oip->sa, arr + offset + 2);
2042 			if (rctd)
2043 				arr[offset + 5] |= 0x2;
2044 			if (FF_SA & oip->flags)
2045 				arr[offset + 5] |= 0x1;
2046 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2047 			if (rctd)
2048 				put_unaligned_be16(0xa, arr + offset + 8);
2049 			r_oip = oip;
2050 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2051 				if (F_INV_OP & oip->flags)
2052 					continue;
2053 				offset += bump;
2054 				arr[offset] = oip->opcode;
2055 				put_unaligned_be16(oip->sa, arr + offset + 2);
2056 				if (rctd)
2057 					arr[offset + 5] |= 0x2;
2058 				if (FF_SA & oip->flags)
2059 					arr[offset + 5] |= 0x1;
2060 				put_unaligned_be16(oip->len_mask[0],
2061 						   arr + offset + 6);
2062 				if (rctd)
2063 					put_unaligned_be16(0xa,
2064 							   arr + offset + 8);
2065 			}
2066 			oip = r_oip;
2067 			offset += bump;
2068 		}
2069 		break;
2070 	case 1:	/* one command: opcode only */
2071 	case 2:	/* one command: opcode plus service action */
2072 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2073 		sdeb_i = opcode_ind_arr[req_opcode];
2074 		oip = &opcode_info_arr[sdeb_i];
2075 		if (F_INV_OP & oip->flags) {
2076 			supp = 1;
2077 			offset = 4;
2078 		} else {
2079 			if (1 == reporting_opts) {
2080 				if (FF_SA & oip->flags) {
2081 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2082 							     2, 2);
2083 					kfree(arr);
2084 					return check_condition_result;
2085 				}
2086 				req_sa = 0;
2087 			} else if (2 == reporting_opts &&
2088 				   0 == (FF_SA & oip->flags)) {
				/* sense points at requested sa (cdb byte 4) */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
2091 				return check_condition_result;
2092 			}
2093 			if (0 == (FF_SA & oip->flags) &&
2094 			    req_opcode == oip->opcode)
2095 				supp = 3;
2096 			else if (0 == (FF_SA & oip->flags)) {
2097 				na = oip->num_attached;
2098 				for (k = 0, oip = oip->arrp; k < na;
2099 				     ++k, ++oip) {
2100 					if (req_opcode == oip->opcode)
2101 						break;
2102 				}
2103 				supp = (k >= na) ? 1 : 3;
2104 			} else if (req_sa != oip->sa) {
2105 				na = oip->num_attached;
2106 				for (k = 0, oip = oip->arrp; k < na;
2107 				     ++k, ++oip) {
2108 					if (req_sa == oip->sa)
2109 						break;
2110 				}
2111 				supp = (k >= na) ? 1 : 3;
2112 			} else
2113 				supp = 3;
2114 			if (3 == supp) {
2115 				u = oip->len_mask[0];
2116 				put_unaligned_be16(u, arr + 2);
2117 				arr[4] = oip->opcode;
2118 				for (k = 1; k < u; ++k)
2119 					arr[4 + k] = (k < 16) ?
2120 						 oip->len_mask[k] : 0xff;
2121 				offset = 4 + u;
2122 			} else
2123 				offset = 4;
2124 		}
2125 		arr[1] = (rctd ? 0x80 : 0) | supp;
2126 		if (rctd) {
2127 			put_unaligned_be16(0xa, arr + offset);
2128 			offset += 12;
2129 		}
2130 		break;
2131 	default:
2132 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2133 		kfree(arr);
2134 		return check_condition_result;
2135 	}
2136 	offset = (offset < a_len) ? offset : a_len;
2137 	len = (offset < alloc_len) ? offset : alloc_len;
2138 	errsts = fill_from_dev_buffer(scp, arr, len);
2139 	kfree(arr);
2140 	return errsts;
2141 }
2142 
2143 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2144 			  struct sdebug_dev_info *devip)
2145 {
2146 	bool repd;
2147 	u32 alloc_len, len;
2148 	u8 arr[16];
2149 	u8 *cmd = scp->cmnd;
2150 
2151 	memset(arr, 0, sizeof(arr));
2152 	repd = !!(cmd[2] & 0x80);
2153 	alloc_len = get_unaligned_be32(cmd + 6);
2154 	if (alloc_len < 4) {
2155 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2156 		return check_condition_result;
2157 	}
2158 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2159 	arr[1] = 0x1;		/* ITNRS */
2160 	if (repd) {
2161 		arr[3] = 0xc;
2162 		len = 16;
2163 	} else
2164 		len = 4;
2165 
2166 	len = (len < alloc_len) ? len : alloc_len;
2167 	return fill_from_dev_buffer(scp, arr, len);
2168 }
2169 
2170 /* <<Following mode page info copied from ST318451LW>> */
2171 
2172 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2173 {	/* Read-Write Error Recovery page for mode_sense */
2174 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2175 					5, 0, 0xff, 0xff};
2176 
2177 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2178 	if (1 == pcontrol)
2179 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2180 	return sizeof(err_recov_pg);
2181 }
2182 
2183 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2184 { 	/* Disconnect-Reconnect page for mode_sense */
2185 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2186 					 0, 0, 0, 0, 0, 0, 0, 0};
2187 
2188 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2189 	if (1 == pcontrol)
2190 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2191 	return sizeof(disconnect_pg);
2192 }
2193 
2194 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2195 {       /* Format device page for mode_sense */
2196 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2197 				     0, 0, 0, 0, 0, 0, 0, 0,
2198 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2199 
2200 	memcpy(p, format_pg, sizeof(format_pg));
2201 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2202 	put_unaligned_be16(sdebug_sector_size, p + 12);
2203 	if (sdebug_removable)
2204 		p[20] |= 0x20; /* should agree with INQUIRY */
2205 	if (1 == pcontrol)
2206 		memset(p + 2, 0, sizeof(format_pg) - 2);
2207 	return sizeof(format_pg);
2208 }
2209 
2210 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2211 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2212 				     0, 0, 0, 0};
2213 
2214 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2215 { 	/* Caching page for mode_sense */
2216 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2217 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2218 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2220 
2221 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2222 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2223 	memcpy(p, caching_pg, sizeof(caching_pg));
2224 	if (1 == pcontrol)
2225 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2226 	else if (2 == pcontrol)
2227 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2228 	return sizeof(caching_pg);
2229 }
2230 
2231 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2232 				    0, 0, 0x2, 0x4b};
2233 
2234 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2235 { 	/* Control mode page for mode_sense */
2236 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2237 					0, 0, 0, 0};
2238 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2239 				     0, 0, 0x2, 0x4b};
2240 
2241 	if (sdebug_dsense)
2242 		ctrl_m_pg[2] |= 0x4;
2243 	else
2244 		ctrl_m_pg[2] &= ~0x4;
2245 
2246 	if (sdebug_ato)
2247 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2248 
2249 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2250 	if (1 == pcontrol)
2251 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2252 	else if (2 == pcontrol)
2253 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2254 	return sizeof(ctrl_m_pg);
2255 }
2256 
2257 
2258 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2259 {	/* Informational Exceptions control mode page for mode_sense */
2260 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2261 				       0, 0, 0x0, 0x0};
2262 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2263 				      0, 0, 0x0, 0x0};
2264 
2265 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2266 	if (1 == pcontrol)
2267 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2268 	else if (2 == pcontrol)
2269 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2270 	return sizeof(iec_m_pg);
2271 }
2272 
2273 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2274 {	/* SAS SSP mode page - short format for mode_sense */
2275 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2276 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2277 
2278 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2279 	if (1 == pcontrol)
2280 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2281 	return sizeof(sas_sf_m_pg);
2282 }
2283 
2284 
2285 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2286 			      int target_dev_id)
2287 {	/* SAS phy control and discover mode page for mode_sense */
2288 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2289 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2290 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2291 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2292 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2293 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2294 		    0, 0, 0, 0, 0, 0, 0, 0,
2295 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2296 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2297 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2298 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2299 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2300 		    0, 0, 0, 0, 0, 0, 0, 0,
2301 		};
2302 	int port_a, port_b;
2303 
2304 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2305 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2306 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2307 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2308 	port_a = target_dev_id + 1;
2309 	port_b = port_a + 1;
2310 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2311 	put_unaligned_be32(port_a, p + 20);
2312 	put_unaligned_be32(port_b, p + 48 + 20);
2313 	if (1 == pcontrol)
2314 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2315 	return sizeof(sas_pcd_m_pg);
2316 }
2317 
2318 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2319 {	/* SAS SSP shared protocol specific port mode subpage */
2320 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2321 		    0, 0, 0, 0, 0, 0, 0, 0,
2322 		};
2323 
2324 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2325 	if (1 == pcontrol)
2326 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2327 	return sizeof(sas_sha_m_pg);
2328 }
2329 
2330 #define SDEBUG_MAX_MSENSE_SZ 256
2331 
2332 static int resp_mode_sense(struct scsi_cmnd *scp,
2333 			   struct sdebug_dev_info *devip)
2334 {
2335 	int pcontrol, pcode, subpcode, bd_len;
2336 	unsigned char dev_spec;
2337 	u32 alloc_len, offset, len;
2338 	int target_dev_id;
2339 	int target = scp->device->id;
2340 	unsigned char *ap;
2341 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2342 	unsigned char *cmd = scp->cmnd;
2343 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2344 
2345 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2346 	pcontrol = (cmd[2] & 0xc0) >> 6;
2347 	pcode = cmd[2] & 0x3f;
2348 	subpcode = cmd[3];
2349 	msense_6 = (MODE_SENSE == cmd[0]);
2350 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2351 	is_disk = (sdebug_ptype == TYPE_DISK);
2352 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2353 	if ((is_disk || is_zbc) && !dbd)
2354 		bd_len = llbaa ? 16 : 8;
2355 	else
2356 		bd_len = 0;
2357 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2358 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2359 	if (0x3 == pcontrol) {  /* Saving values not supported */
2360 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2361 		return check_condition_result;
2362 	}
2363 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2364 			(devip->target * 1000) - 3;
2365 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2366 	if (is_disk || is_zbc) {
2367 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2368 		if (sdebug_wp)
2369 			dev_spec |= 0x80;
2370 	} else
2371 		dev_spec = 0x0;
2372 	if (msense_6) {
2373 		arr[2] = dev_spec;
2374 		arr[3] = bd_len;
2375 		offset = 4;
2376 	} else {
2377 		arr[3] = dev_spec;
2378 		if (16 == bd_len)
2379 			arr[4] = 0x1;	/* set LONGLBA bit */
2380 		arr[7] = bd_len;	/* assume 255 or less */
2381 		offset = 8;
2382 	}
2383 	ap = arr + offset;
2384 	if ((bd_len > 0) && (!sdebug_capacity))
2385 		sdebug_capacity = get_sdebug_capacity();
2386 
2387 	if (8 == bd_len) {
2388 		if (sdebug_capacity > 0xfffffffe)
2389 			put_unaligned_be32(0xffffffff, ap + 0);
2390 		else
2391 			put_unaligned_be32(sdebug_capacity, ap + 0);
2392 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2393 		offset += bd_len;
2394 		ap = arr + offset;
2395 	} else if (16 == bd_len) {
2396 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2397 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2398 		offset += bd_len;
2399 		ap = arr + offset;
2400 	}
2401 
2402 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2403 		/* TODO: Control Extension page */
2404 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2405 		return check_condition_result;
2406 	}
2407 	bad_pcode = false;
2408 
2409 	switch (pcode) {
2410 	case 0x1:	/* Read-Write error recovery page, direct access */
2411 		len = resp_err_recov_pg(ap, pcontrol, target);
2412 		offset += len;
2413 		break;
2414 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2415 		len = resp_disconnect_pg(ap, pcontrol, target);
2416 		offset += len;
2417 		break;
2418 	case 0x3:       /* Format device page, direct access */
2419 		if (is_disk) {
2420 			len = resp_format_pg(ap, pcontrol, target);
2421 			offset += len;
2422 		} else
2423 			bad_pcode = true;
2424 		break;
2425 	case 0x8:	/* Caching page, direct access */
2426 		if (is_disk || is_zbc) {
2427 			len = resp_caching_pg(ap, pcontrol, target);
2428 			offset += len;
2429 		} else
2430 			bad_pcode = true;
2431 		break;
2432 	case 0xa:	/* Control Mode page, all devices */
2433 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2434 		offset += len;
2435 		break;
2436 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2437 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2438 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2439 			return check_condition_result;
2440 		}
2441 		len = 0;
2442 		if ((0x0 == subpcode) || (0xff == subpcode))
2443 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2444 		if ((0x1 == subpcode) || (0xff == subpcode))
2445 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2446 						  target_dev_id);
2447 		if ((0x2 == subpcode) || (0xff == subpcode))
2448 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2449 		offset += len;
2450 		break;
2451 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2452 		len = resp_iec_m_pg(ap, pcontrol, target);
2453 		offset += len;
2454 		break;
2455 	case 0x3f:	/* Read all Mode pages */
2456 		if ((0 == subpcode) || (0xff == subpcode)) {
2457 			len = resp_err_recov_pg(ap, pcontrol, target);
2458 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2459 			if (is_disk) {
2460 				len += resp_format_pg(ap + len, pcontrol,
2461 						      target);
2462 				len += resp_caching_pg(ap + len, pcontrol,
2463 						       target);
2464 			} else if (is_zbc) {
2465 				len += resp_caching_pg(ap + len, pcontrol,
2466 						       target);
2467 			}
2468 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2469 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2470 			if (0xff == subpcode) {
2471 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2472 						  target, target_dev_id);
2473 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2474 			}
2475 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2476 			offset += len;
2477 		} else {
2478 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2479 			return check_condition_result;
2480 		}
2481 		break;
2482 	default:
2483 		bad_pcode = true;
2484 		break;
2485 	}
2486 	if (bad_pcode) {
2487 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2488 		return check_condition_result;
2489 	}
2490 	if (msense_6)
2491 		arr[0] = offset - 1;
2492 	else
2493 		put_unaligned_be16((offset - 2), arr + 0);
2494 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2495 }
2496 
2497 #define SDEBUG_MAX_MSELECT_SZ 512
2498 
2499 static int resp_mode_select(struct scsi_cmnd *scp,
2500 			    struct sdebug_dev_info *devip)
2501 {
2502 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2503 	int param_len, res, mpage;
2504 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2505 	unsigned char *cmd = scp->cmnd;
2506 	int mselect6 = (MODE_SELECT == cmd[0]);
2507 
2508 	memset(arr, 0, sizeof(arr));
2509 	pf = cmd[1] & 0x10;
2510 	sp = cmd[1] & 0x1;
2511 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2512 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2513 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2514 		return check_condition_result;
2515 	}
2516 	res = fetch_to_dev_buffer(scp, arr, param_len);
2517 	if (-1 == res)
2518 		return DID_ERROR << 16;
2519 	else if (sdebug_verbose && (res < param_len))
2520 		sdev_printk(KERN_INFO, scp->device,
2521 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2522 			    __func__, param_len, res);
2523 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2524 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2525 	off = bd_len + (mselect6 ? 4 : 8);
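	/* mode parameter header: 4 bytes for MODE SELECT(6), 8 for (10) */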
2526 	if (md_len > 2 || off >= res) {
2527 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2528 		return check_condition_result;
2529 	}
2530 	mpage = arr[off] & 0x3f;
2531 	ps = !!(arr[off] & 0x80);
2532 	if (ps) {
2533 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2534 		return check_condition_result;
2535 	}
2536 	spf = !!(arr[off] & 0x40);
2537 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2538 		       (arr[off + 1] + 2);
2539 	if ((pg_len + off) > param_len) {
2540 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2541 				PARAMETER_LIST_LENGTH_ERR, 0);
2542 		return check_condition_result;
2543 	}
2544 	switch (mpage) {
2545 	case 0x8:      /* Caching Mode page */
2546 		if (caching_pg[1] == arr[off + 1]) {
2547 			memcpy(caching_pg + 2, arr + off + 2,
2548 			       sizeof(caching_pg) - 2);
2549 			goto set_mode_changed_ua;
2550 		}
2551 		break;
2552 	case 0xa:      /* Control Mode page */
2553 		if (ctrl_m_pg[1] == arr[off + 1]) {
2554 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2555 			       sizeof(ctrl_m_pg) - 2);
			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2560 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2561 			goto set_mode_changed_ua;
2562 		}
2563 		break;
2564 	case 0x1c:      /* Informational Exceptions Mode page */
2565 		if (iec_m_pg[1] == arr[off + 1]) {
2566 			memcpy(iec_m_pg + 2, arr + off + 2,
2567 			       sizeof(iec_m_pg) - 2);
2568 			goto set_mode_changed_ua;
2569 		}
2570 		break;
2571 	default:
2572 		break;
2573 	}
2574 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2575 	return check_condition_result;
2576 set_mode_changed_ua:
2577 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2578 	return 0;
2579 }
2580 
2581 static int resp_temp_l_pg(unsigned char *arr)
2582 {
2583 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2584 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2585 		};
2586 
2587 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2588 	return sizeof(temp_l_pg);
2589 }
2590 
2591 static int resp_ie_l_pg(unsigned char *arr)
2592 {
2593 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2594 		};
2595 
2596 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2597 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2598 		arr[4] = THRESHOLD_EXCEEDED;
2599 		arr[5] = 0xff;
2600 	}
2601 	return sizeof(ie_l_pg);
2602 }
2603 
2604 static int resp_env_rep_l_spg(unsigned char *arr)
2605 {
2606 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2607 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2608 					 0x1, 0x0, 0x23, 0x8,
2609 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2610 		};
2611 
2612 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2613 	return sizeof(env_rep_l_spg);
2614 }
2615 
2616 #define SDEBUG_MAX_LSENSE_SZ 512
2617 
2618 static int resp_log_sense(struct scsi_cmnd *scp,
2619 			  struct sdebug_dev_info *devip)
2620 {
2621 	int ppc, sp, pcode, subpcode;
2622 	u32 alloc_len, len, n;
2623 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2624 	unsigned char *cmd = scp->cmnd;
2625 
2626 	memset(arr, 0, sizeof(arr));
2627 	ppc = cmd[1] & 0x2;
2628 	sp = cmd[1] & 0x1;
2629 	if (ppc || sp) {
2630 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2631 		return check_condition_result;
2632 	}
2633 	pcode = cmd[2] & 0x3f;
2634 	subpcode = cmd[3] & 0xff;
2635 	alloc_len = get_unaligned_be16(cmd + 7);
2636 	arr[0] = pcode;
2637 	if (0 == subpcode) {
2638 		switch (pcode) {
2639 		case 0x0:	/* Supported log pages log page */
2640 			n = 4;
2641 			arr[n++] = 0x0;		/* this page */
2642 			arr[n++] = 0xd;		/* Temperature */
2643 			arr[n++] = 0x2f;	/* Informational exceptions */
2644 			arr[3] = n - 4;
2645 			break;
2646 		case 0xd:	/* Temperature log page */
2647 			arr[3] = resp_temp_l_pg(arr + 4);
2648 			break;
2649 		case 0x2f:	/* Informational exceptions log page */
2650 			arr[3] = resp_ie_l_pg(arr + 4);
2651 			break;
2652 		default:
2653 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 			return check_condition_result;
2655 		}
2656 	} else if (0xff == subpcode) {
2657 		arr[0] |= 0x40;
2658 		arr[1] = subpcode;
2659 		switch (pcode) {
2660 		case 0x0:	/* Supported log pages and subpages log page */
2661 			n = 4;
2662 			arr[n++] = 0x0;
2663 			arr[n++] = 0x0;		/* 0,0 page */
2664 			arr[n++] = 0x0;
2665 			arr[n++] = 0xff;	/* this page */
2666 			arr[n++] = 0xd;
2667 			arr[n++] = 0x0;		/* Temperature */
2668 			arr[n++] = 0xd;
2669 			arr[n++] = 0x1;		/* Environment reporting */
2670 			arr[n++] = 0xd;
2671 			arr[n++] = 0xff;	/* all 0xd subpages */
2672 			arr[n++] = 0x2f;
2673 			arr[n++] = 0x0;	/* Informational exceptions */
2674 			arr[n++] = 0x2f;
2675 			arr[n++] = 0xff;	/* all 0x2f subpages */
2676 			arr[3] = n - 4;
2677 			break;
2678 		case 0xd:	/* Temperature subpages */
2679 			n = 4;
2680 			arr[n++] = 0xd;
2681 			arr[n++] = 0x0;		/* Temperature */
2682 			arr[n++] = 0xd;
2683 			arr[n++] = 0x1;		/* Environment reporting */
2684 			arr[n++] = 0xd;
2685 			arr[n++] = 0xff;	/* these subpages */
2686 			arr[3] = n - 4;
2687 			break;
2688 		case 0x2f:	/* Informational exceptions subpages */
2689 			n = 4;
2690 			arr[n++] = 0x2f;
2691 			arr[n++] = 0x0;		/* Informational exceptions */
2692 			arr[n++] = 0x2f;
2693 			arr[n++] = 0xff;	/* these subpages */
2694 			arr[3] = n - 4;
2695 			break;
2696 		default:
2697 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2698 			return check_condition_result;
2699 		}
2700 	} else if (subpcode > 0) {
2701 		arr[0] |= 0x40;
2702 		arr[1] = subpcode;
2703 		if (pcode == 0xd && subpcode == 1)
2704 			arr[3] = resp_env_rep_l_spg(arr + 4);
2705 		else {
2706 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2707 			return check_condition_result;
2708 		}
2709 	} else {
2710 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711 		return check_condition_result;
2712 	}
2713 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2716 }
2717 
2718 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2719 {
2720 	return devip->nr_zones != 0;
2721 }
2722 
2723 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2724 					unsigned long long lba)
2725 {
2726 	u32 zno = lba >> devip->zsize_shift;
2727 	struct sdeb_zone_state *zsp;
2728 
2729 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2730 		return &devip->zstate[zno];
2731 
2732 	/*
2733 	 * If the zone capacity is less than the zone size, adjust for gap
2734 	 * zones.
2735 	 */
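	/*
	 * Illustrative layout: nr_conv_zones = 4 followed by alternating
	 * sequential and gap zones, so logical zone number 6 (the third
	 * sequential zone) maps to array index 2 * 6 - 4 = 8; the zsp++
	 * below steps into the trailing gap zone when the LBA is past
	 * the end of the sequential zone.
	 */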
2736 	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2738 	zsp = &devip->zstate[zno];
2739 	if (lba >= zsp->z_start + zsp->z_size)
2740 		zsp++;
2741 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2742 	return zsp;
2743 }
2744 
2745 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2746 {
2747 	return zsp->z_type == ZBC_ZTYPE_CNV;
2748 }
2749 
2750 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2751 {
2752 	return zsp->z_type == ZBC_ZTYPE_GAP;
2753 }
2754 
2755 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2756 {
2757 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2758 }
2759 
2760 static void zbc_close_zone(struct sdebug_dev_info *devip,
2761 			   struct sdeb_zone_state *zsp)
2762 {
2763 	enum sdebug_z_cond zc;
2764 
2765 	if (!zbc_zone_is_seq(zsp))
2766 		return;
2767 
2768 	zc = zsp->z_cond;
2769 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2770 		return;
2771 
2772 	if (zc == ZC2_IMPLICIT_OPEN)
2773 		devip->nr_imp_open--;
2774 	else
2775 		devip->nr_exp_open--;
2776 
2777 	if (zsp->z_wp == zsp->z_start) {
2778 		zsp->z_cond = ZC1_EMPTY;
2779 	} else {
2780 		zsp->z_cond = ZC4_CLOSED;
2781 		devip->nr_closed++;
2782 	}
2783 }
2784 
2785 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2786 {
2787 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2788 	unsigned int i;
2789 
2790 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2791 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2792 			zbc_close_zone(devip, zsp);
2793 			return;
2794 		}
2795 	}
2796 }
2797 
2798 static void zbc_open_zone(struct sdebug_dev_info *devip,
2799 			  struct sdeb_zone_state *zsp, bool explicit)
2800 {
2801 	enum sdebug_z_cond zc;
2802 
2803 	if (!zbc_zone_is_seq(zsp))
2804 		return;
2805 
2806 	zc = zsp->z_cond;
2807 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2808 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2809 		return;
2810 
2811 	/* Close an implicit open zone if necessary */
2812 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2813 		zbc_close_zone(devip, zsp);
2814 	else if (devip->max_open &&
2815 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2816 		zbc_close_imp_open_zone(devip);
2817 
2818 	if (zsp->z_cond == ZC4_CLOSED)
2819 		devip->nr_closed--;
2820 	if (explicit) {
2821 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2822 		devip->nr_exp_open++;
2823 	} else {
2824 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2825 		devip->nr_imp_open++;
2826 	}
2827 }
2828 
2829 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2830 		       unsigned long long lba, unsigned int num)
2831 {
2832 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2833 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2834 
2835 	if (!zbc_zone_is_seq(zsp))
2836 		return;
2837 
2838 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2839 		zsp->z_wp += num;
2840 		if (zsp->z_wp >= zend)
2841 			zsp->z_cond = ZC5_FULL;
2842 		return;
2843 	}
2844 
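	/*
	 * Sequential write preferred (host-aware) zones: a write need not
	 * start at the write pointer and may cross zone boundaries, so
	 * advance the write pointer zone by zone.
	 */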
2845 	while (num) {
2846 		if (lba != zsp->z_wp)
2847 			zsp->z_non_seq_resource = true;
2848 
2849 		end = lba + num;
2850 		if (end >= zend) {
2851 			n = zend - lba;
2852 			zsp->z_wp = zend;
2853 		} else if (end > zsp->z_wp) {
2854 			n = num;
2855 			zsp->z_wp = end;
2856 		} else {
2857 			n = num;
2858 		}
2859 		if (zsp->z_wp >= zend)
2860 			zsp->z_cond = ZC5_FULL;
2861 
2862 		num -= n;
2863 		lba += n;
2864 		if (num) {
2865 			zsp++;
2866 			zend = zsp->z_start + zsp->z_size;
2867 		}
2868 	}
2869 }
2870 
2871 static int check_zbc_access_params(struct scsi_cmnd *scp,
2872 			unsigned long long lba, unsigned int num, bool write)
2873 {
2874 	struct scsi_device *sdp = scp->device;
2875 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2876 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2877 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2878 
2879 	if (!write) {
2880 		if (devip->zmodel == BLK_ZONED_HA)
2881 			return 0;
2882 		/* For host-managed, reads cannot cross zone types boundaries */
2883 		if (zsp->z_type != zsp_end->z_type) {
2884 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2885 					LBA_OUT_OF_RANGE,
2886 					READ_INVDATA_ASCQ);
2887 			return check_condition_result;
2888 		}
2889 		return 0;
2890 	}
2891 
2892 	/* Writing into a gap zone is not allowed */
2893 	if (zbc_zone_is_gap(zsp)) {
2894 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2895 				ATTEMPT_ACCESS_GAP);
2896 		return check_condition_result;
2897 	}
2898 
2899 	/* No restrictions for writes within conventional zones */
2900 	if (zbc_zone_is_conv(zsp)) {
2901 		if (!zbc_zone_is_conv(zsp_end)) {
2902 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2903 					LBA_OUT_OF_RANGE,
2904 					WRITE_BOUNDARY_ASCQ);
2905 			return check_condition_result;
2906 		}
2907 		return 0;
2908 	}
2909 
2910 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2911 		/* Writes cannot cross sequential zone boundaries */
2912 		if (zsp_end != zsp) {
2913 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2914 					LBA_OUT_OF_RANGE,
2915 					WRITE_BOUNDARY_ASCQ);
2916 			return check_condition_result;
2917 		}
2918 		/* Cannot write full zones */
2919 		if (zsp->z_cond == ZC5_FULL) {
2920 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2921 					INVALID_FIELD_IN_CDB, 0);
2922 			return check_condition_result;
2923 		}
2924 		/* Writes must be aligned to the zone WP */
2925 		if (lba != zsp->z_wp) {
2926 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2927 					LBA_OUT_OF_RANGE,
2928 					UNALIGNED_WRITE_ASCQ);
2929 			return check_condition_result;
2930 		}
2931 	}
2932 
2933 	/* Handle implicit open of closed and empty zones */
2934 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2935 		if (devip->max_open &&
2936 		    devip->nr_exp_open >= devip->max_open) {
2937 			mk_sense_buffer(scp, DATA_PROTECT,
2938 					INSUFF_RES_ASC,
2939 					INSUFF_ZONE_ASCQ);
2940 			return check_condition_result;
2941 		}
2942 		zbc_open_zone(devip, zsp, false);
2943 	}
2944 
2945 	return 0;
2946 }
2947 
2948 static inline int check_device_access_params
2949 			(struct scsi_cmnd *scp, unsigned long long lba,
2950 			 unsigned int num, bool write)
2951 {
2952 	struct scsi_device *sdp = scp->device;
2953 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2954 
2955 	if (lba + num > sdebug_capacity) {
2956 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2957 		return check_condition_result;
2958 	}
2959 	/* transfer length excessive (tie in to block limits VPD page) */
2960 	if (num > sdebug_store_sectors) {
2961 		/* needs work to find which cdb byte 'num' comes from */
2962 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2963 		return check_condition_result;
2964 	}
2965 	if (write && unlikely(sdebug_wp)) {
2966 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2967 		return check_condition_result;
2968 	}
2969 	if (sdebug_dev_is_zoned(devip))
2970 		return check_zbc_access_params(scp, lba, num, write);
2971 
2972 	return 0;
2973 }
2974 
2975 /*
2976  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2977  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2978  * that access any of the "stores" in struct sdeb_store_info should call this
2979  * function with bug_if_fake_rw set to true.
2980  */
2981 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2982 						bool bug_if_fake_rw)
2983 {
2984 	if (sdebug_fake_rw) {
2985 		BUG_ON(bug_if_fake_rw);	/* See note above */
2986 		return NULL;
2987 	}
2988 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2989 }
2990 
2991 /* Returns number of bytes copied or -1 if error. */
2992 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2993 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2994 {
2995 	int ret;
2996 	u64 block, rest = 0;
2997 	enum dma_data_direction dir;
2998 	struct scsi_data_buffer *sdb = &scp->sdb;
2999 	u8 *fsp;
3000 
3001 	if (do_write) {
3002 		dir = DMA_TO_DEVICE;
3003 		write_since_sync = true;
3004 	} else {
3005 		dir = DMA_FROM_DEVICE;
3006 	}
3007 
3008 	if (!sdb->length || !sip)
3009 		return 0;
3010 	if (scp->sc_data_direction != dir)
3011 		return -1;
3012 	fsp = sip->storep;
3013 
3014 	block = do_div(lba, sdebug_store_sectors);
3015 	if (block + num > sdebug_store_sectors)
3016 		rest = block + num - sdebug_store_sectors;
3017 
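	/*
	 * block is now lba modulo the store size; a transfer running past
	 * the end of the store wraps around, with the trailing 'rest'
	 * sectors copied from/to the start of the store below.
	 */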
3018 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3019 		   fsp + (block * sdebug_sector_size),
3020 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3021 	if (ret != (num - rest) * sdebug_sector_size)
3022 		return ret;
3023 
3024 	if (rest) {
3025 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3026 			    fsp, rest * sdebug_sector_size,
3027 			    sg_skip + ((num - rest) * sdebug_sector_size),
3028 			    do_write);
3029 	}
3030 
3031 	return ret;
3032 }
3033 
3034 /* Returns number of bytes copied or -1 if error. */
3035 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3036 {
3037 	struct scsi_data_buffer *sdb = &scp->sdb;
3038 
3039 	if (!sdb->length)
3040 		return 0;
3041 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3042 		return -1;
3043 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3044 			      num * sdebug_sector_size, 0, true);
3045 }
3046 
3047 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3048  * arr into sip->storep+lba and return true. If comparison fails then
3049  * return false. */
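/* arr holds 2 * num logical blocks: the first num blocks are the verify
 * data, the second num blocks (the "top half") are written to the store
 * on a successful compare. */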
3050 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3051 			      const u8 *arr, bool compare_only)
3052 {
3053 	bool res;
3054 	u64 block, rest = 0;
3055 	u32 store_blks = sdebug_store_sectors;
3056 	u32 lb_size = sdebug_sector_size;
3057 	u8 *fsp = sip->storep;
3058 
3059 	block = do_div(lba, store_blks);
3060 	if (block + num > store_blks)
3061 		rest = block + num - store_blks;
3062 
3063 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3064 	if (!res)
3065 		return res;
3066 	if (rest)
		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
			      rest * lb_size);
3069 	if (!res)
3070 		return res;
3071 	if (compare_only)
3072 		return true;
3073 	arr += num * lb_size;
3074 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3075 	if (rest)
3076 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3077 	return res;
3078 }
3079 
3080 static __be16 dif_compute_csum(const void *buf, int len)
3081 {
3082 	__be16 csum;
3083 
3084 	if (sdebug_guard)
3085 		csum = (__force __be16)ip_compute_csum(buf, len);
3086 	else
3087 		csum = cpu_to_be16(crc_t10dif(buf, len));
3088 
3089 	return csum;
3090 }
3091 
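/*
 * Check one protection information tuple against the data block that it
 * covers. Returns 0 on success, 0x01 on a guard tag mismatch and 0x03 on
 * a reference tag mismatch; callers use the non-zero value as the ASCQ of
 * the resulting 0x10 (PI error) sense code.
 */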
3092 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3093 		      sector_t sector, u32 ei_lba)
3094 {
3095 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3096 
3097 	if (sdt->guard_tag != csum) {
3098 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3099 			(unsigned long)sector,
3100 			be16_to_cpu(sdt->guard_tag),
3101 			be16_to_cpu(csum));
3102 		return 0x01;
3103 	}
3104 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3105 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3106 		pr_err("REF check failed on sector %lu\n",
3107 			(unsigned long)sector);
3108 		return 0x03;
3109 	}
3110 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3111 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3112 		pr_err("REF check failed on sector %lu\n",
3113 			(unsigned long)sector);
3114 		return 0x03;
3115 	}
3116 	return 0;
3117 }
3118 
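/*
 * Move protection information (one 8 byte t10_pi_tuple per sector) between
 * the command's protection scatter-gather list and the store's dif_storep.
 * Like the data store, dif_storep wraps modulo sdebug_store_sectors.
 */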
3119 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3120 			  unsigned int sectors, bool read)
3121 {
3122 	size_t resid;
3123 	void *paddr;
3124 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3125 						scp->device->hostdata, true);
3126 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3127 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3128 	struct sg_mapping_iter miter;
3129 
3130 	/* Bytes of protection data to copy into sgl */
3131 	resid = sectors * sizeof(*dif_storep);
3132 
3133 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3134 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3135 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3136 
3137 	while (sg_miter_next(&miter) && resid > 0) {
3138 		size_t len = min_t(size_t, miter.length, resid);
3139 		void *start = dif_store(sip, sector);
3140 		size_t rest = 0;
3141 
3142 		if (dif_store_end < start + len)
3143 			rest = start + len - dif_store_end;
3144 
3145 		paddr = miter.addr;
3146 
3147 		if (read)
3148 			memcpy(paddr, start, len - rest);
3149 		else
3150 			memcpy(start, paddr, len - rest);
3151 
3152 		if (rest) {
3153 			if (read)
3154 				memcpy(paddr + len - rest, dif_storep, rest);
3155 			else
3156 				memcpy(dif_storep, paddr + len - rest, rest);
3157 		}
3158 
3159 		sector += len / sizeof(*dif_storep);
3160 		resid -= len;
3161 	}
3162 	sg_miter_stop(&miter);
3163 }
3164 
3165 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3166 			    unsigned int sectors, u32 ei_lba)
3167 {
3168 	int ret = 0;
3169 	unsigned int i;
3170 	sector_t sector;
3171 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3172 						scp->device->hostdata, true);
3173 	struct t10_pi_tuple *sdt;
3174 
3175 	for (i = 0; i < sectors; i++, ei_lba++) {
3176 		sector = start_sec + i;
3177 		sdt = dif_store(sip, sector);
3178 
3179 		if (sdt->app_tag == cpu_to_be16(0xffff))
3180 			continue;
3181 
3182 		/*
3183 		 * Because scsi_debug acts as both initiator and
3184 		 * target we proceed to verify the PI even if
3185 		 * RDPROTECT=3. This is done so the "initiator" knows
3186 		 * which type of error to return. Otherwise we would
3187 		 * have to iterate over the PI twice.
3188 		 */
3189 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3190 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3191 					 sector, ei_lba);
3192 			if (ret) {
3193 				dif_errors++;
3194 				break;
3195 			}
3196 		}
3197 	}
3198 
3199 	dif_copy_prot(scp, start_sec, sectors, true);
3200 	dix_reads++;
3201 
3202 	return ret;
3203 }
3204 
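/*
 * Reader/writer lock helpers protecting a store. When sdebug_no_rwlock is
 * set no real lock is taken; the __acquire()/__release() calls are only
 * annotations to keep sparse's context checking happy. The global
 * sdeb_fake_rw_lck stands in when there is no store (fake_rw).
 */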
3205 static inline void
3206 sdeb_read_lock(struct sdeb_store_info *sip)
3207 {
3208 	if (sdebug_no_rwlock) {
3209 		if (sip)
3210 			__acquire(&sip->macc_lck);
3211 		else
3212 			__acquire(&sdeb_fake_rw_lck);
3213 	} else {
3214 		if (sip)
3215 			read_lock(&sip->macc_lck);
3216 		else
3217 			read_lock(&sdeb_fake_rw_lck);
3218 	}
3219 }
3220 
3221 static inline void
3222 sdeb_read_unlock(struct sdeb_store_info *sip)
3223 {
3224 	if (sdebug_no_rwlock) {
3225 		if (sip)
3226 			__release(&sip->macc_lck);
3227 		else
3228 			__release(&sdeb_fake_rw_lck);
3229 	} else {
3230 		if (sip)
3231 			read_unlock(&sip->macc_lck);
3232 		else
3233 			read_unlock(&sdeb_fake_rw_lck);
3234 	}
3235 }
3236 
3237 static inline void
3238 sdeb_write_lock(struct sdeb_store_info *sip)
3239 {
3240 	if (sdebug_no_rwlock) {
3241 		if (sip)
3242 			__acquire(&sip->macc_lck);
3243 		else
3244 			__acquire(&sdeb_fake_rw_lck);
3245 	} else {
3246 		if (sip)
3247 			write_lock(&sip->macc_lck);
3248 		else
3249 			write_lock(&sdeb_fake_rw_lck);
3250 	}
3251 }
3252 
3253 static inline void
3254 sdeb_write_unlock(struct sdeb_store_info *sip)
3255 {
3256 	if (sdebug_no_rwlock) {
3257 		if (sip)
3258 			__release(&sip->macc_lck);
3259 		else
3260 			__release(&sdeb_fake_rw_lck);
3261 	} else {
3262 		if (sip)
3263 			write_unlock(&sip->macc_lck);
3264 		else
3265 			write_unlock(&sdeb_fake_rw_lck);
3266 	}
3267 }
3268 
3269 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3270 {
3271 	bool check_prot;
3272 	u32 num;
3273 	u32 ei_lba;
3274 	int ret;
3275 	u64 lba;
3276 	struct sdeb_store_info *sip = devip2sip(devip, true);
3277 	u8 *cmd = scp->cmnd;
3278 
3279 	switch (cmd[0]) {
3280 	case READ_16:
3281 		ei_lba = 0;
3282 		lba = get_unaligned_be64(cmd + 2);
3283 		num = get_unaligned_be32(cmd + 10);
3284 		check_prot = true;
3285 		break;
3286 	case READ_10:
3287 		ei_lba = 0;
3288 		lba = get_unaligned_be32(cmd + 2);
3289 		num = get_unaligned_be16(cmd + 7);
3290 		check_prot = true;
3291 		break;
3292 	case READ_6:
3293 		ei_lba = 0;
3294 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3295 		      (u32)(cmd[1] & 0x1f) << 16;
3296 		num = (0 == cmd[4]) ? 256 : cmd[4];
3297 		check_prot = true;
3298 		break;
3299 	case READ_12:
3300 		ei_lba = 0;
3301 		lba = get_unaligned_be32(cmd + 2);
3302 		num = get_unaligned_be32(cmd + 6);
3303 		check_prot = true;
3304 		break;
3305 	case XDWRITEREAD_10:
3306 		ei_lba = 0;
3307 		lba = get_unaligned_be32(cmd + 2);
3308 		num = get_unaligned_be16(cmd + 7);
3309 		check_prot = false;
3310 		break;
3311 	default:	/* assume READ(32) */
3312 		lba = get_unaligned_be64(cmd + 12);
3313 		ei_lba = get_unaligned_be32(cmd + 20);
3314 		num = get_unaligned_be32(cmd + 28);
3315 		check_prot = false;
3316 		break;
3317 	}
3318 	if (unlikely(have_dif_prot && check_prot)) {
3319 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3320 		    (cmd[1] & 0xe0)) {
3321 			mk_sense_invalid_opcode(scp);
3322 			return check_condition_result;
3323 		}
3324 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3325 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3326 		    (cmd[1] & 0xe0) == 0)
3327 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3328 				    "to DIF device\n");
3329 	}
3330 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3331 		     atomic_read(&sdeb_inject_pending))) {
3332 		num /= 2;
3333 		atomic_set(&sdeb_inject_pending, 0);
3334 	}
3335 
3336 	ret = check_device_access_params(scp, lba, num, false);
3337 	if (ret)
3338 		return ret;
3339 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3340 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3341 		     ((lba + num) > sdebug_medium_error_start))) {
3342 		/* claim unrecoverable read error */
3343 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3344 		/* set info field and valid bit for fixed descriptor */
3345 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3346 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3347 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3348 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3349 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3350 		}
3351 		scsi_set_resid(scp, scsi_bufflen(scp));
3352 		return check_condition_result;
3353 	}
3354 
3355 	sdeb_read_lock(sip);
3356 
3357 	/* DIX + T10 DIF */
3358 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3359 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3360 		case 1: /* Guard tag error */
3361 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3362 				sdeb_read_unlock(sip);
3363 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3364 				return check_condition_result;
3365 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3366 				sdeb_read_unlock(sip);
3367 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3368 				return illegal_condition_result;
3369 			}
3370 			break;
3371 		case 3: /* Reference tag error */
3372 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3373 				sdeb_read_unlock(sip);
3374 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3375 				return check_condition_result;
3376 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3377 				sdeb_read_unlock(sip);
3378 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3379 				return illegal_condition_result;
3380 			}
3381 			break;
3382 		}
3383 	}
3384 
3385 	ret = do_device_access(sip, scp, 0, lba, num, false);
3386 	sdeb_read_unlock(sip);
3387 	if (unlikely(ret == -1))
3388 		return DID_ERROR << 16;
3389 
3390 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3391 
3392 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3393 		     atomic_read(&sdeb_inject_pending))) {
3394 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3395 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3396 			atomic_set(&sdeb_inject_pending, 0);
3397 			return check_condition_result;
3398 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3399 			/* Logical block guard check failed */
3400 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3401 			atomic_set(&sdeb_inject_pending, 0);
3402 			return illegal_condition_result;
3403 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3404 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3405 			atomic_set(&sdeb_inject_pending, 0);
3406 			return illegal_condition_result;
3407 		}
3408 	}
3409 	return 0;
3410 }
3411 
3412 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3413 			     unsigned int sectors, u32 ei_lba)
3414 {
3415 	int ret;
3416 	struct t10_pi_tuple *sdt;
3417 	void *daddr;
3418 	sector_t sector = start_sec;
3419 	int ppage_offset;
3420 	int dpage_offset;
3421 	struct sg_mapping_iter diter;
3422 	struct sg_mapping_iter piter;
3423 
3424 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3425 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3426 
3427 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3428 			scsi_prot_sg_count(SCpnt),
3429 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3430 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3431 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3432 
3433 	/* For each protection page */
3434 	while (sg_miter_next(&piter)) {
3435 		dpage_offset = 0;
3436 		if (WARN_ON(!sg_miter_next(&diter))) {
3437 			ret = 0x01;
3438 			goto out;
3439 		}
3440 
3441 		for (ppage_offset = 0; ppage_offset < piter.length;
3442 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3443 			/* If we're at the end of the current
3444 			 * data page advance to the next one
3445 			 */
3446 			if (dpage_offset >= diter.length) {
3447 				if (WARN_ON(!sg_miter_next(&diter))) {
3448 					ret = 0x01;
3449 					goto out;
3450 				}
3451 				dpage_offset = 0;
3452 			}
3453 
3454 			sdt = piter.addr + ppage_offset;
3455 			daddr = diter.addr + dpage_offset;
3456 
3457 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3458 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3459 				if (ret)
3460 					goto out;
3461 			}
3462 
3463 			sector++;
3464 			ei_lba++;
3465 			dpage_offset += sdebug_sector_size;
3466 		}
3467 		diter.consumed = dpage_offset;
3468 		sg_miter_stop(&diter);
3469 	}
3470 	sg_miter_stop(&piter);
3471 
3472 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3473 	dix_writes++;
3474 
3475 	return 0;
3476 
3477 out:
3478 	dif_errors++;
3479 	sg_miter_stop(&diter);
3480 	sg_miter_stop(&piter);
3481 	return ret;
3482 }
3483 
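/*
 * Logical block provisioning support. map_storep is a bitmap with one bit
 * per unmap granularity chunk of the store; the two helpers below convert
 * between an LBA and its bit index, allowing for the unmap alignment.
 */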
3484 static unsigned long lba_to_map_index(sector_t lba)
3485 {
3486 	if (sdebug_unmap_alignment)
3487 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3488 	sector_div(lba, sdebug_unmap_granularity);
3489 	return lba;
3490 }
3491 
3492 static sector_t map_index_to_lba(unsigned long index)
3493 {
3494 	sector_t lba = index * sdebug_unmap_granularity;
3495 
3496 	if (sdebug_unmap_alignment)
3497 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3498 	return lba;
3499 }
3500 
3501 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3502 			      unsigned int *num)
3503 {
3504 	sector_t end;
3505 	unsigned int mapped;
3506 	unsigned long index;
3507 	unsigned long next;
3508 
3509 	index = lba_to_map_index(lba);
3510 	mapped = test_bit(index, sip->map_storep);
3511 
3512 	if (mapped)
3513 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3514 	else
3515 		next = find_next_bit(sip->map_storep, map_size, index);
3516 
3517 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3518 	*num = end - lba;
3519 	return mapped;
3520 }
3521 
3522 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3523 		       unsigned int len)
3524 {
3525 	sector_t end = lba + len;
3526 
3527 	while (lba < end) {
3528 		unsigned long index = lba_to_map_index(lba);
3529 
3530 		if (index < map_size)
3531 			set_bit(index, sip->map_storep);
3532 
3533 		lba = map_index_to_lba(index + 1);
3534 	}
3535 }
3536 
3537 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3538 			 unsigned int len)
3539 {
3540 	sector_t end = lba + len;
3541 	u8 *fsp = sip->storep;
3542 
3543 	while (lba < end) {
3544 		unsigned long index = lba_to_map_index(lba);
3545 
3546 		if (lba == map_index_to_lba(index) &&
3547 		    lba + sdebug_unmap_granularity <= end &&
3548 		    index < map_size) {
3549 			clear_bit(index, sip->map_storep);
3550 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3551 				memset(fsp + lba * sdebug_sector_size,
3552 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3553 				       sdebug_sector_size *
3554 				       sdebug_unmap_granularity);
3555 			}
3556 			if (sip->dif_storep) {
3557 				memset(sip->dif_storep + lba, 0xff,
3558 				       sizeof(*sip->dif_storep) *
3559 				       sdebug_unmap_granularity);
3560 			}
3561 		}
3562 		lba = map_index_to_lba(index + 1);
3563 	}
3564 }
3565 
3566 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3567 {
3568 	bool check_prot;
3569 	u32 num;
3570 	u32 ei_lba;
3571 	int ret;
3572 	u64 lba;
3573 	struct sdeb_store_info *sip = devip2sip(devip, true);
3574 	u8 *cmd = scp->cmnd;
3575 
3576 	switch (cmd[0]) {
3577 	case WRITE_16:
3578 		ei_lba = 0;
3579 		lba = get_unaligned_be64(cmd + 2);
3580 		num = get_unaligned_be32(cmd + 10);
3581 		check_prot = true;
3582 		break;
3583 	case WRITE_10:
3584 		ei_lba = 0;
3585 		lba = get_unaligned_be32(cmd + 2);
3586 		num = get_unaligned_be16(cmd + 7);
3587 		check_prot = true;
3588 		break;
3589 	case WRITE_6:
3590 		ei_lba = 0;
3591 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3592 		      (u32)(cmd[1] & 0x1f) << 16;
3593 		num = (0 == cmd[4]) ? 256 : cmd[4];
3594 		check_prot = true;
3595 		break;
3596 	case WRITE_12:
3597 		ei_lba = 0;
3598 		lba = get_unaligned_be32(cmd + 2);
3599 		num = get_unaligned_be32(cmd + 6);
3600 		check_prot = true;
3601 		break;
3602 	case XDWRITEREAD_10:	/* XDWRITEREAD(10), as in resp_read_dt0 */
3603 		ei_lba = 0;
3604 		lba = get_unaligned_be32(cmd + 2);
3605 		num = get_unaligned_be16(cmd + 7);
3606 		check_prot = false;
3607 		break;
3608 	default:	/* assume WRITE(32) */
3609 		lba = get_unaligned_be64(cmd + 12);
3610 		ei_lba = get_unaligned_be32(cmd + 20);
3611 		num = get_unaligned_be32(cmd + 28);
3612 		check_prot = false;
3613 		break;
3614 	}
3615 	if (unlikely(have_dif_prot && check_prot)) {
3616 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3617 		    (cmd[1] & 0xe0)) {
3618 			mk_sense_invalid_opcode(scp);
3619 			return check_condition_result;
3620 		}
3621 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3622 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3623 		    (cmd[1] & 0xe0) == 0)
3624 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3625 				    "to DIF device\n");
3626 	}
3627 
3628 	sdeb_write_lock(sip);
3629 	ret = check_device_access_params(scp, lba, num, true);
3630 	if (ret) {
3631 		sdeb_write_unlock(sip);
3632 		return ret;
3633 	}
3634 
3635 	/* DIX + T10 DIF */
3636 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3637 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3638 		case 1: /* Guard tag error */
3639 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3640 				sdeb_write_unlock(sip);
3641 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3642 				return illegal_condition_result;
3643 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3644 				sdeb_write_unlock(sip);
3645 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3646 				return check_condition_result;
3647 			}
3648 			break;
3649 		case 3: /* Reference tag error */
3650 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3651 				sdeb_write_unlock(sip);
3652 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3653 				return illegal_condition_result;
3654 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3655 				sdeb_write_unlock(sip);
3656 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3657 				return check_condition_result;
3658 			}
3659 			break;
3660 		}
3661 	}
3662 
3663 	ret = do_device_access(sip, scp, 0, lba, num, true);
3664 	if (unlikely(scsi_debug_lbp()))
3665 		map_region(sip, lba, num);
3666 	/* If ZBC zone then bump its write pointer */
3667 	if (sdebug_dev_is_zoned(devip))
3668 		zbc_inc_wp(devip, lba, num);
3669 	sdeb_write_unlock(sip);
3670 	if (unlikely(-1 == ret))
3671 		return DID_ERROR << 16;
3672 	else if (unlikely(sdebug_verbose &&
3673 			  (ret < (num * sdebug_sector_size))))
3674 		sdev_printk(KERN_INFO, scp->device,
3675 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3676 			    my_name, num * sdebug_sector_size, ret);
3677 
3678 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3679 		     atomic_read(&sdeb_inject_pending))) {
3680 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3681 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3682 			atomic_set(&sdeb_inject_pending, 0);
3683 			return check_condition_result;
3684 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3685 			/* Logical block guard check failed */
3686 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3687 			atomic_set(&sdeb_inject_pending, 0);
3688 			return illegal_condition_result;
3689 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3690 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3691 			atomic_set(&sdeb_inject_pending, 0);
3692 			return illegal_condition_result;
3693 		}
3694 	}
3695 	return 0;
3696 }
3697 
3698 /*
3699  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3700  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3701  */
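/*
 * The data-out buffer begins with a header of lbdof logical blocks holding
 * the 32 byte LBA range descriptors (the first 32 bytes are taken by the
 * parameter list header); the blocks to be written follow at byte offset
 * lbdof * block_size (lbdof_blen below).
 */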
3702 static int resp_write_scat(struct scsi_cmnd *scp,
3703 			   struct sdebug_dev_info *devip)
3704 {
3705 	u8 *cmd = scp->cmnd;
3706 	u8 *lrdp = NULL;
3707 	u8 *up;
3708 	struct sdeb_store_info *sip = devip2sip(devip, true);
3709 	u8 wrprotect;
3710 	u16 lbdof, num_lrd, k;
3711 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3712 	u32 lb_size = sdebug_sector_size;
3713 	u32 ei_lba;
3714 	u64 lba;
3715 	int ret, res;
3716 	bool is_16;
3717 	static const u32 lrd_size = 32; /* + parameter list header size */
3718 
3719 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3720 		is_16 = false;
3721 		wrprotect = (cmd[10] >> 5) & 0x7;
3722 		lbdof = get_unaligned_be16(cmd + 12);
3723 		num_lrd = get_unaligned_be16(cmd + 16);
3724 		bt_len = get_unaligned_be32(cmd + 28);
3725 	} else {        /* that leaves WRITE SCATTERED(16) */
3726 		is_16 = true;
3727 		wrprotect = (cmd[2] >> 5) & 0x7;
3728 		lbdof = get_unaligned_be16(cmd + 4);
3729 		num_lrd = get_unaligned_be16(cmd + 8);
3730 		bt_len = get_unaligned_be32(cmd + 10);
3731 		if (unlikely(have_dif_prot)) {
3732 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3733 			    wrprotect) {
3734 				mk_sense_invalid_opcode(scp);
3735 				return illegal_condition_result;
3736 			}
3737 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3738 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3739 			     wrprotect == 0)
3740 				sdev_printk(KERN_ERR, scp->device,
3741 					    "Unprotected WR to DIF device\n");
3742 		}
3743 	}
3744 	if ((num_lrd == 0) || (bt_len == 0))
3745 		return 0;       /* T10 says these do-nothings are not errors */
3746 	if (lbdof == 0) {
3747 		if (sdebug_verbose)
3748 			sdev_printk(KERN_INFO, scp->device,
3749 				"%s: %s: LB Data Offset field bad\n",
3750 				my_name, __func__);
3751 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3752 		return illegal_condition_result;
3753 	}
3754 	lbdof_blen = lbdof * lb_size;
3755 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3756 		if (sdebug_verbose)
3757 			sdev_printk(KERN_INFO, scp->device,
3758 				"%s: %s: LBA range descriptors don't fit\n",
3759 				my_name, __func__);
3760 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3761 		return illegal_condition_result;
3762 	}
3763 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3764 	if (lrdp == NULL)
3765 		return SCSI_MLQUEUE_HOST_BUSY;
3766 	if (sdebug_verbose)
3767 		sdev_printk(KERN_INFO, scp->device,
3768 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3769 			my_name, __func__, lbdof_blen);
3770 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3771 	if (res == -1) {
3772 		ret = DID_ERROR << 16;
3773 		goto err_out;
3774 	}
3775 
3776 	sdeb_write_lock(sip);
3777 	sg_off = lbdof_blen;
3778 	/* Spec says the Buffer Transfer Length field counts LBs in dout */
3779 	cum_lb = 0;
3780 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3781 		lba = get_unaligned_be64(up + 0);
3782 		num = get_unaligned_be32(up + 8);
3783 		if (sdebug_verbose)
3784 			sdev_printk(KERN_INFO, scp->device,
3785 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3786 				my_name, __func__, k, lba, num, sg_off);
3787 		if (num == 0)
3788 			continue;
3789 		ret = check_device_access_params(scp, lba, num, true);
3790 		if (ret)
3791 			goto err_out_unlock;
3792 		num_by = num * lb_size;
3793 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3794 
3795 		if ((cum_lb + num) > bt_len) {
3796 			if (sdebug_verbose)
3797 				sdev_printk(KERN_INFO, scp->device,
3798 				    "%s: %s: sum of blocks > data provided\n",
3799 				    my_name, __func__);
3800 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3801 					0);
3802 			ret = illegal_condition_result;
3803 			goto err_out_unlock;
3804 		}
3805 
3806 		/* DIX + T10 DIF */
3807 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3808 			int prot_ret = prot_verify_write(scp, lba, num,
3809 							 ei_lba);
3810 
3811 			if (prot_ret) {
3812 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3813 						prot_ret);
3814 				ret = illegal_condition_result;
3815 				goto err_out_unlock;
3816 			}
3817 		}
3818 
3819 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3820 		/* If ZBC zone then bump its write pointer */
3821 		if (sdebug_dev_is_zoned(devip))
3822 			zbc_inc_wp(devip, lba, num);
3823 		if (unlikely(scsi_debug_lbp()))
3824 			map_region(sip, lba, num);
3825 		if (unlikely(-1 == ret)) {
3826 			ret = DID_ERROR << 16;
3827 			goto err_out_unlock;
3828 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3829 			sdev_printk(KERN_INFO, scp->device,
3830 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3831 			    my_name, num_by, ret);
3832 
3833 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3834 			     atomic_read(&sdeb_inject_pending))) {
3835 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3836 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3837 				atomic_set(&sdeb_inject_pending, 0);
3838 				ret = check_condition_result;
3839 				goto err_out_unlock;
3840 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3841 				/* Logical block guard check failed */
3842 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3843 				atomic_set(&sdeb_inject_pending, 0);
3844 				ret = illegal_condition_result;
3845 				goto err_out_unlock;
3846 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3847 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3848 				atomic_set(&sdeb_inject_pending, 0);
3849 				ret = illegal_condition_result;
3850 				goto err_out_unlock;
3851 			}
3852 		}
3853 		sg_off += num_by;
3854 		cum_lb += num;
3855 	}
3856 	ret = 0;
3857 err_out_unlock:
3858 	sdeb_write_unlock(sip);
3859 err_out:
3860 	kfree(lrdp);
3861 	return ret;
3862 }
3863 
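/*
 * WRITE SAME: obtain (or zero, when NDOB is set) a single logical block
 * and replicate it across the given LBA range. If the UNMAP bit is set
 * and logical block provisioning is enabled, deallocate the range instead.
 */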
3864 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3865 			   u32 ei_lba, bool unmap, bool ndob)
3866 {
3867 	struct scsi_device *sdp = scp->device;
3868 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3869 	unsigned long long i;
3870 	u64 block, lbaa;
3871 	u32 lb_size = sdebug_sector_size;
3872 	int ret;
3873 	struct sdeb_store_info *sip = devip2sip(devip, true);
3875 	u8 *fs1p;
3876 	u8 *fsp;
3877 
3878 	sdeb_write_lock(sip);
3879 
3880 	ret = check_device_access_params(scp, lba, num, true);
3881 	if (ret) {
3882 		sdeb_write_unlock(sip);
3883 		return ret;
3884 	}
3885 
3886 	if (unmap && scsi_debug_lbp()) {
3887 		unmap_region(sip, lba, num);
3888 		goto out;
3889 	}
3890 	lbaa = lba;
3891 	block = do_div(lbaa, sdebug_store_sectors);
3892 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3893 	fsp = sip->storep;
3894 	fs1p = fsp + (block * lb_size);
3895 	if (ndob) {
3896 		memset(fs1p, 0, lb_size);
3897 		ret = 0;
3898 	} else
3899 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3900 
3901 	if (-1 == ret) {
3902 		sdeb_write_unlock(sip);
3903 		return DID_ERROR << 16;
3904 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3905 		sdev_printk(KERN_INFO, scp->device,
3906 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3907 			    my_name, "write same", lb_size, ret);
3908 
3909 	/* Copy first sector to remaining blocks */
3910 	for (i = 1 ; i < num ; i++) {
3911 		lbaa = lba + i;
3912 		block = do_div(lbaa, sdebug_store_sectors);
3913 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3914 	}
3915 	if (scsi_debug_lbp())
3916 		map_region(sip, lba, num);
3917 	/* If ZBC zone then bump its write pointer */
3918 	if (sdebug_dev_is_zoned(devip))
3919 		zbc_inc_wp(devip, lba, num);
3920 out:
3921 	sdeb_write_unlock(sip);
3922 
3923 	return 0;
3924 }
3925 
3926 static int resp_write_same_10(struct scsi_cmnd *scp,
3927 			      struct sdebug_dev_info *devip)
3928 {
3929 	u8 *cmd = scp->cmnd;
3930 	u32 lba;
3931 	u16 num;
3932 	u32 ei_lba = 0;
3933 	bool unmap = false;
3934 
3935 	if (cmd[1] & 0x8) {
3936 		if (sdebug_lbpws10 == 0) {
3937 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3938 			return check_condition_result;
3939 		} else
3940 			unmap = true;
3941 	}
3942 	lba = get_unaligned_be32(cmd + 2);
3943 	num = get_unaligned_be16(cmd + 7);
3944 	if (num > sdebug_write_same_length) {
3945 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3946 		return check_condition_result;
3947 	}
3948 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3949 }
3950 
3951 static int resp_write_same_16(struct scsi_cmnd *scp,
3952 			      struct sdebug_dev_info *devip)
3953 {
3954 	u8 *cmd = scp->cmnd;
3955 	u64 lba;
3956 	u32 num;
3957 	u32 ei_lba = 0;
3958 	bool unmap = false;
3959 	bool ndob = false;
3960 
3961 	if (cmd[1] & 0x8) {	/* UNMAP */
3962 		if (sdebug_lbpws == 0) {
3963 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3964 			return check_condition_result;
3965 		} else
3966 			unmap = true;
3967 	}
3968 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3969 		ndob = true;
3970 	lba = get_unaligned_be64(cmd + 2);
3971 	num = get_unaligned_be32(cmd + 10);
3972 	if (num > sdebug_write_same_length) {
3973 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3974 		return check_condition_result;
3975 	}
3976 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3977 }
3978 
3979 /* Note the mode field is in the same position as the (lower) service action
3980  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3981  * each mode of this command be reported separately; left for the future. */
3982 static int resp_write_buffer(struct scsi_cmnd *scp,
3983 			     struct sdebug_dev_info *devip)
3984 {
3985 	u8 *cmd = scp->cmnd;
3986 	struct scsi_device *sdp = scp->device;
3987 	struct sdebug_dev_info *dp;
3988 	u8 mode;
3989 
3990 	mode = cmd[1] & 0x1f;
3991 	switch (mode) {
3992 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3993 		/* set UAs on this device only */
3994 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3995 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3996 		break;
3997 	case 0x5:	/* download MC, save and ACT */
3998 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3999 		break;
4000 	case 0x6:	/* download MC with offsets and ACT */
4001 		/* set UAs on most devices (LUs) in this target */
4002 		list_for_each_entry(dp,
4003 				    &devip->sdbg_host->dev_info_list,
4004 				    dev_list)
4005 			if (dp->target == sdp->id) {
4006 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4007 				if (devip != dp)
4008 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4009 						dp->uas_bm);
4010 			}
4011 		break;
4012 	case 0x7:	/* download MC with offsets, save, and ACT */
4013 		/* set UA on all devices (LUs) in this target */
4014 		list_for_each_entry(dp,
4015 				    &devip->sdbg_host->dev_info_list,
4016 				    dev_list)
4017 			if (dp->target == sdp->id)
4018 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4019 					dp->uas_bm);
4020 		break;
4021 	default:
4022 		/* do nothing for this command for other mode values */
4023 		break;
4024 	}
4025 	return 0;
4026 }
4027 
4028 static int resp_comp_write(struct scsi_cmnd *scp,
4029 			   struct sdebug_dev_info *devip)
4030 {
4031 	u8 *cmd = scp->cmnd;
4032 	u8 *arr;
4033 	struct sdeb_store_info *sip = devip2sip(devip, true);
4034 	u64 lba;
4035 	u32 dnum;
4036 	u32 lb_size = sdebug_sector_size;
4037 	u8 num;
4038 	int ret;
4039 	int retval = 0;
4040 
4041 	lba = get_unaligned_be64(cmd + 2);
4042 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4043 	if (0 == num)
4044 		return 0;	/* degenerate case, not an error */
4045 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4046 	    (cmd[1] & 0xe0)) {
4047 		mk_sense_invalid_opcode(scp);
4048 		return check_condition_result;
4049 	}
4050 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4051 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4052 	    (cmd[1] & 0xe0) == 0)
4053 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4054 			    "to DIF device\n");
4055 	ret = check_device_access_params(scp, lba, num, false);
4056 	if (ret)
4057 		return ret;
4058 	dnum = 2 * num;
4059 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4060 	if (NULL == arr) {
4061 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4062 				INSUFF_RES_ASCQ);
4063 		return check_condition_result;
4064 	}
4065 
4066 	sdeb_write_lock(sip);
4067 
4068 	ret = do_dout_fetch(scp, dnum, arr);
4069 	if (ret == -1) {
4070 		retval = DID_ERROR << 16;
4071 		goto cleanup;
4072 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4073 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4074 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4075 			    dnum * lb_size, ret);
4076 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4077 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4078 		retval = check_condition_result;
4079 		goto cleanup;
4080 	}
4081 	if (scsi_debug_lbp())
4082 		map_region(sip, lba, num);
4083 cleanup:
4084 	sdeb_write_unlock(sip);
4085 	kfree(arr);
4086 	return retval;
4087 }
4088 
4089 struct unmap_block_desc {
4090 	__be64	lba;
4091 	__be32	blocks;
4092 	__be32	__reserved;
4093 };
4094 
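/*
 * UNMAP parameter data: an 8 byte parameter list header followed by block
 * descriptors in the 16 byte format above (8 byte starting LBA, 4 byte
 * block count, 4 reserved bytes).
 */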
4095 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4096 {
4097 	unsigned char *buf;
4098 	struct unmap_block_desc *desc;
4099 	struct sdeb_store_info *sip = devip2sip(devip, true);
4100 	unsigned int i, payload_len, descriptors;
4101 	int ret;
4102 
4103 	if (!scsi_debug_lbp())
4104 		return 0;	/* fib and say it's done */
4105 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4106 	BUG_ON(scsi_bufflen(scp) != payload_len);
4107 
4108 	descriptors = (payload_len - 8) / 16;
4109 	if (descriptors > sdebug_unmap_max_desc) {
4110 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4111 		return check_condition_result;
4112 	}
4113 
4114 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4115 	if (!buf) {
4116 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4117 				INSUFF_RES_ASCQ);
4118 		return check_condition_result;
4119 	}
4120 
4121 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4122 
4123 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4124 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4125 
4126 	desc = (void *)&buf[8];
4127 
4128 	sdeb_write_lock(sip);
4129 
4130 	for (i = 0 ; i < descriptors ; i++) {
4131 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4132 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4133 
4134 		ret = check_device_access_params(scp, lba, num, true);
4135 		if (ret)
4136 			goto out;
4137 
4138 		unmap_region(sip, lba, num);
4139 	}
4140 
4141 	ret = 0;
4142 
4143 out:
4144 	sdeb_write_unlock(sip);
4145 	kfree(buf);
4146 
4147 	return ret;
4148 }
4149 
4150 #define SDEBUG_GET_LBA_STATUS_LEN 32
4151 
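/*
 * GET LBA STATUS: report whether the given LBA is mapped or deallocated.
 * Only one descriptor is returned; map_state() supplies the count of
 * following blocks that share the same provisioning state.
 */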
4152 static int resp_get_lba_status(struct scsi_cmnd *scp,
4153 			       struct sdebug_dev_info *devip)
4154 {
4155 	u8 *cmd = scp->cmnd;
4156 	u64 lba;
4157 	u32 alloc_len, mapped, num;
4158 	int ret;
4159 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4160 
4161 	lba = get_unaligned_be64(cmd + 2);
4162 	alloc_len = get_unaligned_be32(cmd + 10);
4163 
4164 	if (alloc_len < 24)
4165 		return 0;
4166 
4167 	ret = check_device_access_params(scp, lba, 1, false);
4168 	if (ret)
4169 		return ret;
4170 
4171 	if (scsi_debug_lbp()) {
4172 		struct sdeb_store_info *sip = devip2sip(devip, true);
4173 
4174 		mapped = map_state(sip, lba, &num);
4175 	} else {
4176 		mapped = 1;
4177 		/* following just in case virtual_gb changed */
4178 		sdebug_capacity = get_sdebug_capacity();
4179 		if (sdebug_capacity - lba <= 0xffffffff)
4180 			num = sdebug_capacity - lba;
4181 		else
4182 			num = 0xffffffff;
4183 	}
4184 
4185 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4186 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4187 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4188 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4189 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4190 
4191 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4192 }
4193 
4194 static int resp_sync_cache(struct scsi_cmnd *scp,
4195 			   struct sdebug_dev_info *devip)
4196 {
4197 	int res = 0;
4198 	u64 lba;
4199 	u32 num_blocks;
4200 	u8 *cmd = scp->cmnd;
4201 
4202 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4203 		lba = get_unaligned_be32(cmd + 2);
4204 		num_blocks = get_unaligned_be16(cmd + 7);
4205 	} else {				/* SYNCHRONIZE_CACHE(16) */
4206 		lba = get_unaligned_be64(cmd + 2);
4207 		num_blocks = get_unaligned_be32(cmd + 10);
4208 	}
4209 	if (lba + num_blocks > sdebug_capacity) {
4210 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4211 		return check_condition_result;
4212 	}
4213 	if (!write_since_sync || (cmd[1] & 0x2))
4214 		res = SDEG_RES_IMMED_MASK;
4215 	else		/* delay if write_since_sync and IMMED clear */
4216 		write_since_sync = false;
4217 	return res;
4218 }
4219 
4220 /*
4221  * Assuming LBA+num_blocks is not out of range, this function returns
4222  * CONDITION MET if the specified blocks will fit (or already sit) in the
4223  * cache, and GOOD status otherwise. We model a disk with a big cache, so
4224  * CONDITION MET is always yielded. As a side effect it tries to bring the
4225  * addressed range of the store into the cache(s) of the CPU(s).
4226  */
4227 static int resp_pre_fetch(struct scsi_cmnd *scp,
4228 			  struct sdebug_dev_info *devip)
4229 {
4230 	int res = 0;
4231 	u64 lba;
4232 	u64 block, rest = 0;
4233 	u32 nblks;
4234 	u8 *cmd = scp->cmnd;
4235 	struct sdeb_store_info *sip = devip2sip(devip, true);
4236 	u8 *fsp = sip->storep;
4237 
4238 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4239 		lba = get_unaligned_be32(cmd + 2);
4240 		nblks = get_unaligned_be16(cmd + 7);
4241 	} else {			/* PRE-FETCH(16) */
4242 		lba = get_unaligned_be64(cmd + 2);
4243 		nblks = get_unaligned_be32(cmd + 10);
4244 	}
4245 	if (lba + nblks > sdebug_capacity) {
4246 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4247 		return check_condition_result;
4248 	}
4249 	if (!fsp)
4250 		goto fini;
4251 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4252 	block = do_div(lba, sdebug_store_sectors);
4253 	if (block + nblks > sdebug_store_sectors)
4254 		rest = block + nblks - sdebug_store_sectors;
4255 
4256 	/* Try to bring the PRE-FETCH range into CPU's cache */
4257 	sdeb_read_lock(sip);
4258 	prefetch_range(fsp + (sdebug_sector_size * block),
4259 		       (nblks - rest) * sdebug_sector_size);
4260 	if (rest)
4261 		prefetch_range(fsp, rest * sdebug_sector_size);
4262 	sdeb_read_unlock(sip);
4263 fini:
4264 	if (cmd[1] & 0x2)
4265 		res = SDEG_RES_IMMED_MASK;
4266 	return res | condition_met_result;
4267 }
4268 
4269 #define RL_BUCKET_ELEMS 8
4270 
4271 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4272  * (W-LUN), the normal Linux scanning logic does not associate it with a
4273  * device (e.g. /dev/sg7). The following magic will make that association:
4274  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4275  * where <n> is a host number. If there are multiple targets in a host then
4276  * the above will associate a W-LUN to each target. To only get a W-LUN
4277  * for target 2, then use "echo '- 2 49409' > scan" .
4278  */
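/*
 * The response is built in buckets of RL_BUCKET_ELEMS (8) slots of 8 bytes
 * each; the first slot of the first bucket carries the 8 byte response
 * header, which is why the loop below relies on the header and a LUN
 * entry having the same size.
 */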
4279 static int resp_report_luns(struct scsi_cmnd *scp,
4280 			    struct sdebug_dev_info *devip)
4281 {
4282 	unsigned char *cmd = scp->cmnd;
4283 	unsigned int alloc_len;
4284 	unsigned char select_report;
4285 	u64 lun;
4286 	struct scsi_lun *lun_p;
4287 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4288 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4289 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4290 	unsigned int tlun_cnt;	/* total LUN count */
4291 	unsigned int rlen;	/* response length (in bytes) */
4292 	int k, j, n, res;
4293 	unsigned int off_rsp = 0;
4294 	const int sz_lun = sizeof(struct scsi_lun);
4295 
4296 	clear_luns_changed_on_target(devip);
4297 
4298 	select_report = cmd[2];
4299 	alloc_len = get_unaligned_be32(cmd + 6);
4300 
4301 	if (alloc_len < 4) {
4302 		pr_err("alloc len too small %d\n", alloc_len);
4303 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4304 		return check_condition_result;
4305 	}
4306 
4307 	switch (select_report) {
4308 	case 0:		/* all LUNs apart from W-LUNs */
4309 		lun_cnt = sdebug_max_luns;
4310 		wlun_cnt = 0;
4311 		break;
4312 	case 1:		/* only W-LUNs */
4313 		lun_cnt = 0;
4314 		wlun_cnt = 1;
4315 		break;
4316 	case 2:		/* all LUNs */
4317 		lun_cnt = sdebug_max_luns;
4318 		wlun_cnt = 1;
4319 		break;
4320 	case 0x10:	/* only administrative LUs */
4321 	case 0x11:	/* see SPC-5 */
4322 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4323 	default:
4324 		pr_debug("select report invalid %d\n", select_report);
4325 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4326 		return check_condition_result;
4327 	}
4328 
4329 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4330 		--lun_cnt;
4331 
4332 	tlun_cnt = lun_cnt + wlun_cnt;
4333 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4334 	scsi_set_resid(scp, scsi_bufflen(scp));
4335 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4336 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4337 
4338 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4339 	lun = sdebug_no_lun_0 ? 1 : 0;
4340 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4341 		memset(arr, 0, sizeof(arr));
4342 		lun_p = (struct scsi_lun *)&arr[0];
4343 		if (k == 0) {
4344 			put_unaligned_be32(rlen, &arr[0]);
4345 			++lun_p;
4346 			j = 1;
4347 		}
4348 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4349 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4350 				break;
4351 			int_to_scsilun(lun++, lun_p);
4352 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4353 				lun_p->scsi_lun[0] |= 0x40;
4354 		}
4355 		if (j < RL_BUCKET_ELEMS)
4356 			break;
4357 		n = j * sz_lun;
4358 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4359 		if (res)
4360 			return res;
4361 		off_rsp += n;
4362 	}
4363 	if (wlun_cnt) {
4364 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4365 		++j;
4366 	}
4367 	if (j > 0)
4368 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4369 	return res;
4370 }
4371 
4372 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4373 {
4374 	bool is_bytchk3 = false;
4375 	u8 bytchk;
4376 	int ret, j;
4377 	u32 vnum, a_num, off;
4378 	const u32 lb_size = sdebug_sector_size;
4379 	u64 lba;
4380 	u8 *arr;
4381 	u8 *cmd = scp->cmnd;
4382 	struct sdeb_store_info *sip = devip2sip(devip, true);
4383 
4384 	bytchk = (cmd[1] >> 1) & 0x3;
4385 	if (bytchk == 0) {
4386 		return 0;	/* always claim internal verify okay */
4387 	} else if (bytchk == 2) {
4388 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4389 		return check_condition_result;
4390 	} else if (bytchk == 3) {
4391 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4392 	}
4393 	switch (cmd[0]) {
4394 	case VERIFY_16:
4395 		lba = get_unaligned_be64(cmd + 2);
4396 		vnum = get_unaligned_be32(cmd + 10);
4397 		break;
4398 	case VERIFY:		/* is VERIFY(10) */
4399 		lba = get_unaligned_be32(cmd + 2);
4400 		vnum = get_unaligned_be16(cmd + 7);
4401 		break;
4402 	default:
4403 		mk_sense_invalid_opcode(scp);
4404 		return check_condition_result;
4405 	}
4406 	if (vnum == 0)
4407 		return 0;	/* not an error */
4408 	a_num = is_bytchk3 ? 1 : vnum;
4409 	/* Treat following check like one for read (i.e. no write) access */
4410 	ret = check_device_access_params(scp, lba, a_num, false);
4411 	if (ret)
4412 		return ret;
4413 
4414 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4415 	if (!arr) {
4416 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4417 				INSUFF_RES_ASCQ);
4418 		return check_condition_result;
4419 	}
4420 	/* Not changing store, so only need read access */
4421 	sdeb_read_lock(sip);
4422 
4423 	ret = do_dout_fetch(scp, a_num, arr);
4424 	if (ret == -1) {
4425 		ret = DID_ERROR << 16;
4426 		goto cleanup;
4427 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4428 		sdev_printk(KERN_INFO, scp->device,
4429 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4430 			    my_name, __func__, a_num * lb_size, ret);
4431 	}
4432 	if (is_bytchk3) {
4433 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4434 			memcpy(arr + off, arr, lb_size);
4435 	}
4436 	ret = 0;
4437 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4438 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4439 		ret = check_condition_result;
4440 		goto cleanup;
4441 	}
4442 cleanup:
4443 	sdeb_read_unlock(sip);
4444 	kfree(arr);
4445 	return ret;
4446 }
4447 
4448 #define RZONES_DESC_HD 64
4449 
4450 /* Report zones depending on start LBA and reporting options */
4451 static int resp_report_zones(struct scsi_cmnd *scp,
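/*
 * Both the report header and each zone descriptor occupy RZONES_DESC_HD
 * (64) bytes, so rep_max_zones below is derived from the allocation length
 * minus one header's worth of bytes.
 */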
4452 			     struct sdebug_dev_info *devip)
4453 {
4454 	unsigned int rep_max_zones, nrz = 0;
4455 	int ret = 0;
4456 	u32 alloc_len, rep_opts, rep_len;
4457 	bool partial;
4458 	u64 lba, zs_lba;
4459 	u8 *arr = NULL, *desc;
4460 	u8 *cmd = scp->cmnd;
4461 	struct sdeb_zone_state *zsp = NULL;
4462 	struct sdeb_store_info *sip = devip2sip(devip, false);
4463 
4464 	if (!sdebug_dev_is_zoned(devip)) {
4465 		mk_sense_invalid_opcode(scp);
4466 		return check_condition_result;
4467 	}
4468 	zs_lba = get_unaligned_be64(cmd + 2);
4469 	alloc_len = get_unaligned_be32(cmd + 10);
4470 	if (alloc_len == 0)
4471 		return 0;	/* not an error */
4472 	rep_opts = cmd[14] & 0x3f;
4473 	partial = cmd[14] & 0x80;
4474 
4475 	if (zs_lba >= sdebug_capacity) {
4476 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4477 		return check_condition_result;
4478 	}
4479 
4480 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4481 
4482 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4483 	if (!arr) {
4484 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4485 				INSUFF_RES_ASCQ);
4486 		return check_condition_result;
4487 	}
4488 
4489 	sdeb_read_lock(sip);
4490 
4491 	desc = arr + 64;
4492 	for (lba = zs_lba; lba < sdebug_capacity;
4493 	     lba = zsp->z_start + zsp->z_size) {
4494 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4495 			break;
4496 		zsp = zbc_zone(devip, lba);
4497 		switch (rep_opts) {
4498 		case 0x00:
4499 			/* All zones */
4500 			break;
4501 		case 0x01:
4502 			/* Empty zones */
4503 			if (zsp->z_cond != ZC1_EMPTY)
4504 				continue;
4505 			break;
4506 		case 0x02:
4507 			/* Implicit open zones */
4508 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4509 				continue;
4510 			break;
4511 		case 0x03:
4512 			/* Explicit open zones */
4513 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4514 				continue;
4515 			break;
4516 		case 0x04:
4517 			/* Closed zones */
4518 			if (zsp->z_cond != ZC4_CLOSED)
4519 				continue;
4520 			break;
4521 		case 0x05:
4522 			/* Full zones */
4523 			if (zsp->z_cond != ZC5_FULL)
4524 				continue;
4525 			break;
4526 		case 0x06:
4527 		case 0x07:
4528 		case 0x10:
4529 			/*
4530 			 * Read-only, offline, reset WP recommended are
4531 			 * not emulated: no zones to report.
4532 			 */
4533 			continue;
4534 		case 0x11:
4535 			/* non-seq-resource set */
4536 			if (!zsp->z_non_seq_resource)
4537 				continue;
4538 			break;
4539 		case 0x3e:
4540 			/* All zones except gap zones. */
4541 			if (zbc_zone_is_gap(zsp))
4542 				continue;
4543 			break;
4544 		case 0x3f:
4545 			/* Not write pointer (conventional) zones */
4546 			if (zbc_zone_is_seq(zsp))
4547 				continue;
4548 			break;
4549 		default:
4550 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4551 					INVALID_FIELD_IN_CDB, 0);
4552 			ret = check_condition_result;
4553 			goto fini;
4554 		}
4555 
4556 		if (nrz < rep_max_zones) {
4557 			/* Fill zone descriptor */
4558 			desc[0] = zsp->z_type;
4559 			desc[1] = zsp->z_cond << 4;
4560 			if (zsp->z_non_seq_resource)
4561 				desc[1] |= 1 << 1;
4562 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4563 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4564 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4565 			desc += 64;
4566 		}
4567 
4568 		if (partial && nrz >= rep_max_zones)
4569 			break;
4570 
4571 		nrz++;
4572 	}
4573 
4574 	/* Report header */
4575 	/* Zone list length. */
4576 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4577 	/* Maximum LBA */
4578 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4579 	/* Zone starting LBA granularity. */
4580 	if (devip->zcap < devip->zsize)
4581 		put_unaligned_be64(devip->zsize, arr + 16);
4582 
4583 	rep_len = (unsigned long)desc - (unsigned long)arr;
4584 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4585 
4586 fini:
4587 	sdeb_read_unlock(sip);
4588 	kfree(arr);
4589 	return ret;
4590 }
4591 
4592 /* Logic transplanted from tcmu-runner, file_zbc.c */
4593 static void zbc_open_all(struct sdebug_dev_info *devip)
4594 {
4595 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4596 	unsigned int i;
4597 
4598 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4599 		if (zsp->z_cond == ZC4_CLOSED)
4600 			zbc_open_zone(devip, &devip->zstate[i], true);
4601 	}
4602 }
4603 
4604 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4605 {
4606 	int res = 0;
4607 	u64 z_id;
4608 	enum sdebug_z_cond zc;
4609 	u8 *cmd = scp->cmnd;
4610 	struct sdeb_zone_state *zsp;
4611 	bool all = cmd[14] & 0x01;
4612 	struct sdeb_store_info *sip = devip2sip(devip, false);
4613 
4614 	if (!sdebug_dev_is_zoned(devip)) {
4615 		mk_sense_invalid_opcode(scp);
4616 		return check_condition_result;
4617 	}
4618 
4619 	sdeb_write_lock(sip);
4620 
4621 	if (all) {
4622 		/* Check if all closed zones can be opened */
4623 		if (devip->max_open &&
4624 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4625 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4626 					INSUFF_ZONE_ASCQ);
4627 			res = check_condition_result;
4628 			goto fini;
4629 		}
4630 		/* Open all closed zones */
4631 		zbc_open_all(devip);
4632 		goto fini;
4633 	}
4634 
4635 	/* Open the specified zone */
4636 	z_id = get_unaligned_be64(cmd + 2);
4637 	if (z_id >= sdebug_capacity) {
4638 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4639 		res = check_condition_result;
4640 		goto fini;
4641 	}
4642 
4643 	zsp = zbc_zone(devip, z_id);
4644 	if (z_id != zsp->z_start) {
4645 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4646 		res = check_condition_result;
4647 		goto fini;
4648 	}
4649 	if (zbc_zone_is_conv(zsp)) {
4650 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4651 		res = check_condition_result;
4652 		goto fini;
4653 	}
4654 
4655 	zc = zsp->z_cond;
4656 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4657 		goto fini;
4658 
4659 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4660 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4661 				INSUFF_ZONE_ASCQ);
4662 		res = check_condition_result;
4663 		goto fini;
4664 	}
4665 
4666 	zbc_open_zone(devip, zsp, true);
4667 fini:
4668 	sdeb_write_unlock(sip);
4669 	return res;
4670 }
4671 
4672 static void zbc_close_all(struct sdebug_dev_info *devip)
4673 {
4674 	unsigned int i;
4675 
4676 	for (i = 0; i < devip->nr_zones; i++)
4677 		zbc_close_zone(devip, &devip->zstate[i]);
4678 }
4679 
4680 static int resp_close_zone(struct scsi_cmnd *scp,
4681 			   struct sdebug_dev_info *devip)
4682 {
4683 	int res = 0;
4684 	u64 z_id;
4685 	u8 *cmd = scp->cmnd;
4686 	struct sdeb_zone_state *zsp;
4687 	bool all = cmd[14] & 0x01;
4688 	struct sdeb_store_info *sip = devip2sip(devip, false);
4689 
4690 	if (!sdebug_dev_is_zoned(devip)) {
4691 		mk_sense_invalid_opcode(scp);
4692 		return check_condition_result;
4693 	}
4694 
4695 	sdeb_write_lock(sip);
4696 
4697 	if (all) {
4698 		zbc_close_all(devip);
4699 		goto fini;
4700 	}
4701 
4702 	/* Close specified zone */
4703 	z_id = get_unaligned_be64(cmd + 2);
4704 	if (z_id >= sdebug_capacity) {
4705 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4706 		res = check_condition_result;
4707 		goto fini;
4708 	}
4709 
4710 	zsp = zbc_zone(devip, z_id);
4711 	if (z_id != zsp->z_start) {
4712 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4713 		res = check_condition_result;
4714 		goto fini;
4715 	}
4716 	if (zbc_zone_is_conv(zsp)) {
4717 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4718 		res = check_condition_result;
4719 		goto fini;
4720 	}
4721 
4722 	zbc_close_zone(devip, zsp);
4723 fini:
4724 	sdeb_write_unlock(sip);
4725 	return res;
4726 }
4727 
4728 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4729 			    struct sdeb_zone_state *zsp, bool empty)
4730 {
4731 	enum sdebug_z_cond zc = zsp->z_cond;
4732 
4733 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4734 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4735 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4736 			zbc_close_zone(devip, zsp);
4737 		if (zsp->z_cond == ZC4_CLOSED)
4738 			devip->nr_closed--;
4739 		zsp->z_wp = zsp->z_start + zsp->z_size;
4740 		zsp->z_cond = ZC5_FULL;
4741 	}
4742 }
4743 
4744 static void zbc_finish_all(struct sdebug_dev_info *devip)
4745 {
4746 	unsigned int i;
4747 
4748 	for (i = 0; i < devip->nr_zones; i++)
4749 		zbc_finish_zone(devip, &devip->zstate[i], false);
4750 }
4751 
4752 static int resp_finish_zone(struct scsi_cmnd *scp,
4753 			    struct sdebug_dev_info *devip)
4754 {
4755 	struct sdeb_zone_state *zsp;
4756 	int res = 0;
4757 	u64 z_id;
4758 	u8 *cmd = scp->cmnd;
4759 	bool all = cmd[14] & 0x01;
4760 	struct sdeb_store_info *sip = devip2sip(devip, false);
4761 
4762 	if (!sdebug_dev_is_zoned(devip)) {
4763 		mk_sense_invalid_opcode(scp);
4764 		return check_condition_result;
4765 	}
4766 
4767 	sdeb_write_lock(sip);
4768 
4769 	if (all) {
4770 		zbc_finish_all(devip);
4771 		goto fini;
4772 	}
4773 
4774 	/* Finish the specified zone */
4775 	z_id = get_unaligned_be64(cmd + 2);
4776 	if (z_id >= sdebug_capacity) {
4777 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4778 		res = check_condition_result;
4779 		goto fini;
4780 	}
4781 
4782 	zsp = zbc_zone(devip, z_id);
4783 	if (z_id != zsp->z_start) {
4784 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4785 		res = check_condition_result;
4786 		goto fini;
4787 	}
4788 	if (zbc_zone_is_conv(zsp)) {
4789 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4790 		res = check_condition_result;
4791 		goto fini;
4792 	}
4793 
4794 	zbc_finish_zone(devip, zsp, true);
4795 fini:
4796 	sdeb_write_unlock(sip);
4797 	return res;
4798 }
4799 
4800 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4801 			 struct sdeb_zone_state *zsp)
4802 {
4803 	enum sdebug_z_cond zc;
4804 	struct sdeb_store_info *sip = devip2sip(devip, false);
4805 
4806 	if (!zbc_zone_is_seq(zsp))
4807 		return;
4808 
4809 	zc = zsp->z_cond;
4810 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4811 		zbc_close_zone(devip, zsp);
4812 
4813 	if (zsp->z_cond == ZC4_CLOSED)
4814 		devip->nr_closed--;
4815 
4816 	if (zsp->z_wp > zsp->z_start)
4817 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4818 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4819 
4820 	zsp->z_non_seq_resource = false;
4821 	zsp->z_wp = zsp->z_start;
4822 	zsp->z_cond = ZC1_EMPTY;
4823 }
4824 
4825 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4826 {
4827 	unsigned int i;
4828 
4829 	for (i = 0; i < devip->nr_zones; i++)
4830 		zbc_rwp_zone(devip, &devip->zstate[i]);
4831 }
4832 
4833 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4834 {
4835 	struct sdeb_zone_state *zsp;
4836 	int res = 0;
4837 	u64 z_id;
4838 	u8 *cmd = scp->cmnd;
4839 	bool all = cmd[14] & 0x01;
4840 	struct sdeb_store_info *sip = devip2sip(devip, false);
4841 
4842 	if (!sdebug_dev_is_zoned(devip)) {
4843 		mk_sense_invalid_opcode(scp);
4844 		return check_condition_result;
4845 	}
4846 
4847 	sdeb_write_lock(sip);
4848 
4849 	if (all) {
4850 		zbc_rwp_all(devip);
4851 		goto fini;
4852 	}
4853 
4854 	z_id = get_unaligned_be64(cmd + 2);
4855 	if (z_id >= sdebug_capacity) {
4856 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4857 		res = check_condition_result;
4858 		goto fini;
4859 	}
4860 
4861 	zsp = zbc_zone(devip, z_id);
4862 	if (z_id != zsp->z_start) {
4863 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4864 		res = check_condition_result;
4865 		goto fini;
4866 	}
4867 	if (zbc_zone_is_conv(zsp)) {
4868 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4869 		res = check_condition_result;
4870 		goto fini;
4871 	}
4872 
4873 	zbc_rwp_zone(devip, zsp);
4874 fini:
4875 	sdeb_write_unlock(sip);
4876 	return res;
4877 }
4878 
4879 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4880 {
4881 	u16 hwq;
4882 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4883 
4884 	hwq = blk_mq_unique_tag_to_hwq(tag);
4885 
4886 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4887 	if (WARN_ON_ONCE(hwq >= submit_queues))
4888 		hwq = 0;
4889 
4890 	return sdebug_q_arr + hwq;
4891 }
4892 
4893 static u32 get_tag(struct scsi_cmnd *cmnd)
4894 {
4895 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4896 }
4897 
4898 /* Queued (deferred) command completions converge here. */
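/*
 * Runs from the hrtimer callback or work item set up when the command was
 * deferred: locate the queued command, free its slot in the per-queue
 * array and hand the command back to the mid level via scsi_done().
 */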
4899 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4900 {
4901 	bool aborted = sd_dp->aborted;
4902 	int qc_idx;
4903 	int retiring = 0;
4904 	unsigned long iflags;
4905 	struct sdebug_queue *sqp;
4906 	struct sdebug_queued_cmd *sqcp;
4907 	struct scsi_cmnd *scp;
4908 	struct sdebug_dev_info *devip;
4909 
4910 	if (unlikely(aborted))
4911 		sd_dp->aborted = false;
4912 	qc_idx = sd_dp->qc_idx;
4913 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4914 	if (sdebug_statistics) {
4915 		atomic_inc(&sdebug_completions);
4916 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4917 			atomic_inc(&sdebug_miss_cpus);
4918 	}
4919 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4920 		pr_err("wild qc_idx=%d\n", qc_idx);
4921 		return;
4922 	}
4923 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4924 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4925 	sqcp = &sqp->qc_arr[qc_idx];
4926 	scp = sqcp->a_cmnd;
4927 	if (unlikely(scp == NULL)) {
4928 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4929 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4930 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4931 		return;
4932 	}
4933 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4934 	if (likely(devip))
4935 		atomic_dec(&devip->num_in_q);
4936 	else
4937 		pr_err("devip=NULL\n");
4938 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4939 		retiring = 1;
4940 
4941 	sqcp->a_cmnd = NULL;
4942 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4943 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4944 		pr_err("Unexpected completion\n");
4945 		return;
4946 	}
4947 
4948 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4949 		int k, retval;
4950 
4951 		retval = atomic_read(&retired_max_queue);
4952 		if (qc_idx >= retval) {
4953 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4954 			pr_err("index %d too large\n", retval);
4955 			return;
4956 		}
4957 		k = find_last_bit(sqp->in_use_bm, retval);
4958 		if ((k < sdebug_max_queue) || (k == retval))
4959 			atomic_set(&retired_max_queue, 0);
4960 		else
4961 			atomic_set(&retired_max_queue, k + 1);
4962 	}
4963 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4964 	if (unlikely(aborted)) {
4965 		if (sdebug_verbose)
4966 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4967 		return;
4968 	}
4969 	scsi_done(scp); /* callback to mid level */
4970 }
4971 
4972 /* Called when the high-resolution timer fires. */
4973 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4974 {
4975 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4976 						  hrt);
4977 	sdebug_q_cmd_complete(sd_dp);
4978 	return HRTIMER_NORESTART;
4979 }
4980 
4981 /* Called when the work queue runs the deferred work item. */
4982 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4983 {
4984 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4985 						  ew.work);
4986 	sdebug_q_cmd_complete(sd_dp);
4987 }
4988 
4989 static bool got_shared_uuid;
4990 static uuid_t shared_uuid;
4991 
4992 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4993 {
4994 	struct sdeb_zone_state *zsp;
4995 	sector_t capacity = get_sdebug_capacity();
4996 	sector_t conv_capacity;
4997 	sector_t zstart = 0;
4998 	unsigned int i;
4999 
5000 	/*
5001 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5002 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5003 	 * use the specified zone size checking that at least 2 zones can be
5004 	 * created for the device.
5005 	 */
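	/*
	 * Worked example (editor's illustration; assumes the default
	 * DEF_ZBC_ZONE_SIZE_MB of 128, a 512 byte logical block size and
	 * a hypothetical 64 MiB store): the initial zsize is
	 * 128 MiB / 512 B = 262144 blocks, but the 131072 block capacity
	 * cannot hold 4 such zones, so zsize is halved to 131072, 65536
	 * and finally 32768 blocks (16 MiB), at which point 4 zones fit.
	 */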
5006 	if (!sdeb_zbc_zone_size_mb) {
5007 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5008 			>> ilog2(sdebug_sector_size);
5009 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5010 			devip->zsize >>= 1;
5011 		if (devip->zsize < 2) {
5012 			pr_err("Device capacity too small\n");
5013 			return -EINVAL;
5014 		}
5015 	} else {
5016 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5017 			pr_err("Zone size is not a power of 2\n");
5018 			return -EINVAL;
5019 		}
5020 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5021 			>> ilog2(sdebug_sector_size);
5022 		if (devip->zsize >= capacity) {
5023 			pr_err("Zone size too large for device capacity\n");
5024 			return -EINVAL;
5025 		}
5026 	}
5027 
5028 	devip->zsize_shift = ilog2(devip->zsize);
5029 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5030 
5031 	if (sdeb_zbc_zone_cap_mb == 0) {
5032 		devip->zcap = devip->zsize;
5033 	} else {
5034 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5035 			      ilog2(sdebug_sector_size);
5036 		if (devip->zcap > devip->zsize) {
5037 			pr_err("Zone capacity too large\n");
5038 			return -EINVAL;
5039 		}
5040 	}
5041 
5042 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5043 	if (conv_capacity >= capacity) {
5044 		pr_err("Number of conventional zones too large\n");
5045 		return -EINVAL;
5046 	}
5047 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5048 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5049 			      devip->zsize_shift;
5050 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5051 
5052 	/* Add gap zones if zone capacity is smaller than the zone size */
5053 	if (devip->zcap < devip->zsize)
5054 		devip->nr_zones += devip->nr_seq_zones;
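
	/*
	 * Editor's illustration (hypothetical values): with zone_size_mb=4
	 * and zone_cap_mb=3, each sequential zone of 3 MiB is followed by
	 * a 1 MiB gap zone covering the unusable tail of the zone, so the
	 * zone count doubles for the sequential part of the device.
	 */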
5055 
5056 	if (devip->zmodel == BLK_ZONED_HM) {
5057 		/* sdeb_zbc_max_open can be 0, meaning "not reported" */
5058 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5059 			devip->max_open = (devip->nr_zones - 1) / 2;
5060 		else
5061 			devip->max_open = sdeb_zbc_max_open;
5062 	}
5063 
5064 	devip->zstate = kcalloc(devip->nr_zones,
5065 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5066 	if (!devip->zstate)
5067 		return -ENOMEM;
5068 
5069 	for (i = 0; i < devip->nr_zones; i++) {
5070 		zsp = &devip->zstate[i];
5071 
5072 		zsp->z_start = zstart;
5073 
5074 		if (i < devip->nr_conv_zones) {
5075 			zsp->z_type = ZBC_ZTYPE_CNV;
5076 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5077 			zsp->z_wp = (sector_t)-1;
5078 			zsp->z_size =
5079 				min_t(u64, devip->zsize, capacity - zstart);
5080 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5081 			if (devip->zmodel == BLK_ZONED_HM)
5082 				zsp->z_type = ZBC_ZTYPE_SWR;
5083 			else
5084 				zsp->z_type = ZBC_ZTYPE_SWP;
5085 			zsp->z_cond = ZC1_EMPTY;
5086 			zsp->z_wp = zsp->z_start;
5087 			zsp->z_size =
5088 				min_t(u64, devip->zcap, capacity - zstart);
5089 		} else {
5090 			zsp->z_type = ZBC_ZTYPE_GAP;
5091 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5092 			zsp->z_wp = (sector_t)-1;
5093 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5094 					    capacity - zstart);
5095 		}
5096 
5097 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5098 		zstart += zsp->z_size;
5099 	}
5100 
5101 	return 0;
5102 }
5103 
5104 static struct sdebug_dev_info *sdebug_device_create(
5105 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5106 {
5107 	struct sdebug_dev_info *devip;
5108 
5109 	devip = kzalloc(sizeof(*devip), flags);
5110 	if (devip) {
5111 		if (sdebug_uuid_ctl == 1)
5112 			uuid_gen(&devip->lu_name);
5113 		else if (sdebug_uuid_ctl == 2) {
5114 			if (got_shared_uuid)
5115 				devip->lu_name = shared_uuid;
5116 			else {
5117 				uuid_gen(&shared_uuid);
5118 				got_shared_uuid = true;
5119 				devip->lu_name = shared_uuid;
5120 			}
5121 		}
5122 		devip->sdbg_host = sdbg_host;
5123 		if (sdeb_zbc_in_use) {
5124 			devip->zmodel = sdeb_zbc_model;
5125 			if (sdebug_device_create_zones(devip)) {
5126 				kfree(devip);
5127 				return NULL;
5128 			}
5129 		} else {
5130 			devip->zmodel = BLK_ZONED_NONE;
5131 		}
5133 		devip->create_ts = ktime_get_boottime();
5134 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5135 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5136 	}
5137 	return devip;
5138 }
5139 
5140 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5141 {
5142 	struct sdebug_host_info *sdbg_host;
5143 	struct sdebug_dev_info *open_devip = NULL;
5144 	struct sdebug_dev_info *devip;
5145 
5146 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5147 	if (!sdbg_host) {
5148 		pr_err("Host info NULL\n");
5149 		return NULL;
5150 	}
5151 
5152 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5153 		if (devip->used && devip->channel == sdev->channel &&
5154 		    devip->target == sdev->id &&
5155 		    devip->lun == sdev->lun)
5156 			return devip;
5157 
5158 		/* remember the first unused entry for possible re-use */
5159 		if (!devip->used && !open_devip)
5160 			open_devip = devip;
5161 	}
5162 	if (!open_devip) { /* try and make a new one */
5163 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5164 		if (!open_devip) {
5165 			pr_err("out of memory at line %d\n", __LINE__);
5166 			return NULL;
5167 		}
5168 	}
5169 
5170 	open_devip->channel = sdev->channel;
5171 	open_devip->target = sdev->id;
5172 	open_devip->lun = sdev->lun;
5173 	open_devip->sdbg_host = sdbg_host;
5174 	atomic_set(&open_devip->num_in_q, 0);
5175 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5176 	open_devip->used = true;
5177 	return open_devip;
5178 }
5179 
5180 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5181 {
5182 	if (sdebug_verbose)
5183 		pr_info("slave_alloc <%u %u %u %llu>\n",
5184 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5185 	return 0;
5186 }
5187 
5188 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5189 {
5190 	struct sdebug_dev_info *devip =
5191 			(struct sdebug_dev_info *)sdp->hostdata;
5192 
5193 	if (sdebug_verbose)
5194 		pr_info("slave_configure <%u %u %u %llu>\n",
5195 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5196 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5197 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5198 	if (devip == NULL) {
5199 		devip = find_build_dev_info(sdp);
5200 		if (devip == NULL)
5201 			return 1;  /* no resources, will be marked offline */
5202 	}
5203 	sdp->hostdata = devip;
5204 	if (sdebug_no_uld)
5205 		sdp->no_uld_attach = 1;
5206 	config_cdb_len(sdp);
5207 	return 0;
5208 }
5209 
5210 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5211 {
5212 	struct sdebug_dev_info *devip =
5213 		(struct sdebug_dev_info *)sdp->hostdata;
5214 
5215 	if (sdebug_verbose)
5216 		pr_info("slave_destroy <%u %u %u %llu>\n",
5217 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5218 	if (devip) {
5219 		/* make this slot available for re-use */
5220 		devip->used = false;
5221 		sdp->hostdata = NULL;
5222 	}
5223 }
5224 
5225 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5226 			   enum sdeb_defer_type defer_t)
5227 {
5228 	if (!sd_dp)
5229 		return;
5230 	if (defer_t == SDEB_DEFER_HRT)
5231 		hrtimer_cancel(&sd_dp->hrt);
5232 	else if (defer_t == SDEB_DEFER_WQ)
5233 		cancel_work_sync(&sd_dp->ew.work);
5234 }
5235 
5236 /* If @cmnd is found, deletes its timer or work queue and returns true;
5237  * else returns false. */
5238 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5239 {
5240 	unsigned long iflags;
5241 	int j, k, qmax, r_qmax;
5242 	enum sdeb_defer_type l_defer_t;
5243 	struct sdebug_queue *sqp;
5244 	struct sdebug_queued_cmd *sqcp;
5245 	struct sdebug_dev_info *devip;
5246 	struct sdebug_defer *sd_dp;
5247 
5248 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5249 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5250 		qmax = sdebug_max_queue;
5251 		r_qmax = atomic_read(&retired_max_queue);
5252 		if (r_qmax > qmax)
5253 			qmax = r_qmax;
5254 		for (k = 0; k < qmax; ++k) {
5255 			if (test_bit(k, sqp->in_use_bm)) {
5256 				sqcp = &sqp->qc_arr[k];
5257 				if (cmnd != sqcp->a_cmnd)
5258 					continue;
5259 				/* found */
5260 				devip = (struct sdebug_dev_info *)
5261 						cmnd->device->hostdata;
5262 				if (devip)
5263 					atomic_dec(&devip->num_in_q);
5264 				sqcp->a_cmnd = NULL;
5265 				sd_dp = sqcp->sd_dp;
5266 				if (sd_dp) {
5267 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5268 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5269 				} else
5270 					l_defer_t = SDEB_DEFER_NONE;
5271 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5272 				stop_qc_helper(sd_dp, l_defer_t);
5273 				clear_bit(k, sqp->in_use_bm);
5274 				return true;
5275 			}
5276 		}
5277 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5278 	}
5279 	return false;
5280 }
5281 
5282 /* Deletes (stops) timers or work queues of all queued commands */
5283 static void stop_all_queued(void)
5284 {
5285 	unsigned long iflags;
5286 	int j, k;
5287 	enum sdeb_defer_type l_defer_t;
5288 	struct sdebug_queue *sqp;
5289 	struct sdebug_queued_cmd *sqcp;
5290 	struct sdebug_dev_info *devip;
5291 	struct sdebug_defer *sd_dp;
5292 
5293 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5294 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5295 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5296 			if (test_bit(k, sqp->in_use_bm)) {
5297 				sqcp = &sqp->qc_arr[k];
5298 				if (sqcp->a_cmnd == NULL)
5299 					continue;
5300 				devip = (struct sdebug_dev_info *)
5301 					sqcp->a_cmnd->device->hostdata;
5302 				if (devip)
5303 					atomic_dec(&devip->num_in_q);
5304 				sqcp->a_cmnd = NULL;
5305 				sd_dp = sqcp->sd_dp;
5306 				if (sd_dp) {
5307 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5308 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5309 				} else
5310 					l_defer_t = SDEB_DEFER_NONE;
5311 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5312 				stop_qc_helper(sd_dp, l_defer_t);
5313 				clear_bit(k, sqp->in_use_bm);
5314 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5315 			}
5316 		}
5317 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5318 	}
5319 }
5320 
5321 /* Free queued command memory on heap */
5322 static void free_all_queued(void)
5323 {
5324 	int j, k;
5325 	struct sdebug_queue *sqp;
5326 	struct sdebug_queued_cmd *sqcp;
5327 
5328 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5329 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5330 			sqcp = &sqp->qc_arr[k];
5331 			kfree(sqcp->sd_dp);
5332 			sqcp->sd_dp = NULL;
5333 		}
5334 	}
5335 }
5336 
5337 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5338 {
5339 	bool ok;
5340 
5341 	++num_aborts;
5342 	if (SCpnt) {
5343 		ok = stop_queued_cmnd(SCpnt);
5344 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5345 			sdev_printk(KERN_INFO, SCpnt->device,
5346 				    "%s: command%s found\n", __func__,
5347 				    ok ? "" : " not");
5348 	}
5349 	return SUCCESS;
5350 }
5351 
5352 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5353 {
5354 	++num_dev_resets;
5355 	if (SCpnt && SCpnt->device) {
5356 		struct scsi_device *sdp = SCpnt->device;
5357 		struct sdebug_dev_info *devip =
5358 				(struct sdebug_dev_info *)sdp->hostdata;
5359 
5360 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5362 		if (devip)
5363 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5364 	}
5365 	return SUCCESS;
5366 }
5367 
5368 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5369 {
5370 	struct sdebug_host_info *sdbg_host;
5371 	struct sdebug_dev_info *devip;
5372 	struct scsi_device *sdp;
5373 	struct Scsi_Host *hp;
5374 	int k = 0;
5375 
5376 	++num_target_resets;
5377 	if (!SCpnt)
5378 		goto lie;
5379 	sdp = SCpnt->device;
5380 	if (!sdp)
5381 		goto lie;
5382 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5383 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5384 	hp = sdp->host;
5385 	if (!hp)
5386 		goto lie;
5387 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5388 	if (sdbg_host) {
5389 		list_for_each_entry(devip,
5390 				    &sdbg_host->dev_info_list,
5391 				    dev_list)
5392 			if (devip->target == sdp->id) {
5393 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5394 				++k;
5395 			}
5396 	}
5397 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5398 		sdev_printk(KERN_INFO, sdp,
5399 			    "%s: %d device(s) found in target\n", __func__, k);
5400 lie:
5401 	return SUCCESS;
5402 }
5403 
5404 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5405 {
5406 	struct sdebug_host_info *sdbg_host;
5407 	struct sdebug_dev_info *devip;
5408 	struct scsi_device *sdp;
5409 	struct Scsi_Host *hp;
5410 	int k = 0;
5411 
5412 	++num_bus_resets;
5413 	if (!(SCpnt && SCpnt->device))
5414 		goto lie;
5415 	sdp = SCpnt->device;
5416 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5417 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5418 	hp = sdp->host;
5419 	if (hp) {
5420 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5421 		if (sdbg_host) {
5422 			list_for_each_entry(devip,
5423 					    &sdbg_host->dev_info_list,
5424 					    dev_list) {
5425 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5426 				++k;
5427 			}
5428 		}
5429 	}
5430 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5431 		sdev_printk(KERN_INFO, sdp,
5432 			    "%s: %d device(s) found in host\n", __func__, k);
5433 lie:
5434 	return SUCCESS;
5435 }
5436 
5437 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5438 {
5439 	struct sdebug_host_info *sdbg_host;
5440 	struct sdebug_dev_info *devip;
5441 	int k = 0;
5442 
5443 	++num_host_resets;
5444 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5445 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5446 	spin_lock(&sdebug_host_list_lock);
5447 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5448 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5449 				    dev_list) {
5450 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5451 			++k;
5452 		}
5453 	}
5454 	spin_unlock(&sdebug_host_list_lock);
5455 	stop_all_queued();
5456 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5457 		sdev_printk(KERN_INFO, SCpnt->device,
5458 			    "%s: %d device(s) found\n", __func__, k);
5459 	return SUCCESS;
5460 }
5461 
5462 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5463 {
5464 	struct msdos_partition *pp;
5465 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5466 	int sectors_per_part, num_sectors, k;
5467 	int heads_by_sects, start_sec, end_sec;
5468 
5469 	/* assume partition table already zeroed */
5470 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5471 		return;
5472 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5473 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5474 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5475 	}
5476 	num_sectors = (int)get_sdebug_capacity();
5477 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5478 			   / sdebug_num_parts;
5479 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5480 	starts[0] = sdebug_sectors_per;
5481 	max_part_secs = sectors_per_part;
5482 	for (k = 1; k < sdebug_num_parts; ++k) {
5483 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5484 			    * heads_by_sects;
5485 		if (starts[k] - starts[k - 1] < max_part_secs)
5486 			max_part_secs = starts[k] - starts[k - 1];
5487 	}
5488 	starts[sdebug_num_parts] = num_sectors;
5489 	starts[sdebug_num_parts + 1] = 0;
5490 
5491 	ramp[510] = 0x55;	/* MBR boot signature */
5492 	ramp[511] = 0xAA;
5493 	pp = (struct msdos_partition *)(ramp + 0x1be);
5494 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5495 		start_sec = starts[k];
5496 		end_sec = starts[k] + max_part_secs - 1;
5497 		pp->boot_ind = 0;
5498 
5499 		pp->cyl = start_sec / heads_by_sects;
5500 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5501 			   / sdebug_sectors_per;
5502 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5503 
5504 		pp->end_cyl = end_sec / heads_by_sects;
5505 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5506 			       / sdebug_sectors_per;
5507 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5508 
5509 		pp->start_sect = cpu_to_le32(start_sec);
5510 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5511 		pp->sys_ind = 0x83;	/* plain Linux partition */
5512 	}
5513 }
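
/*
 * Editor's illustration of the CHS arithmetic above (hypothetical
 * geometry of heads=8 and sectors_per=32, so heads_by_sects=256):
 * LBA 1000 maps to cyl = 1000/256 = 3, head = (1000 - 3*256)/32 = 7 and
 * sector = (1000 % 32) + 1 = 9; the +1 reflects 1-based CHS sectors.
 */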
5514 
5515 static void block_unblock_all_queues(bool block)
5516 {
5517 	int j;
5518 	struct sdebug_queue *sqp;
5519 
5520 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5521 		atomic_set(&sqp->blocked, (int)block);
5522 }
5523 
5524 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5525  * commands will be processed normally before triggers occur.
5526  */
5527 static void tweak_cmnd_count(void)
5528 {
5529 	int count, modulo;
5530 
5531 	modulo = abs(sdebug_every_nth);
5532 	if (modulo < 2)
5533 		return;
5534 	block_unblock_all_queues(true);
5535 	count = atomic_read(&sdebug_cmnd_count);
5536 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5537 	block_unblock_all_queues(false);
5538 }
5539 
5540 static void clear_queue_stats(void)
5541 {
5542 	atomic_set(&sdebug_cmnd_count, 0);
5543 	atomic_set(&sdebug_completions, 0);
5544 	atomic_set(&sdebug_miss_cpus, 0);
5545 	atomic_set(&sdebug_a_tsf, 0);
5546 }
5547 
5548 static bool inject_on_this_cmd(void)
5549 {
5550 	if (sdebug_every_nth == 0)
5551 		return false;
5552 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5553 }
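
/*
 * Editor's illustration (hypothetical numbers): with every_nth=100 and
 * sdebug_cmnd_count at 12345, tweak_cmnd_count() rounds the count down
 * to 12300, so 99 more commands are processed normally before
 * inject_on_this_cmd() fires again at the next multiple of 100.
 */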
5554 
5555 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5556 
5557 /* Complete the processing of the thread that queued a SCSI command to this
5558  * driver. It either completes the command immediately by calling scsi_done()
5559  * or schedules an hrtimer or work queue item to do so later, then returns 0.
5560  * Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5561  */
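/*
 * Editor's summary of the dispatch paths below: delta_jiff == 0 responds
 * in the invocation thread; delta_jiff > 0 or ndelay > 0 arms an hrtimer
 * for the (possibly randomized) delay; a negative delta_jiff defers to
 * the system work queue. REQ_POLLED requests are instead parked in
 * SDEB_DEFER_POLL state for completion via the mq_poll path.
 */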
5562 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5563 			 int scsi_result,
5564 			 int (*pfp)(struct scsi_cmnd *,
5565 				    struct sdebug_dev_info *),
5566 			 int delta_jiff, int ndelay)
5567 {
5568 	bool new_sd_dp;
5569 	bool inject = false;
5570 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5571 	int k, num_in_q, qdepth;
5572 	unsigned long iflags;
5573 	u64 ns_from_boot = 0;
5574 	struct sdebug_queue *sqp;
5575 	struct sdebug_queued_cmd *sqcp;
5576 	struct scsi_device *sdp;
5577 	struct sdebug_defer *sd_dp;
5578 
5579 	if (unlikely(devip == NULL)) {
5580 		if (scsi_result == 0)
5581 			scsi_result = DID_NO_CONNECT << 16;
5582 		goto respond_in_thread;
5583 	}
5584 	sdp = cmnd->device;
5585 
5586 	if (delta_jiff == 0)
5587 		goto respond_in_thread;
5588 
5589 	sqp = get_queue(cmnd);
5590 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5591 	if (unlikely(atomic_read(&sqp->blocked))) {
5592 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5593 		return SCSI_MLQUEUE_HOST_BUSY;
5594 	}
5595 	num_in_q = atomic_read(&devip->num_in_q);
5596 	qdepth = cmnd->device->queue_depth;
5597 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5598 		if (scsi_result) {
5599 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5600 			goto respond_in_thread;
5601 		} else
5602 			scsi_result = device_qfull_result;
5603 	} else if (unlikely(sdebug_every_nth &&
5604 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5605 			    (scsi_result == 0))) {
5606 		if ((num_in_q == (qdepth - 1)) &&
5607 		    (atomic_inc_return(&sdebug_a_tsf) >=
5608 		     abs(sdebug_every_nth))) {
5609 			atomic_set(&sdebug_a_tsf, 0);
5610 			inject = true;
5611 			scsi_result = device_qfull_result;
5612 		}
5613 	}
5614 
5615 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5616 	if (unlikely(k >= sdebug_max_queue)) {
5617 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5618 		if (scsi_result)
5619 			goto respond_in_thread;
5620 		scsi_result = device_qfull_result;
5621 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5622 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5623 				    __func__, sdebug_max_queue);
5624 		goto respond_in_thread;
5625 	}
5626 	set_bit(k, sqp->in_use_bm);
5627 	atomic_inc(&devip->num_in_q);
5628 	sqcp = &sqp->qc_arr[k];
5629 	sqcp->a_cmnd = cmnd;
5630 	cmnd->host_scribble = (unsigned char *)sqcp;
5631 	sd_dp = sqcp->sd_dp;
5632 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5633 
5634 	if (!sd_dp) {
5635 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5636 		if (!sd_dp) {
5637 			atomic_dec(&devip->num_in_q);
5638 			clear_bit(k, sqp->in_use_bm);
5639 			return SCSI_MLQUEUE_HOST_BUSY;
5640 		}
5641 		new_sd_dp = true;
5642 	} else {
5643 		new_sd_dp = false;
5644 	}
5645 
5646 	/* Set the hostwide tag */
5647 	if (sdebug_host_max_queue)
5648 		sd_dp->hc_idx = get_tag(cmnd);
5649 
5650 	if (polled)
5651 		ns_from_boot = ktime_get_boottime_ns();
5652 
5653 	/* one of the resp_*() response functions is called here */
5654 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5655 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5656 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5657 		delta_jiff = ndelay = 0;
5658 	}
5659 	if (cmnd->result == 0 && scsi_result != 0)
5660 		cmnd->result = scsi_result;
5661 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5662 		if (atomic_read(&sdeb_inject_pending)) {
5663 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5664 			atomic_set(&sdeb_inject_pending, 0);
5665 			cmnd->result = check_condition_result;
5666 		}
5667 	}
5668 
5669 	if (unlikely(sdebug_verbose && cmnd->result))
5670 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5671 			    __func__, cmnd->result);
5672 
5673 	if (delta_jiff > 0 || ndelay > 0) {
5674 		ktime_t kt;
5675 
5676 		if (delta_jiff > 0) {
5677 			u64 ns = jiffies_to_nsecs(delta_jiff);
5678 
5679 			if (sdebug_random && ns < U32_MAX) {
5680 				ns = prandom_u32_max((u32)ns);
5681 			} else if (sdebug_random) {
5682 				ns >>= 12;	/* scale to 4 usec precision */
5683 				if (ns < U32_MAX)	/* over 4 hours max */
5684 					ns = prandom_u32_max((u32)ns);
5685 				ns <<= 12;
5686 			}
5687 			kt = ns_to_ktime(ns);
5688 		} else {	/* ndelay has a 4.2 second max */
5689 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5690 					     (u32)ndelay;
5691 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5692 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5693 
5694 				if (kt <= d) {	/* elapsed duration >= kt */
5695 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5696 					sqcp->a_cmnd = NULL;
5697 					atomic_dec(&devip->num_in_q);
5698 					clear_bit(k, sqp->in_use_bm);
5699 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5700 					if (new_sd_dp)
5701 						kfree(sd_dp);
5702 					/* call scsi_done() from this thread */
5703 					scsi_done(cmnd);
5704 					return 0;
5705 				}
5706 				/* otherwise reduce kt by elapsed time */
5707 				kt -= d;
5708 			}
5709 		}
5710 		if (polled) {
5711 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5712 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5713 			if (!sd_dp->init_poll) {
5714 				sd_dp->init_poll = true;
5715 				sqcp->sd_dp = sd_dp;
5716 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5717 				sd_dp->qc_idx = k;
5718 			}
5719 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5720 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5721 		} else {
5722 			if (!sd_dp->init_hrt) {
5723 				sd_dp->init_hrt = true;
5724 				sqcp->sd_dp = sd_dp;
5725 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5726 					     HRTIMER_MODE_REL_PINNED);
5727 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5728 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5729 				sd_dp->qc_idx = k;
5730 			}
5731 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5732 			/* schedule the invocation of scsi_done() for a later time */
5733 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5734 		}
5735 		if (sdebug_statistics)
5736 			sd_dp->issuing_cpu = raw_smp_processor_id();
5737 	} else {	/* delta_jiff < 0, use work queue */
5738 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5739 			     atomic_read(&sdeb_inject_pending)))
5740 			sd_dp->aborted = true;
5741 		if (polled) {
5742 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5743 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5744 			if (!sd_dp->init_poll) {
5745 				sd_dp->init_poll = true;
5746 				sqcp->sd_dp = sd_dp;
5747 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5748 				sd_dp->qc_idx = k;
5749 			}
5750 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5751 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5752 		} else {
5753 			if (!sd_dp->init_wq) {
5754 				sd_dp->init_wq = true;
5755 				sqcp->sd_dp = sd_dp;
5756 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5757 				sd_dp->qc_idx = k;
5758 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5759 			}
5760 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5761 			schedule_work(&sd_dp->ew.work);
5762 		}
5763 		if (sdebug_statistics)
5764 			sd_dp->issuing_cpu = raw_smp_processor_id();
5765 		if (unlikely(sd_dp->aborted)) {
5766 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5767 				    scsi_cmd_to_rq(cmnd)->tag);
5768 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5769 			atomic_set(&sdeb_inject_pending, 0);
5770 			sd_dp->aborted = false;
5771 		}
5772 	}
5773 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5774 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5775 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5776 	return 0;
5777 
5778 respond_in_thread:	/* call back to mid-layer using invocation thread */
5779 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5780 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5781 	if (cmnd->result == 0 && scsi_result != 0)
5782 		cmnd->result = scsi_result;
5783 	scsi_done(cmnd);
5784 	return 0;
5785 }
5786 
5787 /* Note: The following macros create attribute files in the
5788    /sys/module/scsi_debug/parameters directory. Unfortunately this
5789    driver is not notified when one of them changes, so it cannot trigger
5790    the auxiliary actions that it performs when the corresponding
5791    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5792  */
5793 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5794 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5795 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5796 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5797 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5798 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5799 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5800 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5801 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5802 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5803 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5804 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5805 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5806 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5807 module_param_string(inq_product, sdebug_inq_product_id,
5808 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5809 module_param_string(inq_rev, sdebug_inq_product_rev,
5810 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5811 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5812 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5813 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5814 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5815 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5816 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5817 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5818 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5819 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5820 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5821 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5822 		   S_IRUGO | S_IWUSR);
5823 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5824 		   S_IRUGO | S_IWUSR);
5825 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5826 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5827 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5828 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5829 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5830 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5831 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5832 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5833 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5834 module_param_named(per_host_store, sdebug_per_host_store, bool,
5835 		   S_IRUGO | S_IWUSR);
5836 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5837 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5838 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5839 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5840 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5841 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5842 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5843 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5845 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5846 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5847 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5848 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5849 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5850 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5851 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5852 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5853 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5854 		   S_IRUGO | S_IWUSR);
5855 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5856 module_param_named(write_same_length, sdebug_write_same_length, int,
5857 		   S_IRUGO | S_IWUSR);
5858 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5859 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5860 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5861 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5862 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5863 
5864 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5865 MODULE_DESCRIPTION("SCSI debug adapter driver");
5866 MODULE_LICENSE("GPL");
5867 MODULE_VERSION(SDEBUG_VERSION);
5868 
5869 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5870 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5871 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5872 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5873 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5874 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5875 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5876 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5877 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5878 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5879 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5880 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5881 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5882 MODULE_PARM_DESC(host_max_queue,
5883 		 "host max # of queued cmds (0 to max(def); if non-zero, max_queue is fixed to the same value)");
5884 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5885 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5886 		 SDEBUG_VERSION "\")");
5887 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5888 MODULE_PARM_DESC(lbprz,
5889 		 "reads of unmapped LBs return 0 when 1 (def) and 0xff when 2");
5890 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5891 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5893 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5894 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
5895 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5896 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5897 MODULE_PARM_DESC(medium_error_count, "number of consecutive sectors on which to report a MEDIUM error");
5898 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5899 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5900 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5901 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5902 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5903 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5904 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5905 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5906 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5907 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5908 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5909 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5910 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5911 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5912 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5913 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5914 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5915 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5916 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5917 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5918 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5919 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5920 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5921 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5922 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5923 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5924 MODULE_PARM_DESC(uuid_ctl,
5925 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5926 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5927 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5928 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5929 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5930 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5931 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5932 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5933 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5934 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
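
/*
 * Editor's usage sketch (hypothetical values, not from the original
 * source):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * The first command creates one host with two targets of four LUNs each,
 * backed by a 256 MiB ram store; the second enables the noise (verbose
 * logging) option documented above.
 */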
5935 
5936 #define SDEBUG_INFO_LEN 256
5937 static char sdebug_info[SDEBUG_INFO_LEN];
5938 
5939 static const char *scsi_debug_info(struct Scsi_Host *shp)
5940 {
5941 	int k;
5942 
5943 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5944 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5945 	if (k >= (SDEBUG_INFO_LEN - 1))
5946 		return sdebug_info;
5947 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5948 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5949 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5950 		  "statistics", (int)sdebug_statistics);
5951 	return sdebug_info;
5952 }
5953 
5954 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5955 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5956 				 int length)
5957 {
5958 	char arr[16];
5959 	int opts;
5960 	int min_len = length > 15 ? 15 : length;
5961 
5962 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5963 		return -EACCES;
5964 	memcpy(arr, buffer, min_len);
5965 	arr[min_len] = '\0';
5966 	if (1 != sscanf(arr, "%d", &opts))
5967 		return -EINVAL;
5968 	sdebug_opts = opts;
5969 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5970 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5971 	if (sdebug_every_nth != 0)
5972 		tweak_cmnd_count();
5973 	return length;
5974 }
5975 
5976 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5977  * same for each scsi_debug host (if more than one). Some of the counters
5978  * output here are not atomic so they may be inaccurate on a busy system. */
5979 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5980 {
5981 	int f, j, l;
5982 	struct sdebug_queue *sqp;
5983 	struct sdebug_host_info *sdhp;
5984 
5985 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5986 		   SDEBUG_VERSION, sdebug_version_date);
5987 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5988 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5989 		   sdebug_opts, sdebug_every_nth);
5990 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5991 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5992 		   sdebug_sector_size, "bytes");
5993 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5994 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5995 		   num_aborts);
5996 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5997 		   num_dev_resets, num_target_resets, num_bus_resets,
5998 		   num_host_resets);
5999 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6000 		   dix_reads, dix_writes, dif_errors);
6001 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6002 		   sdebug_statistics);
6003 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6004 		   atomic_read(&sdebug_cmnd_count),
6005 		   atomic_read(&sdebug_completions),
6006 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6007 		   atomic_read(&sdebug_a_tsf),
6008 		   atomic_read(&sdeb_mq_poll_count));
6009 
6010 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6011 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6012 		seq_printf(m, "  queue %d:\n", j);
6013 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6014 		if (f != sdebug_max_queue) {
6015 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6016 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6017 				   "first,last bits", f, l);
6018 		}
6019 	}
6020 
6021 	seq_printf(m, "this host_no=%d\n", host->host_no);
6022 	if (!xa_empty(per_store_ap)) {
6023 		bool niu;
6024 		int idx;
6025 		unsigned long l_idx;
6026 		struct sdeb_store_info *sip;
6027 
6028 		seq_puts(m, "\nhost list:\n");
6029 		j = 0;
6030 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6031 			idx = sdhp->si_idx;
6032 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6033 				   sdhp->shost->host_no, idx);
6034 			++j;
6035 		}
6036 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6037 			   sdeb_most_recent_idx);
6038 		j = 0;
6039 		xa_for_each(per_store_ap, l_idx, sip) {
6040 			niu = xa_get_mark(per_store_ap, l_idx,
6041 					  SDEB_XA_NOT_IN_USE);
6042 			idx = (int)l_idx;
6043 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6044 				   (niu ? "  not_in_use" : ""));
6045 			++j;
6046 		}
6047 	}
6048 	return 0;
6049 }
6050 
6051 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6052 {
6053 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6054 }
6055 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6056  * of delay is jiffies.
6057  */
6058 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6059 			   size_t count)
6060 {
6061 	int jdelay, res;
6062 
6063 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6064 		res = count;
6065 		if (sdebug_jdelay != jdelay) {
6066 			int j, k;
6067 			struct sdebug_queue *sqp;
6068 
6069 			block_unblock_all_queues(true);
6070 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6071 			     ++j, ++sqp) {
6072 				k = find_first_bit(sqp->in_use_bm,
6073 						   sdebug_max_queue);
6074 				if (k != sdebug_max_queue) {
6075 					res = -EBUSY;   /* queued commands */
6076 					break;
6077 				}
6078 			}
6079 			if (res > 0) {
6080 				sdebug_jdelay = jdelay;
6081 				sdebug_ndelay = 0;
6082 			}
6083 			block_unblock_all_queues(false);
6084 		}
6085 		return res;
6086 	}
6087 	return -EINVAL;
6088 }
6089 static DRIVER_ATTR_RW(delay);
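
/*
 * Editor's illustration: 'echo 5 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * sets a 5 jiffy response delay and clears ndelay; the write fails with
 * EBUSY if commands are queued while the value is being changed.
 */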
6090 
6091 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6092 {
6093 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6094 }
6095 /* Returns -EBUSY if ndelay is being changed and commands are queued.
6096  * If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN. */
6097 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6098 			    size_t count)
6099 {
6100 	int ndelay, res;
6101 
6102 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6103 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6104 		res = count;
6105 		if (sdebug_ndelay != ndelay) {
6106 			int j, k;
6107 			struct sdebug_queue *sqp;
6108 
6109 			block_unblock_all_queues(true);
6110 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6111 			     ++j, ++sqp) {
6112 				k = find_first_bit(sqp->in_use_bm,
6113 						   sdebug_max_queue);
6114 				if (k != sdebug_max_queue) {
6115 					res = -EBUSY;   /* queued commands */
6116 					break;
6117 				}
6118 			}
6119 			if (res > 0) {
6120 				sdebug_ndelay = ndelay;
6121 				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6122 							: DEF_JDELAY;
6123 			}
6124 			block_unblock_all_queues(false);
6125 		}
6126 		return res;
6127 	}
6128 	return -EINVAL;
6129 }
6130 static DRIVER_ATTR_RW(ndelay);
6131 
6132 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6133 {
6134 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6135 }
6136 
6137 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6138 			  size_t count)
6139 {
6140 	int opts;
6141 	char work[20];
6142 
6143 	if (sscanf(buf, "%10s", work) == 1) {
6144 		if (strncasecmp(work, "0x", 2) == 0) {
6145 			if (kstrtoint(work + 2, 16, &opts) == 0)
6146 				goto opts_done;
6147 		} else {
6148 			if (kstrtoint(work, 10, &opts) == 0)
6149 				goto opts_done;
6150 		}
6151 	}
6152 	return -EINVAL;
6153 opts_done:
6154 	sdebug_opts = opts;
6155 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6156 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6157 	tweak_cmnd_count();
6158 	return count;
6159 }
6160 static DRIVER_ATTR_RW(opts);
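
/*
 * Editor's illustration: both 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts'
 * and 'echo 0x4 > ...' (hex is accepted, see opts_store() above) enable
 * the timeout injection option described in the opts parameter text.
 */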
6161 
6162 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6163 {
6164 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6165 }
6166 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6167 			   size_t count)
6168 {
6169 	int n;
6170 
6171 	/* Cannot change from or to TYPE_ZBC with sysfs */
6172 	if (sdebug_ptype == TYPE_ZBC)
6173 		return -EINVAL;
6174 
6175 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6176 		if (n == TYPE_ZBC)
6177 			return -EINVAL;
6178 		sdebug_ptype = n;
6179 		return count;
6180 	}
6181 	return -EINVAL;
6182 }
6183 static DRIVER_ATTR_RW(ptype);
6184 
6185 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6186 {
6187 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6188 }
6189 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6190 			    size_t count)
6191 {
6192 	int n;
6193 
6194 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6195 		sdebug_dsense = n;
6196 		return count;
6197 	}
6198 	return -EINVAL;
6199 }
6200 static DRIVER_ATTR_RW(dsense);
6201 
6202 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6203 {
6204 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6205 }
6206 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6207 			     size_t count)
6208 {
6209 	int n, idx;
6210 
6211 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6212 		bool want_store = (n == 0);
6213 		struct sdebug_host_info *sdhp;
6214 
6215 		n = (n > 0);
6216 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6217 		if (sdebug_fake_rw == n)
6218 			return count;	/* not transitioning so do nothing */
6219 
6220 		if (want_store) {	/* 1 --> 0 transition, set up store */
6221 			if (sdeb_first_idx < 0) {
6222 				idx = sdebug_add_store();
6223 				if (idx < 0)
6224 					return idx;
6225 			} else {
6226 				idx = sdeb_first_idx;
6227 				xa_clear_mark(per_store_ap, idx,
6228 					      SDEB_XA_NOT_IN_USE);
6229 			}
6230 			/* make all hosts use same store */
6231 			list_for_each_entry(sdhp, &sdebug_host_list,
6232 					    host_list) {
6233 				if (sdhp->si_idx != idx) {
6234 					xa_set_mark(per_store_ap, sdhp->si_idx,
6235 						    SDEB_XA_NOT_IN_USE);
6236 					sdhp->si_idx = idx;
6237 				}
6238 			}
6239 			sdeb_most_recent_idx = idx;
6240 		} else {	/* 0 --> 1 transition is trigger for shrink */
6241 			sdebug_erase_all_stores(true /* apart from first */);
6242 		}
6243 		sdebug_fake_rw = n;
6244 		return count;
6245 	}
6246 	return -EINVAL;
6247 }
6248 static DRIVER_ATTR_RW(fake_rw);
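
/*
 * Editor's note on the transitions handled above: writing 0 while fake_rw
 * is 1 allocates (or re-uses) a store and points every host at it; writing
 * 1 while it is 0 erases all stores apart from the first. Writes that do
 * not change the setting are accepted without side effects.
 */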
6249 
6250 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6251 {
6252 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6253 }
6254 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6255 			      size_t count)
6256 {
6257 	int n;
6258 
6259 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6260 		sdebug_no_lun_0 = n;
6261 		return count;
6262 	}
6263 	return -EINVAL;
6264 }
6265 static DRIVER_ATTR_RW(no_lun_0);
6266 
6267 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6268 {
6269 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6270 }
6271 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6272 			      size_t count)
6273 {
6274 	int n;
6275 
6276 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6277 		sdebug_num_tgts = n;
6278 		sdebug_max_tgts_luns();
6279 		return count;
6280 	}
6281 	return -EINVAL;
6282 }
6283 static DRIVER_ATTR_RW(num_tgts);
6284 
6285 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6286 {
6287 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6288 }
6289 static DRIVER_ATTR_RO(dev_size_mb);
6290 
6291 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6292 {
6293 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6294 }
6295 
6296 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6297 				    size_t count)
6298 {
6299 	bool v;
6300 
6301 	if (kstrtobool(buf, &v))
6302 		return -EINVAL;
6303 
6304 	sdebug_per_host_store = v;
6305 	return count;
6306 }
6307 static DRIVER_ATTR_RW(per_host_store);
6308 
6309 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6310 {
6311 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6312 }
6313 static DRIVER_ATTR_RO(num_parts);
6314 
6315 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6316 {
6317 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6318 }
6319 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6320 			       size_t count)
6321 {
6322 	int nth;
6323 	char work[20];
6324 
6325 	if (sscanf(buf, "%10s", work) == 1) {
6326 		if (strncasecmp(work, "0x", 2) == 0) {
6327 			if (kstrtoint(work + 2, 16, &nth) == 0)
6328 				goto every_nth_done;
6329 		} else {
6330 			if (kstrtoint(work, 10, &nth) == 0)
6331 				goto every_nth_done;
6332 		}
6333 	}
6334 	return -EINVAL;
6335 
6336 every_nth_done:
6337 	sdebug_every_nth = nth;
6338 	if (nth && !sdebug_statistics) {
6339 		pr_info("every_nth needs statistics=1, setting it\n");
6340 		sdebug_statistics = true;
6341 	}
6342 	tweak_cmnd_count();
6343 	return count;
6344 }
6345 static DRIVER_ATTR_RW(every_nth);
6346 
6347 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6348 {
6349 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6350 }
6351 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6352 				size_t count)
6353 {
6354 	int n;
6355 	bool changed;
6356 
6357 	if (kstrtoint(buf, 0, &n))
6358 		return -EINVAL;
6359 	if (n >= 0) {
6360 		if (n > (int)SAM_LUN_AM_FLAT) {
6361 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6362 			return -EINVAL;
6363 		}
6364 		changed = ((int)sdebug_lun_am != n);
6365 		sdebug_lun_am = n;
6366 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6367 			struct sdebug_host_info *sdhp;
6368 			struct sdebug_dev_info *dp;
6369 
6370 			spin_lock(&sdebug_host_list_lock);
6371 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6372 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6373 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6374 				}
6375 			}
6376 			spin_unlock(&sdebug_host_list_lock);
6377 		}
6378 		return count;
6379 	}
6380 	return -EINVAL;
6381 }
6382 static DRIVER_ATTR_RW(lun_format);
6383 
6384 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6385 {
6386 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6387 }
6388 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6389 			      size_t count)
6390 {
6391 	int n;
6392 	bool changed;
6393 
6394 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6395 		if (n > 256) {
6396 			pr_warn("max_luns can be no more than 256\n");
6397 			return -EINVAL;
6398 		}
6399 		changed = (sdebug_max_luns != n);
6400 		sdebug_max_luns = n;
6401 		sdebug_max_tgts_luns();
6402 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6403 			struct sdebug_host_info *sdhp;
6404 			struct sdebug_dev_info *dp;
6405 
6406 			spin_lock(&sdebug_host_list_lock);
6407 			list_for_each_entry(sdhp, &sdebug_host_list,
6408 					    host_list) {
6409 				list_for_each_entry(dp, &sdhp->dev_info_list,
6410 						    dev_list) {
6411 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6412 						dp->uas_bm);
6413 				}
6414 			}
6415 			spin_unlock(&sdebug_host_list_lock);
6416 		}
6417 		return count;
6418 	}
6419 	return -EINVAL;
6420 }
6421 static DRIVER_ATTR_RW(max_luns);
6422 
6423 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6424 {
6425 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6426 }
6427 /* N.B. max_queue can be changed while there are queued commands. In-flight
6428  * commands beyond the new max_queue will still be completed. */
6429 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6430 			       size_t count)
6431 {
6432 	int j, n, k, a;
6433 	struct sdebug_queue *sqp;
6434 
6435 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6436 	    (n <= SDEBUG_CANQUEUE) &&
6437 	    (sdebug_host_max_queue == 0)) {
6438 		block_unblock_all_queues(true);
6439 		k = 0;
6440 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6441 		     ++j, ++sqp) {
6442 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6443 			if (a > k)
6444 				k = a;
6445 		}
6446 		sdebug_max_queue = n;
6447 		if (k == SDEBUG_CANQUEUE)
6448 			atomic_set(&retired_max_queue, 0);
6449 		else if (k >= n)
6450 			atomic_set(&retired_max_queue, k + 1);
6451 		else
6452 			atomic_set(&retired_max_queue, 0);
6453 		block_unblock_all_queues(false);
6454 		return count;
6455 	}
6456 	return -EINVAL;
6457 }
6458 static DRIVER_ATTR_RW(max_queue);
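
/*
 * Editor's illustration (hypothetical numbers): lowering max_queue from
 * 64 to 16 while slot 40 is still in use sets retired_max_queue to 41;
 * sdebug_q_cmd_complete() then shrinks retired_max_queue as the high
 * slots drain and resets it to 0 once no slot at or above the new limit
 * remains busy.
 */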
6459 
6460 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6461 {
6462 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6463 }
6464 
6465 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6466 {
6467 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6468 }
6469 
6470 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6471 {
6472 	bool v;
6473 
6474 	if (kstrtobool(buf, &v))
6475 		return -EINVAL;
6476 
6477 	sdebug_no_rwlock = v;
6478 	return count;
6479 }
6480 static DRIVER_ATTR_RW(no_rwlock);
6481 
6482 /*
6483  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6484  * in range [0, sdebug_host_max_queue), we can't change it.
6485  */
6486 static DRIVER_ATTR_RO(host_max_queue);
6487 
6488 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6489 {
6490 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6491 }
6492 static DRIVER_ATTR_RO(no_uld);
6493 
6494 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6495 {
6496 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6497 }
6498 static DRIVER_ATTR_RO(scsi_level);
6499 
6500 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6501 {
6502 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6503 }
6504 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6505 				size_t count)
6506 {
6507 	int n;
6508 	bool changed;
6509 
6510 	/* Ignore capacity change for ZBC drives for now */
6511 	if (sdeb_zbc_in_use)
6512 		return -ENOTSUPP;
6513 
6514 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6515 		changed = (sdebug_virtual_gb != n);
6516 		sdebug_virtual_gb = n;
6517 		sdebug_capacity = get_sdebug_capacity();
6518 		if (changed) {
6519 			struct sdebug_host_info *sdhp;
6520 			struct sdebug_dev_info *dp;
6521 
6522 			spin_lock(&sdebug_host_list_lock);
6523 			list_for_each_entry(sdhp, &sdebug_host_list,
6524 					    host_list) {
6525 				list_for_each_entry(dp, &sdhp->dev_info_list,
6526 						    dev_list) {
6527 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6528 						dp->uas_bm);
6529 				}
6530 			}
6531 			spin_unlock(&sdebug_host_list_lock);
6532 		}
6533 		return count;
6534 	}
6535 	return -EINVAL;
6536 }
6537 static DRIVER_ATTR_RW(virtual_gb);
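
/*
 * Illustrative usage (value assumed): changing virtual_gb re-derives
 * sdebug_capacity and latches a CAPACITY_CHANGED unit attention on
 * every device; ZBC stores reject the change with ENOTSUPP for now:
 *
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */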
6538 
6539 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6540 {
6541 	/* what is shown is the absolute number of hosts currently active */
6542 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6543 }
6544 
6545 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6546 			      size_t count)
6547 {
6548 	bool found;
6549 	unsigned long idx;
6550 	struct sdeb_store_info *sip;
6551 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6552 	int delta_hosts;
6553 
6554 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6555 		return -EINVAL;
6556 	if (delta_hosts > 0) {
6557 		do {
6558 			found = false;
6559 			if (want_phs) {
6560 				xa_for_each_marked(per_store_ap, idx, sip,
6561 						   SDEB_XA_NOT_IN_USE) {
6562 					sdeb_most_recent_idx = (int)idx;
6563 					found = true;
6564 					break;
6565 				}
6566 				if (found)	/* re-use case */
6567 					sdebug_add_host_helper((int)idx);
6568 				else
6569 					sdebug_do_add_host(true);
6570 			} else {
6571 				sdebug_do_add_host(false);
6572 			}
6573 		} while (--delta_hosts);
6574 	} else if (delta_hosts < 0) {
6575 		do {
6576 			sdebug_do_remove_host(false);
6577 		} while (++delta_hosts);
6578 	}
6579 	return count;
6580 }
6581 static DRIVER_ATTR_RW(add_host);
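
/*
 * add_host takes a signed delta rather than an absolute count, e.g.
 * (illustrative):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# remove one
 *
 * With per_host_store active (and fake_rw off) an added host first
 * tries to re-use a store marked SDEB_XA_NOT_IN_USE before creating a
 * new one.
 */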
6582 
6583 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6584 {
6585 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6586 }
6587 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6588 				    size_t count)
6589 {
6590 	int n;
6591 
6592 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6593 		sdebug_vpd_use_hostno = n;
6594 		return count;
6595 	}
6596 	return -EINVAL;
6597 }
6598 static DRIVER_ATTR_RW(vpd_use_hostno);
6599 
6600 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6601 {
6602 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6603 }
6604 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6605 				size_t count)
6606 {
6607 	int n;
6608 
6609 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6610 		if (n > 0)
6611 			sdebug_statistics = true;
6612 		else {
6613 			clear_queue_stats();
6614 			sdebug_statistics = false;
6615 		}
6616 		return count;
6617 	}
6618 	return -EINVAL;
6619 }
6620 static DRIVER_ATTR_RW(statistics);
6621 
6622 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6623 {
6624 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6625 }
6626 static DRIVER_ATTR_RO(sector_size);
6627 
6628 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6629 {
6630 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6631 }
6632 static DRIVER_ATTR_RO(submit_queues);
6633 
6634 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6635 {
6636 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6637 }
6638 static DRIVER_ATTR_RO(dix);
6639 
6640 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6641 {
6642 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6643 }
6644 static DRIVER_ATTR_RO(dif);
6645 
6646 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6647 {
6648 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6649 }
6650 static DRIVER_ATTR_RO(guard);
6651 
6652 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6653 {
6654 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6655 }
6656 static DRIVER_ATTR_RO(ato);
6657 
6658 static ssize_t map_show(struct device_driver *ddp, char *buf)
6659 {
6660 	ssize_t count = 0;
6661 
6662 	if (!scsi_debug_lbp())
6663 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6664 				 sdebug_store_sectors);
6665 
6666 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6667 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6668 
6669 		if (sip)
6670 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6671 					  (int)map_size, sip->map_storep);
6672 	}
6673 	buf[count++] = '\n';
6674 	buf[count] = '\0';
6675 
6676 	return count;
6677 }
6678 static DRIVER_ATTR_RO(map);
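
/*
 * map is rendered with the "%*pbl" bitmap-list format, so provisioned
 * blocks appear as ranges of map indexes, e.g. (illustrative output)
 * "0-1,64-95,1023". Without logical block provisioning the whole store
 * is reported as a single "0-<store_sectors>" range.
 */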
6679 
6680 static ssize_t random_show(struct device_driver *ddp, char *buf)
6681 {
6682 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6683 }
6684 
6685 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6686 			    size_t count)
6687 {
6688 	bool v;
6689 
6690 	if (kstrtobool(buf, &v))
6691 		return -EINVAL;
6692 
6693 	sdebug_random = v;
6694 	return count;
6695 }
6696 static DRIVER_ATTR_RW(random);
6697 
6698 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6699 {
6700 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6701 }
6702 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6703 			       size_t count)
6704 {
6705 	int n;
6706 
6707 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6708 		sdebug_removable = (n > 0);
6709 		return count;
6710 	}
6711 	return -EINVAL;
6712 }
6713 static DRIVER_ATTR_RW(removable);
6714 
6715 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6716 {
6717 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6718 }
6719 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6720 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6721 			       size_t count)
6722 {
6723 	int n;
6724 
6725 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6726 		sdebug_host_lock = (n > 0);
6727 		return count;
6728 	}
6729 	return -EINVAL;
6730 }
6731 static DRIVER_ATTR_RW(host_lock);
6732 
6733 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6734 {
6735 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6736 }
6737 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6738 			    size_t count)
6739 {
6740 	int n;
6741 
6742 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6743 		sdebug_strict = (n > 0);
6744 		return count;
6745 	}
6746 	return -EINVAL;
6747 }
6748 static DRIVER_ATTR_RW(strict);
6749 
6750 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6751 {
6752 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6753 }
6754 static DRIVER_ATTR_RO(uuid_ctl);
6755 
6756 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6757 {
6758 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6759 }
6760 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6761 			     size_t count)
6762 {
6763 	int ret, n;
6764 
6765 	ret = kstrtoint(buf, 0, &n);
6766 	if (ret)
6767 		return ret;
6768 	sdebug_cdb_len = n;
6769 	all_config_cdb_len();
6770 	return count;
6771 }
6772 static DRIVER_ATTR_RW(cdb_len);
6773 
6774 static const char * const zbc_model_strs_a[] = {
6775 	[BLK_ZONED_NONE] = "none",
6776 	[BLK_ZONED_HA]   = "host-aware",
6777 	[BLK_ZONED_HM]   = "host-managed",
6778 };
6779 
6780 static const char * const zbc_model_strs_b[] = {
6781 	[BLK_ZONED_NONE] = "no",
6782 	[BLK_ZONED_HA]   = "aware",
6783 	[BLK_ZONED_HM]   = "managed",
6784 };
6785 
6786 static const char * const zbc_model_strs_c[] = {
6787 	[BLK_ZONED_NONE] = "0",
6788 	[BLK_ZONED_HA]   = "1",
6789 	[BLK_ZONED_HM]   = "2",
6790 };
6791 
6792 static int sdeb_zbc_model_str(const char *cp)
6793 {
6794 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6795 
6796 	if (res < 0) {
6797 		res = sysfs_match_string(zbc_model_strs_b, cp);
6798 		if (res < 0) {
6799 			res = sysfs_match_string(zbc_model_strs_c, cp);
6800 			if (res < 0)
6801 				return -EINVAL;
6802 		}
6803 	}
6804 	return res;
6805 }
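
/*
 * All three spellings of a model are accepted; e.g. "host-managed",
 * "managed" and "2" each map to BLK_ZONED_HM, while an unrecognized
 * string yields -EINVAL.
 */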
6806 
6807 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6808 {
6809 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6810 			 zbc_model_strs_a[sdeb_zbc_model]);
6811 }
6812 static DRIVER_ATTR_RO(zbc);
6813 
6814 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6815 {
6816 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6817 }
6818 static DRIVER_ATTR_RO(tur_ms_to_ready);
6819 
6820 /* Note: The following array creates attribute files in the
6821  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6822  * files (over those found in the /sys/module/scsi_debug/parameters
6823  * directory) is that auxiliary actions can be triggered when an attribute
6824  * is changed. For example see: add_host_store() above.
6825  */
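
/*
 * For example (illustrative), the same parameter seen both ways:
 *
 *   cat /sys/module/scsi_debug/parameters/every_nth
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *
 * Only the second form can trigger side effects, such as the unit
 * attentions raised in max_luns_store() or virtual_gb_store() above.
 */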
6826 
6827 static struct attribute *sdebug_drv_attrs[] = {
6828 	&driver_attr_delay.attr,
6829 	&driver_attr_opts.attr,
6830 	&driver_attr_ptype.attr,
6831 	&driver_attr_dsense.attr,
6832 	&driver_attr_fake_rw.attr,
6833 	&driver_attr_host_max_queue.attr,
6834 	&driver_attr_no_lun_0.attr,
6835 	&driver_attr_num_tgts.attr,
6836 	&driver_attr_dev_size_mb.attr,
6837 	&driver_attr_num_parts.attr,
6838 	&driver_attr_every_nth.attr,
6839 	&driver_attr_lun_format.attr,
6840 	&driver_attr_max_luns.attr,
6841 	&driver_attr_max_queue.attr,
6842 	&driver_attr_no_rwlock.attr,
6843 	&driver_attr_no_uld.attr,
6844 	&driver_attr_scsi_level.attr,
6845 	&driver_attr_virtual_gb.attr,
6846 	&driver_attr_add_host.attr,
6847 	&driver_attr_per_host_store.attr,
6848 	&driver_attr_vpd_use_hostno.attr,
6849 	&driver_attr_sector_size.attr,
6850 	&driver_attr_statistics.attr,
6851 	&driver_attr_submit_queues.attr,
6852 	&driver_attr_dix.attr,
6853 	&driver_attr_dif.attr,
6854 	&driver_attr_guard.attr,
6855 	&driver_attr_ato.attr,
6856 	&driver_attr_map.attr,
6857 	&driver_attr_random.attr,
6858 	&driver_attr_removable.attr,
6859 	&driver_attr_host_lock.attr,
6860 	&driver_attr_ndelay.attr,
6861 	&driver_attr_strict.attr,
6862 	&driver_attr_uuid_ctl.attr,
6863 	&driver_attr_cdb_len.attr,
6864 	&driver_attr_tur_ms_to_ready.attr,
6865 	&driver_attr_zbc.attr,
6866 	NULL,
6867 };
6868 ATTRIBUTE_GROUPS(sdebug_drv);
6869 
6870 static struct device *pseudo_primary;
6871 
6872 static int __init scsi_debug_init(void)
6873 {
6874 	bool want_store = (sdebug_fake_rw == 0);
6875 	unsigned long sz;
6876 	int k, ret, hosts_to_add;
6877 	int idx = -1;
6878 
6879 	ramdisk_lck_a[0] = &atomic_rw;
6880 	ramdisk_lck_a[1] = &atomic_rw2;
6881 	atomic_set(&retired_max_queue, 0);
6882 
6883 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6884 		pr_warn("ndelay must be less than 1 second, ignored\n");
6885 		sdebug_ndelay = 0;
6886 	} else if (sdebug_ndelay > 0)
6887 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6888 
6889 	switch (sdebug_sector_size) {
6890 	case  512:
6891 	case 1024:
6892 	case 2048:
6893 	case 4096:
6894 		break;
6895 	default:
6896 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6897 		return -EINVAL;
6898 	}
6899 
6900 	switch (sdebug_dif) {
6901 	case T10_PI_TYPE0_PROTECTION:
6902 		break;
6903 	case T10_PI_TYPE1_PROTECTION:
6904 	case T10_PI_TYPE2_PROTECTION:
6905 	case T10_PI_TYPE3_PROTECTION:
6906 		have_dif_prot = true;
6907 		break;
6908 
6909 	default:
6910 		pr_err("dif must be 0, 1, 2 or 3\n");
6911 		return -EINVAL;
6912 	}
6913 
6914 	if (sdebug_num_tgts < 0) {
6915 		pr_err("num_tgts must be >= 0\n");
6916 		return -EINVAL;
6917 	}
6918 
6919 	if (sdebug_guard > 1) {
6920 		pr_err("guard must be 0 or 1\n");
6921 		return -EINVAL;
6922 	}
6923 
6924 	if (sdebug_ato > 1) {
6925 		pr_err("ato must be 0 or 1\n");
6926 		return -EINVAL;
6927 	}
6928 
6929 	if (sdebug_physblk_exp > 15) {
6930 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6931 		return -EINVAL;
6932 	}
6933 
6934 	sdebug_lun_am = sdebug_lun_am_i;
6935 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6936 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6937 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6938 	}
6939 
6940 	if (sdebug_max_luns > 256) {
6941 		if (sdebug_max_luns > 16384) {
6942 			pr_warn("max_luns can be no more than 16384, using default\n");
6943 			sdebug_max_luns = DEF_MAX_LUNS;
6944 		}
6945 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6946 	}
6947 
6948 	if (sdebug_lowest_aligned > 0x3fff) {
6949 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6950 		return -EINVAL;
6951 	}
6952 
6953 	if (submit_queues < 1) {
6954 		pr_err("submit_queues must be 1 or more\n");
6955 		return -EINVAL;
6956 	}
6957 
6958 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6959 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6960 		return -EINVAL;
6961 	}
6962 
6963 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6964 	    (sdebug_host_max_queue < 0)) {
6965 		pr_err("host_max_queue must be in range [0, %d]\n",
6966 		       SDEBUG_CANQUEUE);
6967 		return -EINVAL;
6968 	}
6969 
6970 	if (sdebug_host_max_queue &&
6971 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6972 		sdebug_max_queue = sdebug_host_max_queue;
6973 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6974 			sdebug_max_queue);
6975 	}
6976 
6977 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6978 			       GFP_KERNEL);
6979 	if (sdebug_q_arr == NULL)
6980 		return -ENOMEM;
6981 	for (k = 0; k < submit_queues; ++k)
6982 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6983 
6984 	/*
6985 	 * Check for a host-managed zoned block device specified with
6986 	 * ptype=0x14 or zbc=XXX.
6987 	 */
6988 	if (sdebug_ptype == TYPE_ZBC) {
6989 		sdeb_zbc_model = BLK_ZONED_HM;
6990 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6991 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6992 		if (k < 0) {
6993 			ret = k;
6994 			goto free_q_arr;
6995 		}
6996 		sdeb_zbc_model = k;
6997 		switch (sdeb_zbc_model) {
6998 		case BLK_ZONED_NONE:
6999 		case BLK_ZONED_HA:
7000 			sdebug_ptype = TYPE_DISK;
7001 			break;
7002 		case BLK_ZONED_HM:
7003 			sdebug_ptype = TYPE_ZBC;
7004 			break;
7005 		default:
7006 			pr_err("Invalid ZBC model\n");
7007 			ret = -EINVAL;
7008 			goto free_q_arr;
7009 		}
7010 	}
7011 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7012 		sdeb_zbc_in_use = true;
7013 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7014 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7015 	}
7016 
7017 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7018 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7019 	if (sdebug_dev_size_mb < 1)
7020 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7021 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7022 	sdebug_store_sectors = sz / sdebug_sector_size;
7023 	sdebug_capacity = get_sdebug_capacity();
7024 
7025 	/* play around with geometry, don't waste too much on track 0 */
7026 	sdebug_heads = 8;
7027 	sdebug_sectors_per = 32;
7028 	if (sdebug_dev_size_mb >= 256)
7029 		sdebug_heads = 64;
7030 	else if (sdebug_dev_size_mb >= 16)
7031 		sdebug_heads = 32;
7032 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7033 			       (sdebug_sectors_per * sdebug_heads);
7034 	if (sdebug_cylinders_per >= 1024) {
7035 		/* other LLDs do this; implies >= 1GB ram disk ... */
7036 		sdebug_heads = 255;
7037 		sdebug_sectors_per = 63;
7038 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7039 			       (sdebug_sectors_per * sdebug_heads);
7040 	}
7041 	if (scsi_debug_lbp()) {
7042 		sdebug_unmap_max_blocks =
7043 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7044 
7045 		sdebug_unmap_max_desc =
7046 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7047 
7048 		sdebug_unmap_granularity =
7049 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7050 
7051 		if (sdebug_unmap_alignment &&
7052 		    sdebug_unmap_granularity <=
7053 		    sdebug_unmap_alignment) {
7054 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7055 			ret = -EINVAL;
7056 			goto free_q_arr;
7057 		}
7058 	}
7059 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7060 	if (want_store) {
7061 		idx = sdebug_add_store();
7062 		if (idx < 0) {
7063 			ret = idx;
7064 			goto free_q_arr;
7065 		}
7066 	}
7067 
7068 	pseudo_primary = root_device_register("pseudo_0");
7069 	if (IS_ERR(pseudo_primary)) {
7070 		pr_warn("root_device_register() error\n");
7071 		ret = PTR_ERR(pseudo_primary);
7072 		goto free_vm;
7073 	}
7074 	ret = bus_register(&pseudo_lld_bus);
7075 	if (ret < 0) {
7076 		pr_warn("bus_register error: %d\n", ret);
7077 		goto dev_unreg;
7078 	}
7079 	ret = driver_register(&sdebug_driverfs_driver);
7080 	if (ret < 0) {
7081 		pr_warn("driver_register error: %d\n", ret);
7082 		goto bus_unreg;
7083 	}
7084 
7085 	hosts_to_add = sdebug_add_host;
7086 	sdebug_add_host = 0;
7087 
7088 	for (k = 0; k < hosts_to_add; k++) {
7089 		if (want_store && k == 0) {
7090 			ret = sdebug_add_host_helper(idx);
7091 			if (ret < 0) {
7092 				pr_err("add_host_helper k=%d, error=%d\n",
7093 				       k, -ret);
7094 				break;
7095 			}
7096 		} else {
7097 			ret = sdebug_do_add_host(want_store &&
7098 						 sdebug_per_host_store);
7099 			if (ret < 0) {
7100 				pr_err("add_host k=%d error=%d\n", k, -ret);
7101 				break;
7102 			}
7103 		}
7104 	}
7105 	if (sdebug_verbose)
7106 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7107 
7108 	return 0;
7109 
7110 bus_unreg:
7111 	bus_unregister(&pseudo_lld_bus);
7112 dev_unreg:
7113 	root_device_unregister(pseudo_primary);
7114 free_vm:
7115 	sdebug_erase_store(idx, NULL);
7116 free_q_arr:
7117 	kfree(sdebug_q_arr);
7118 	return ret;
7119 }
7120 
7121 static void __exit scsi_debug_exit(void)
7122 {
7123 	int k = sdebug_num_hosts;
7124 
7125 	stop_all_queued();
7126 	for (; k; k--)
7127 		sdebug_do_remove_host(true);
7128 	free_all_queued();
7129 	driver_unregister(&sdebug_driverfs_driver);
7130 	bus_unregister(&pseudo_lld_bus);
7131 	root_device_unregister(pseudo_primary);
7132 
7133 	sdebug_erase_all_stores(false);
7134 	xa_destroy(per_store_ap);
7135 	kfree(sdebug_q_arr);
7136 }
7137 
7138 device_initcall(scsi_debug_init);
7139 module_exit(scsi_debug_exit);
7140 
7141 static void sdebug_release_adapter(struct device *dev)
7142 {
7143 	struct sdebug_host_info *sdbg_host;
7144 
7145 	sdbg_host = to_sdebug_host(dev);
7146 	kfree(sdbg_host);
7147 }
7148 
7149 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7150 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7151 {
7152 	if (idx < 0)
7153 		return;
7154 	if (!sip) {
7155 		if (xa_empty(per_store_ap))
7156 			return;
7157 		sip = xa_load(per_store_ap, idx);
7158 		if (!sip)
7159 			return;
7160 	}
7161 	vfree(sip->map_storep);
7162 	vfree(sip->dif_storep);
7163 	vfree(sip->storep);
7164 	xa_erase(per_store_ap, idx);
7165 	kfree(sip);
7166 }
7167 
7168 /* Assume apart_from_first==false only in the shutdown case. */
7169 static void sdebug_erase_all_stores(bool apart_from_first)
7170 {
7171 	unsigned long idx;
7172 	struct sdeb_store_info *sip = NULL;
7173 
7174 	xa_for_each(per_store_ap, idx, sip) {
7175 		if (apart_from_first)
7176 			apart_from_first = false;
7177 		else
7178 			sdebug_erase_store(idx, sip);
7179 	}
7180 	if (apart_from_first)
7181 		sdeb_most_recent_idx = sdeb_first_idx;
7182 }
7183 
7184 /*
7185  * Returns the new element's index (idx) in the store xarray if >= 0,
7186  * else a negated errno. The number of stores is limited to 65536.
7187  */
7188 static int sdebug_add_store(void)
7189 {
7190 	int res;
7191 	u32 n_idx;
7192 	unsigned long iflags;
7193 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7194 	struct sdeb_store_info *sip = NULL;
7195 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7196 
7197 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7198 	if (!sip)
7199 		return -ENOMEM;
7200 
7201 	xa_lock_irqsave(per_store_ap, iflags);
7202 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7203 	if (unlikely(res < 0)) {
7204 		xa_unlock_irqrestore(per_store_ap, iflags);
7205 		kfree(sip);
7206 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7207 		return res;
7208 	}
7209 	sdeb_most_recent_idx = n_idx;
7210 	if (sdeb_first_idx < 0)
7211 		sdeb_first_idx = n_idx;
7212 	xa_unlock_irqrestore(per_store_ap, iflags);
7213 
7214 	res = -ENOMEM;
7215 	sip->storep = vzalloc(sz);
7216 	if (!sip->storep) {
7217 		pr_err("user data oom\n");
7218 		goto err;
7219 	}
7220 	if (sdebug_num_parts > 0)
7221 		sdebug_build_parts(sip->storep, sz);
7222 
7223 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7224 	if (sdebug_dix) {
7225 		int dif_size;
7226 
7227 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7228 		sip->dif_storep = vmalloc(dif_size);
7229 
7230 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7231 			sip->dif_storep);
7232 
7233 		if (!sip->dif_storep) {
7234 			pr_err("DIX oom\n");
7235 			goto err;
7236 		}
7237 		memset(sip->dif_storep, 0xff, dif_size);
7238 	}
7239 	/* Logical Block Provisioning */
7240 	if (scsi_debug_lbp()) {
7241 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7242 		sip->map_storep = vmalloc(array_size(sizeof(long),
7243 						     BITS_TO_LONGS(map_size)));
7244 
7245 		pr_info("%lu provisioning blocks\n", map_size);
7246 
7247 		if (!sip->map_storep) {
7248 			pr_err("LBP map oom\n");
7249 			goto err;
7250 		}
7251 
7252 		bitmap_zero(sip->map_storep, map_size);
7253 
7254 		/* Map first 1KB for partition table */
7255 		if (sdebug_num_parts)
7256 			map_region(sip, 0, 2);
7257 	}
7258 
7259 	rwlock_init(&sip->macc_lck);
7260 	return (int)n_idx;
7261 err:
7262 	sdebug_erase_store((int)n_idx, sip);
7263 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7264 	return res;
7265 }
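
/*
 * Sizing sketch for sdebug_add_store() (illustrative numbers): with
 * dev_size_mb=8 and sector_size=512, sz is 8 * 1048576 = 8388608 bytes
 * and sdebug_store_sectors is 16384; with dix enabled the PI array then
 * needs 16384 * sizeof(struct t10_pi_tuple) = 16384 * 8 = 131072 bytes.
 */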
7266 
7267 static int sdebug_add_host_helper(int per_host_idx)
7268 {
7269 	int k, devs_per_host, idx;
7270 	int error = -ENOMEM;
7271 	struct sdebug_host_info *sdbg_host;
7272 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7273 
7274 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7275 	if (!sdbg_host)
7276 		return -ENOMEM;
7277 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7278 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7279 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7280 	sdbg_host->si_idx = idx;
7281 
7282 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7283 
7284 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7285 	for (k = 0; k < devs_per_host; k++) {
7286 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7287 		if (!sdbg_devinfo)
7288 			goto clean;
7289 	}
7290 
7291 	spin_lock(&sdebug_host_list_lock);
7292 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7293 	spin_unlock(&sdebug_host_list_lock);
7294 
7295 	sdbg_host->dev.bus = &pseudo_lld_bus;
7296 	sdbg_host->dev.parent = pseudo_primary;
7297 	sdbg_host->dev.release = &sdebug_release_adapter;
7298 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7299 
7300 	error = device_register(&sdbg_host->dev);
7301 	if (error)
7302 		goto clean;
7303 
7304 	++sdebug_num_hosts;
7305 	return 0;
7306 
7307 clean:
7308 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7309 				 dev_list) {
7310 		list_del(&sdbg_devinfo->dev_list);
7311 		kfree(sdbg_devinfo->zstate);
7312 		kfree(sdbg_devinfo);
7313 	}
7314 	kfree(sdbg_host);
7315 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7316 	return error;
7317 }
7318 
7319 static int sdebug_do_add_host(bool mk_new_store)
7320 {
7321 	int ph_idx = sdeb_most_recent_idx;
7322 
7323 	if (mk_new_store) {
7324 		ph_idx = sdebug_add_store();
7325 		if (ph_idx < 0)
7326 			return ph_idx;
7327 	}
7328 	return sdebug_add_host_helper(ph_idx);
7329 }
7330 
7331 static void sdebug_do_remove_host(bool the_end)
7332 {
7333 	int idx = -1;
7334 	struct sdebug_host_info *sdbg_host = NULL;
7335 	struct sdebug_host_info *sdbg_host2;
7336 
7337 	spin_lock(&sdebug_host_list_lock);
7338 	if (!list_empty(&sdebug_host_list)) {
7339 		sdbg_host = list_entry(sdebug_host_list.prev,
7340 				       struct sdebug_host_info, host_list);
7341 		idx = sdbg_host->si_idx;
7342 	}
7343 	if (!the_end && idx >= 0) {
7344 		bool unique = true;
7345 
7346 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7347 			if (sdbg_host2 == sdbg_host)
7348 				continue;
7349 			if (idx == sdbg_host2->si_idx) {
7350 				unique = false;
7351 				break;
7352 			}
7353 		}
7354 		if (unique) {
7355 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7356 			if (idx == sdeb_most_recent_idx)
7357 				--sdeb_most_recent_idx;
7358 		}
7359 	}
7360 	if (sdbg_host)
7361 		list_del(&sdbg_host->host_list);
7362 	spin_unlock(&sdebug_host_list_lock);
7363 
7364 	if (!sdbg_host)
7365 		return;
7366 
7367 	device_unregister(&sdbg_host->dev);
7368 	--sdebug_num_hosts;
7369 }
7370 
7371 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7372 {
7373 	int num_in_q = 0;
7374 	struct sdebug_dev_info *devip;
7375 
7376 	block_unblock_all_queues(true);
7377 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7378 	if (NULL == devip) {
7379 		block_unblock_all_queues(false);
7380 		return -ENODEV;
7381 	}
7382 	num_in_q = atomic_read(&devip->num_in_q);
7383 
7384 	if (qdepth > SDEBUG_CANQUEUE) {
7385 		qdepth = SDEBUG_CANQUEUE;
7386 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7387 			qdepth, SDEBUG_CANQUEUE);
7388 	}
7389 	if (qdepth < 1)
7390 		qdepth = 1;
7391 	if (qdepth != sdev->queue_depth)
7392 		scsi_change_queue_depth(sdev, qdepth);
7393 
7394 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7395 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7396 			    __func__, qdepth, num_in_q);
7397 	}
7398 	block_unblock_all_queues(false);
7399 	return sdev->queue_depth;
7400 }
7401 
7402 static bool fake_timeout(struct scsi_cmnd *scp)
7403 {
7404 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7405 		if (sdebug_every_nth < -1)
7406 			sdebug_every_nth = -1;
7407 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7408 			return true; /* ignore command causing timeout */
7409 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7410 			 scsi_medium_access_command(scp))
7411 			return true; /* time out reads and writes */
7412 	}
7413 	return false;
7414 }
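
/*
 * Example (illustrative): with every_nth=100 and SDEBUG_OPT_TIMEOUT set
 * in opts, every 100th command (as counted by sdebug_cmnd_count) is
 * silently ignored so the mid level's timeout handling fires; with
 * SDEBUG_OPT_MAC_TIMEOUT only medium access commands are ignored.
 */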
7415 
7416 /* Response to TUR or media access command when device stopped */
7417 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7418 {
7419 	int stopped_state;
7420 	u64 diff_ns = 0;
7421 	ktime_t now_ts = ktime_get_boottime();
7422 	struct scsi_device *sdp = scp->device;
7423 
7424 	stopped_state = atomic_read(&devip->stopped);
7425 	if (stopped_state == 2) {
7426 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7427 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7428 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7429 				/* tur_ms_to_ready timer extinguished */
7430 				atomic_set(&devip->stopped, 0);
7431 				return 0;
7432 			}
7433 		}
7434 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7435 		if (sdebug_verbose)
7436 			sdev_printk(KERN_INFO, sdp,
7437 				    "%s: Not ready: in process of becoming ready\n", my_name);
7438 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7439 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7440 
7441 			if (diff_ns <= tur_nanosecs_to_ready)
7442 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7443 			else
7444 				diff_ns = tur_nanosecs_to_ready;
7445 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7446 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7447 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7448 						   diff_ns);
7449 			return check_condition_result;
7450 		}
7451 	}
7452 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7453 	if (sdebug_verbose)
7454 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7455 			    my_name);
7456 	return check_condition_result;
7457 }
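
/*
 * Example (illustrative): with tur_ms_to_ready=2000, a TEST UNIT READY
 * arriving 500 ms after device creation gets NOT READY (ASC/ASCQ
 * 0x4/0x1, "in process of becoming ready") with the remaining 1500 ms
 * placed in the sense information field, per 20-061r2.
 */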
7458 
7459 static int sdebug_map_queues(struct Scsi_Host *shost)
7460 {
7461 	int i, qoff;
7462 
7463 	if (shost->nr_hw_queues == 1)
7464 		return 0;
7465 
7466 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7467 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7468 
7469 		map->nr_queues  = 0;
7470 
7471 		if (i == HCTX_TYPE_DEFAULT)
7472 			map->nr_queues = submit_queues - poll_queues;
7473 		else if (i == HCTX_TYPE_POLL)
7474 			map->nr_queues = poll_queues;
7475 
7476 		if (!map->nr_queues) {
7477 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7478 			continue;
7479 		}
7480 
7481 		map->queue_offset = qoff;
7482 		blk_mq_map_queues(map);
7483 
7484 		qoff += map->nr_queues;
7485 	}
7486 
7487 	return 0;
7489 }
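
/*
 * Example map (illustrative): with submit_queues=4 and poll_queues=1,
 * HCTX_TYPE_DEFAULT gets nr_queues=3 at queue_offset 0 and
 * HCTX_TYPE_POLL gets nr_queues=1 at queue_offset 3; HCTX_TYPE_READ is
 * left empty.
 */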
7490 
7491 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7492 {
7493 	bool first;
7494 	bool retiring = false;
7495 	int num_entries = 0;
7496 	unsigned int qc_idx = 0;
7497 	unsigned long iflags;
7498 	ktime_t kt_from_boot = ktime_get_boottime();
7499 	struct sdebug_queue *sqp;
7500 	struct sdebug_queued_cmd *sqcp;
7501 	struct scsi_cmnd *scp;
7502 	struct sdebug_dev_info *devip;
7503 	struct sdebug_defer *sd_dp;
7504 
7505 	sqp = sdebug_q_arr + queue_num;
7506 
7507 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7508 
7509 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7510 	if (qc_idx >= sdebug_max_queue)
7511 		goto unlock;
7512 
7513 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7514 		if (first) {
7515 			first = false;
7516 			if (!test_bit(qc_idx, sqp->in_use_bm))
7517 				continue;
7518 		} else {
7519 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7520 		}
7521 		if (qc_idx >= sdebug_max_queue)
7522 			break;
7523 
7524 		sqcp = &sqp->qc_arr[qc_idx];
7525 		sd_dp = sqcp->sd_dp;
7526 		if (unlikely(!sd_dp))
7527 			continue;
7528 		scp = sqcp->a_cmnd;
7529 		if (unlikely(scp == NULL)) {
7530 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7531 			       queue_num, qc_idx, __func__);
7532 			break;
7533 		}
7534 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7535 			if (kt_from_boot < sd_dp->cmpl_ts)
7536 				continue;
7537 
7538 		} else		/* ignoring non-REQ_POLLED requests */
7539 			continue;
7540 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7541 		if (likely(devip))
7542 			atomic_dec(&devip->num_in_q);
7543 		else
7544 			pr_err("devip=NULL from %s\n", __func__);
7545 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7546 			retiring = true;
7547 
7548 		sqcp->a_cmnd = NULL;
7549 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7550 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7551 				sqp, queue_num, qc_idx, __func__);
7552 			break;
7553 		}
7554 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7555 			int k, retval;
7556 
7557 			retval = atomic_read(&retired_max_queue);
7558 			if (qc_idx >= retval) {
7559 				pr_err("index %u too large (retired_max_queue %d)\n", qc_idx, retval);
7560 				break;
7561 			}
7562 			k = find_last_bit(sqp->in_use_bm, retval);
7563 			if ((k < sdebug_max_queue) || (k == retval))
7564 				atomic_set(&retired_max_queue, 0);
7565 			else
7566 				atomic_set(&retired_max_queue, k + 1);
7567 		}
7568 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7569 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7570 		scsi_done(scp); /* callback to mid level */
7571 		num_entries++;
7572 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7573 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7574 			break;
7575 	}
7576 
7577 unlock:
7578 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7579 
7580 	if (num_entries > 0)
7581 		atomic_add(num_entries, &sdeb_mq_poll_count);
7582 	return num_entries;
7583 }
7584 
7585 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7586 				   struct scsi_cmnd *scp)
7587 {
7588 	u8 sdeb_i;
7589 	struct scsi_device *sdp = scp->device;
7590 	const struct opcode_info_t *oip;
7591 	const struct opcode_info_t *r_oip;
7592 	struct sdebug_dev_info *devip;
7593 	u8 *cmd = scp->cmnd;
7594 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7595 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7596 	int k, na;
7597 	int errsts = 0;
7598 	u64 lun_index = sdp->lun & 0x3FFF;
7599 	u32 flags;
7600 	u16 sa;
7601 	u8 opcode = cmd[0];
7602 	bool has_wlun_rl;
7603 	bool inject_now;
7604 
7605 	scsi_set_resid(scp, 0);
7606 	if (sdebug_statistics) {
7607 		atomic_inc(&sdebug_cmnd_count);
7608 		inject_now = inject_on_this_cmd();
7609 	} else {
7610 		inject_now = false;
7611 	}
7612 	if (unlikely(sdebug_verbose &&
7613 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7614 		char b[120];
7615 		int n, len, sb;
7616 
7617 		len = scp->cmd_len;
7618 		sb = (int)sizeof(b);
7619 		if (len > 32)
7620 			strcpy(b, "too long, over 32 bytes");
7621 		else {
7622 			for (k = 0, n = 0; k < len && n < sb; ++k)
7623 				n += scnprintf(b + n, sb - n, "%02x ",
7624 					       (u32)cmd[k]);
7625 		}
7626 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7627 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7628 	}
7629 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7630 		return SCSI_MLQUEUE_HOST_BUSY;
7631 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7632 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7633 		goto err_out;
7634 
7635 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7636 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7637 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7638 	if (unlikely(!devip)) {
7639 		devip = find_build_dev_info(sdp);
7640 		if (NULL == devip)
7641 			goto err_out;
7642 	}
7643 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7644 		atomic_set(&sdeb_inject_pending, 1);
7645 
7646 	na = oip->num_attached;
7647 	r_pfp = oip->pfp;
7648 	if (na) {	/* multiple commands with this opcode */
7649 		r_oip = oip;
7650 		if (FF_SA & r_oip->flags) {
7651 			if (F_SA_LOW & oip->flags)
7652 				sa = 0x1f & cmd[1];
7653 			else
7654 				sa = get_unaligned_be16(cmd + 8);
7655 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7656 				if (opcode == oip->opcode && sa == oip->sa)
7657 					break;
7658 			}
7659 		} else {   /* since no service action only check opcode */
7660 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7661 				if (opcode == oip->opcode)
7662 					break;
7663 			}
7664 		}
7665 		if (k > na) {
7666 			if (F_SA_LOW & r_oip->flags)
7667 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7668 			else if (F_SA_HIGH & r_oip->flags)
7669 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7670 			else
7671 				mk_sense_invalid_opcode(scp);
7672 			goto check_cond;
7673 		}
7674 	}	/* else (when na==0) we assume the oip is a match */
7675 	flags = oip->flags;
7676 	if (unlikely(F_INV_OP & flags)) {
7677 		mk_sense_invalid_opcode(scp);
7678 		goto check_cond;
7679 	}
7680 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7681 		if (sdebug_verbose)
7682 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7683 				    my_name, opcode);
7684 		mk_sense_invalid_opcode(scp);
7685 		goto check_cond;
7686 	}
7687 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7688 		u8 rem;
7689 		int j;
7690 
7691 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7692 			rem = ~oip->len_mask[k] & cmd[k];
7693 			if (rem) {
7694 				for (j = 7; j >= 0; --j, rem <<= 1) {
7695 					if (0x80 & rem)
7696 						break;
7697 				}
7698 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7699 				goto check_cond;
7700 			}
7701 		}
7702 	}
7703 	if (unlikely(!(F_SKIP_UA & flags) &&
7704 		     find_first_bit(devip->uas_bm,
7705 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7706 		errsts = make_ua(scp, devip);
7707 		if (errsts)
7708 			goto check_cond;
7709 	}
7710 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7711 		     atomic_read(&devip->stopped))) {
7712 		errsts = resp_not_ready(scp, devip);
7713 		if (errsts)
7714 			goto fini;
7715 	}
7716 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7717 		goto fini;
7718 	if (unlikely(sdebug_every_nth)) {
7719 		if (fake_timeout(scp))
7720 			return 0;	/* ignore command: make trouble */
7721 	}
7722 	if (likely(oip->pfp))
7723 		pfp = oip->pfp;	/* calls a resp_* function */
7724 	else
7725 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7726 
7727 fini:
7728 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7729 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7730 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7731 					    sdebug_ndelay > 10000)) {
7732 		/*
7733 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise,
7734 		 * for Start Stop Unit (SSU), use at least a 1 second delay;
7735 		 * if sdebug_jdelay > 1, use a long delay of that many seconds.
7736 		 * For Synchronize Cache, use 1/20 of the SSU delay.
7737 		 */
7738 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7739 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7740 
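		/*
		 * jdelay seconds -> jiffies, roughly HZ * jdelay / denom.
		 * E.g. (illustrative, HZ=250): SSU with jdelay=1 gives 250
		 * jiffies (1 s); SYNCHRONIZE CACHE (denom=20) gives 12
		 * jiffies (~50 ms).
		 */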
7741 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7742 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7743 	} else
7744 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7745 				     sdebug_ndelay);
7746 check_cond:
7747 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7748 err_out:
7749 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7750 }
7751 
7752 static struct scsi_host_template sdebug_driver_template = {
7753 	.show_info =		scsi_debug_show_info,
7754 	.write_info =		scsi_debug_write_info,
7755 	.proc_name =		sdebug_proc_name,
7756 	.name =			"SCSI DEBUG",
7757 	.info =			scsi_debug_info,
7758 	.slave_alloc =		scsi_debug_slave_alloc,
7759 	.slave_configure =	scsi_debug_slave_configure,
7760 	.slave_destroy =	scsi_debug_slave_destroy,
7761 	.ioctl =		scsi_debug_ioctl,
7762 	.queuecommand =		scsi_debug_queuecommand,
7763 	.change_queue_depth =	sdebug_change_qdepth,
7764 	.map_queues =		sdebug_map_queues,
7765 	.mq_poll =		sdebug_blk_mq_poll,
7766 	.eh_abort_handler =	scsi_debug_abort,
7767 	.eh_device_reset_handler = scsi_debug_device_reset,
7768 	.eh_target_reset_handler = scsi_debug_target_reset,
7769 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7770 	.eh_host_reset_handler = scsi_debug_host_reset,
7771 	.can_queue =		SDEBUG_CANQUEUE,
7772 	.this_id =		7,
7773 	.sg_tablesize =		SG_MAX_SEGMENTS,
7774 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7775 	.max_sectors =		-1U,
7776 	.max_segment_size =	-1U,
7777 	.module =		THIS_MODULE,
7778 	.track_queue_depth =	1,
7779 };
7780 
7781 static int sdebug_driver_probe(struct device *dev)
7782 {
7783 	int error = 0;
7784 	struct sdebug_host_info *sdbg_host;
7785 	struct Scsi_Host *hpnt;
7786 	int hprot;
7787 
7788 	sdbg_host = to_sdebug_host(dev);
7789 
7790 	sdebug_driver_template.can_queue = sdebug_max_queue;
7791 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7792 	if (!sdebug_clustering)
7793 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7794 
7795 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7796 	if (NULL == hpnt) {
7797 		pr_err("scsi_host_alloc failed\n");
7798 		error = -ENODEV;
7799 		return error;
7800 	}
7801 	if (submit_queues > nr_cpu_ids) {
7802 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7803 			my_name, submit_queues, nr_cpu_ids);
7804 		submit_queues = nr_cpu_ids;
7805 	}
7806 	/*
7807 	 * Decide whether to tell scsi subsystem that we want mq. The
7808 	 * following should give the same answer for each host.
7809 	 */
7810 	hpnt->nr_hw_queues = submit_queues;
7811 	if (sdebug_host_max_queue)
7812 		hpnt->host_tagset = 1;
7813 
7814 	/* poll queues are possible for nr_hw_queues > 1 */
7815 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7816 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7817 			 my_name, poll_queues, hpnt->nr_hw_queues);
7818 		poll_queues = 0;
7819 	}
7820 
7821 	/*
7822 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7823 	 * left over for non-polled I/O.
7824 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7825 	 */
7826 	if (poll_queues >= submit_queues) {
7827 		if (submit_queues < 3)
7828 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7829 		else
7830 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7831 				my_name, submit_queues - 1);
7832 		poll_queues = 1;
7833 	}
7834 	if (poll_queues)
7835 		hpnt->nr_maps = 3;
7836 
7837 	sdbg_host->shost = hpnt;
7838 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7839 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7840 		hpnt->max_id = sdebug_num_tgts + 1;
7841 	else
7842 		hpnt->max_id = sdebug_num_tgts;
7843 	/* = sdebug_max_luns; */
7844 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7845 
7846 	hprot = 0;
7847 
7848 	switch (sdebug_dif) {
7849 
7850 	case T10_PI_TYPE1_PROTECTION:
7851 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7852 		if (sdebug_dix)
7853 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7854 		break;
7855 
7856 	case T10_PI_TYPE2_PROTECTION:
7857 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7858 		if (sdebug_dix)
7859 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7860 		break;
7861 
7862 	case T10_PI_TYPE3_PROTECTION:
7863 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7864 		if (sdebug_dix)
7865 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7866 		break;
7867 
7868 	default:
7869 		if (sdebug_dix)
7870 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7871 		break;
7872 	}
7873 
7874 	scsi_host_set_prot(hpnt, hprot);
7875 
7876 	if (have_dif_prot || sdebug_dix)
7877 		pr_info("host protection%s%s%s%s%s%s%s\n",
7878 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7879 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7880 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7881 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7882 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7883 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7884 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7885 
7886 	if (sdebug_guard == 1)
7887 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7888 	else
7889 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7890 
7891 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7892 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7893 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7894 		sdebug_statistics = true;
7895 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7896 	if (error) {
7897 		pr_err("scsi_add_host failed\n");
7898 		error = -ENODEV;
7899 		scsi_host_put(hpnt);
7900 	} else {
7901 		scsi_scan_host(hpnt);
7902 	}
7903 
7904 	return error;
7905 }
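
/*
 * Example (illustrative): loading with dif=1 and dix=1 sets
 * SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION above, and
 * the probe log reports "host protection DIF1 DIX1".
 */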
7906 
7907 static void sdebug_driver_remove(struct device *dev)
7908 {
7909 	struct sdebug_host_info *sdbg_host;
7910 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7911 
7912 	sdbg_host = to_sdebug_host(dev);
7913 
7914 	scsi_remove_host(sdbg_host->shost);
7915 
7916 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7917 				 dev_list) {
7918 		list_del(&sdbg_devinfo->dev_list);
7919 		kfree(sdbg_devinfo->zstate);
7920 		kfree(sdbg_devinfo);
7921 	}
7922 
7923 	scsi_host_put(sdbg_host->shost);
7924 }
7925 
7926 static int pseudo_lld_bus_match(struct device *dev,
7927 				struct device_driver *dev_driver)
7928 {
7929 	return 1;
7930 }
7931 
7932 static struct bus_type pseudo_lld_bus = {
7933 	.name = "pseudo",
7934 	.match = pseudo_lld_bus_match,
7935 	.probe = sdebug_driver_probe,
7936 	.remove = sdebug_driver_remove,
7937 	.drv_groups = sdebug_drv_groups,
7938 };
7939