xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 34fa67e7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
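/*
 * Example invocation (illustrative values; the parameter names are real
 * module parameters of this driver):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2
 *
 * makes one pseudo host with two targets, each presenting two logical
 * units backed by a shared 256 MiB ram store.
 */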
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
157 
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB	128
160 #define DEF_ZBC_MAX_OPEN_ZONES	8
161 #define DEF_ZBC_NR_CONV_ZONES	1
162 
163 #define SDEBUG_LUN_0_VAL 0
164 
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE		1
167 #define SDEBUG_OPT_MEDIUM_ERR		2
168 #define SDEBUG_OPT_TIMEOUT		4
169 #define SDEBUG_OPT_RECOVERED_ERR	8
170 #define SDEBUG_OPT_TRANSPORT_ERR	16
171 #define SDEBUG_OPT_DIF_ERR		32
172 #define SDEBUG_OPT_DIX_ERR		64
173 #define SDEBUG_OPT_MAC_TIMEOUT		128
174 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
175 #define SDEBUG_OPT_Q_NOISE		0x200
176 #define SDEBUG_OPT_ALL_TSF		0x400
177 #define SDEBUG_OPT_RARE_TSF		0x800
178 #define SDEBUG_OPT_N_WCE		0x1000
179 #define SDEBUG_OPT_RESET_NOISE		0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
181 #define SDEBUG_OPT_HOST_BUSY		0x8000
182 #define SDEBUG_OPT_CMD_ABORT		0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 			      SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 				  SDEBUG_OPT_TRANSPORT_ERR | \
187 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 				  SDEBUG_OPT_SHORT_TRANSFER | \
189 				  SDEBUG_OPT_HOST_BUSY | \
190 				  SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
193 
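/*
 * Worked example (illustrative): loading with opts=0x3 sets
 * SDEBUG_OPT_NOISE | SDEBUG_OPT_MEDIUM_ERR, i.e. verbose logging plus a
 * simulated medium error at sector OPT_MEDIUM_ERR_ADDR (defined below).
 */
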
194 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
195  * priority order. In the subset implemented here, lower numbers have higher
196  * priority. The UA numbers should be a sequence starting from 0 with
197  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
206 
207 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
208  * error is simulated at this sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
211 
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213  * (for response) per submit queue at one time. Can be reduced by max_queue
214  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217  * but cannot exceed SDEBUG_CANQUEUE .
218  */
219 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
220 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
222 
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN			1	/* Data-in command (e.g. READ) */
225 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
227 #define F_D_UNKN		8
228 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
231 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
234 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
236 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
238 
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
250 
251 /* Zone types (zbcr05 table 25) */
252 enum sdebug_z_type {
253 	ZBC_ZONE_TYPE_CNV	= 0x1,
254 	ZBC_ZONE_TYPE_SWR	= 0x2,
255 	ZBC_ZONE_TYPE_SWP	= 0x3,
256 };
257 
258 /* enumeration names taken from table 26, zbcr05 */
259 enum sdebug_z_cond {
260 	ZBC_NOT_WRITE_POINTER	= 0x0,
261 	ZC1_EMPTY		= 0x1,
262 	ZC2_IMPLICIT_OPEN	= 0x2,
263 	ZC3_EXPLICIT_OPEN	= 0x3,
264 	ZC4_CLOSED		= 0x4,
265 	ZC6_READ_ONLY		= 0xd,
266 	ZC5_FULL		= 0xe,
267 	ZC7_OFFLINE		= 0xf,
268 };
269 
270 struct sdeb_zone_state {	/* ZBC: per zone state */
271 	enum sdebug_z_type z_type;
272 	enum sdebug_z_cond z_cond;
273 	bool z_non_seq_resource;
274 	unsigned int z_size;
275 	sector_t z_start;
276 	sector_t z_wp;
277 };
278 
279 struct sdebug_dev_info {
280 	struct list_head dev_list;
281 	unsigned int channel;
282 	unsigned int target;
283 	u64 lun;
284 	uuid_t lu_name;
285 	struct sdebug_host_info *sdbg_host;
286 	unsigned long uas_bm[1];
287 	atomic_t num_in_q;
288 	atomic_t stopped;	/* 1: by SSU, 2: device start */
289 	bool used;
290 
291 	/* For ZBC devices */
292 	enum blk_zoned_model zmodel;
293 	unsigned int zsize;
294 	unsigned int zsize_shift;
295 	unsigned int nr_zones;
296 	unsigned int nr_conv_zones;
297 	unsigned int nr_imp_open;
298 	unsigned int nr_exp_open;
299 	unsigned int nr_closed;
300 	unsigned int max_open;
301 	ktime_t create_ts;	/* time since bootup that this device was created */
302 	struct sdeb_zone_state *zstate;
303 };
304 
305 struct sdebug_host_info {
306 	struct list_head host_list;
307 	int si_idx;	/* sdeb_store_info (per host) xarray index */
308 	struct Scsi_Host *shost;
309 	struct device dev;
310 	struct list_head dev_info_list;
311 };
312 
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 	rwlock_t macc_lck;	/* for atomic media access on this store */
316 	u8 *storep;		/* user data storage (ram) */
317 	struct t10_pi_tuple *dif_storep; /* protection info */
318 	void *map_storep;	/* provisioning map */
319 };
320 
321 #define to_sdebug_host(d)	\
322 	container_of(d, struct sdebug_host_info, dev)
323 
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
326 
327 struct sdebug_defer {
328 	struct hrtimer hrt;
329 	struct execute_work ew;
330 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
331 	int sqa_idx;	/* index of sdebug_queue array */
332 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
333 	int hc_idx;	/* hostwide tag index */
334 	int issuing_cpu;
335 	bool init_hrt;
336 	bool init_wq;
337 	bool init_poll;
338 	bool aborted;	/* true when blk_abort_request() already called */
339 	enum sdeb_defer_type defer_t;
340 };
341 
342 struct sdebug_queued_cmd {
343 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
344 	 * instance indicates this slot is in use.
345 	 */
346 	struct sdebug_defer *sd_dp;
347 	struct scsi_cmnd *a_cmnd;
348 };
349 
350 struct sdebug_queue {
351 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
353 	spinlock_t qc_lock;
354 	atomic_t blocked;	/* to temporarily stop more being queued */
355 };
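
/*
 * Editorial sketch (not part of the driver): claiming a free command slot
 * in a submit queue by scanning in_use_bm under qc_lock, the way the
 * queuing path does. Returns the slot index, or -1 when the queue is full.
 */
static inline int sdeb_claim_slot(struct sdebug_queue *sqp)
{
	unsigned long iflags;
	int k;

	spin_lock_irqsave(&sqp->qc_lock, iflags);
	k = find_first_zero_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
	if (k < SDEBUG_CANQUEUE)
		set_bit(k, sqp->in_use_bm);	/* slot k now owned */
	else
		k = -1;				/* all slots busy */
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	return k;
}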
356 
357 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
358 static atomic_t sdebug_completions;  /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending;
362 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
363 
364 struct opcode_info_t {
365 	u8 num_attached;	/* 0 if this is a leaf entry; 0xff marks */
366 				/* the terminating element */
367 	u8 opcode;		/* if num_attached > 0, preferred */
368 	u16 sa;			/* service action */
369 	u32 flags;		/* OR-ed set of SDEB_F_* */
370 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
371 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
372 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
373 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
374 };
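
/*
 * Editorial sketch (not part of the driver): validating a cdb against
 * len_mask[]. len_mask[0] holds the expected cdb length; len_mask[1..15]
 * flag the bits that may legitimately be set in each cdb byte. The
 * in-tree check on the queuecommand path is similar in spirit.
 */
static inline bool sdeb_cdb_bits_ok(const struct opcode_info_t *oip,
				    const u8 *cmd)
{
	int k;

	for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
		if (cmd[k] & ~oip->len_mask[k])
			return false;	/* reserved/invalid bit set */
	}
	return true;
}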
375 
376 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
377 enum sdeb_opcode_index {
378 	SDEB_I_INVALID_OPCODE =	0,
379 	SDEB_I_INQUIRY = 1,
380 	SDEB_I_REPORT_LUNS = 2,
381 	SDEB_I_REQUEST_SENSE = 3,
382 	SDEB_I_TEST_UNIT_READY = 4,
383 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
384 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
385 	SDEB_I_LOG_SENSE = 7,
386 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
387 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
388 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
389 	SDEB_I_START_STOP = 11,
390 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
391 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
392 	SDEB_I_MAINT_IN = 14,
393 	SDEB_I_MAINT_OUT = 15,
394 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
395 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
396 	SDEB_I_RESERVE = 18,		/* 6, 10 */
397 	SDEB_I_RELEASE = 19,		/* 6, 10 */
398 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
399 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
400 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
401 	SDEB_I_SEND_DIAG = 23,
402 	SDEB_I_UNMAP = 24,
403 	SDEB_I_WRITE_BUFFER = 25,
404 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
405 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
406 	SDEB_I_COMP_WRITE = 28,
407 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
408 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
409 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
410 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
411 };
412 
413 
414 static const unsigned char opcode_ind_arr[256] = {
415 /* 0x0; 0x0->0x1f: 6 byte cdbs */
416 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
417 	    0, 0, 0, 0,
418 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
419 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
420 	    SDEB_I_RELEASE,
421 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
422 	    SDEB_I_ALLOW_REMOVAL, 0,
423 /* 0x20; 0x20->0x3f: 10 byte cdbs */
424 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
425 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
426 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
427 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
428 /* 0x40; 0x40->0x5f: 10 byte cdbs */
429 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
430 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
431 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
432 	    SDEB_I_RELEASE,
433 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
434 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
435 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 	0, SDEB_I_VARIABLE_LEN,
438 /* 0x80; 0x80->0x9f: 16 byte cdbs */
439 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
440 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
441 	0, 0, 0, SDEB_I_VERIFY,
442 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
443 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
444 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
445 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
446 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
447 	     SDEB_I_MAINT_OUT, 0, 0, 0,
448 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
449 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
450 	0, 0, 0, 0, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0,
452 /* 0xc0; 0xc0->0xff: vendor specific */
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 };
458 
459 /*
460  * The following "response" functions return the SCSI mid-level's 4 byte
461  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
462  * command completion, they can OR their return value with
463  * SDEG_RES_IMMED_MASK.
464  */
465 #define SDEG_RES_IMMED_MASK 0x40000000
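
/*
 * Editorial sketch (assumption, modelled on the START STOP UNIT handler):
 * a response function that honours an IMMED bit can request the faster
 * completion path by OR-ing the mask into an otherwise GOOD (0) result.
 */
static inline int resp_example_immed(struct scsi_cmnd *scp)
{
	bool immed = !!(scp->cmnd[1] & 0x1);	/* IMMED: cdb byte 1, bit 0 */

	return immed ? SDEG_RES_IMMED_MASK : 0;	/* 0 == GOOD status */
}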
466 
467 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 
497 static int sdebug_do_add_host(bool mk_new_store);
498 static int sdebug_add_host_helper(int per_host_idx);
499 static void sdebug_do_remove_host(bool the_end);
500 static int sdebug_add_store(void);
501 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
502 static void sdebug_erase_all_stores(bool apart_from_first);
503 
504 /*
505  * The following are overflow arrays for cdbs that "hit" the same index in
506  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
507  * should be placed in opcode_info_arr[], the others should be placed here.
508  */
509 static const struct opcode_info_t msense_iarr[] = {
510 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
511 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
512 };
513 
514 static const struct opcode_info_t mselect_iarr[] = {
515 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
516 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
517 };
518 
519 static const struct opcode_info_t read_iarr[] = {
520 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
521 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
522 	     0, 0, 0, 0} },
523 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
524 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
526 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
527 	     0xc7, 0, 0, 0, 0} },
528 };
529 
530 static const struct opcode_info_t write_iarr[] = {
531 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
532 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
533 		   0, 0, 0, 0, 0, 0} },
534 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
535 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
536 		   0, 0, 0} },
537 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
538 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 		   0xbf, 0xc7, 0, 0, 0, 0} },
540 };
541 
542 static const struct opcode_info_t verify_iarr[] = {
543 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
544 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
545 		   0, 0, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t sa_in_16_iarr[] = {
549 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
550 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
551 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
552 };
553 
554 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
555 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
556 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
557 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
558 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
559 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
560 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
561 };
562 
563 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
564 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
565 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
566 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
567 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
568 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
569 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
570 };
571 
572 static const struct opcode_info_t write_same_iarr[] = {
573 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
574 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
575 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
576 };
577 
578 static const struct opcode_info_t reserve_iarr[] = {
579 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
580 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
581 };
582 
583 static const struct opcode_info_t release_iarr[] = {
584 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
585 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
586 };
587 
588 static const struct opcode_info_t sync_cache_iarr[] = {
589 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
590 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
591 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
592 };
593 
594 static const struct opcode_info_t pre_fetch_iarr[] = {
595 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
596 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
598 };
599 
600 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
601 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
602 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
604 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
605 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
607 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
608 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
610 };
611 
612 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
613 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
614 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
616 };
617 
618 
619 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
620  * plus a terminating element, for logic that scans this table such as
621  * REPORT SUPPORTED OPERATION CODES. */
622 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
623 /* 0 */
624 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
625 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
627 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
629 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
630 	     0, 0} },					/* REPORT LUNS */
631 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
632 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
634 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 /* 5 */
636 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
637 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
638 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
640 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
641 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
643 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
644 	     0, 0, 0} },
645 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
646 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
647 	     0, 0} },
648 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
649 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
650 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
651 /* 10 */
652 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
653 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
654 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
656 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
657 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
658 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
659 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
660 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
662 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
663 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
664 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
665 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
666 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
667 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
668 				0xff, 0, 0xc7, 0, 0, 0, 0} },
669 /* 15 */
670 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
671 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
673 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
674 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
676 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
677 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
678 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
679 	     0xff, 0xff} },
680 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
681 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
682 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
683 	     0} },
684 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
685 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
686 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
687 	     0} },
688 /* 20 */
689 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
690 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
692 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
694 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
696 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
698 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
699 /* 25 */
700 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
701 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
702 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
703 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
704 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
705 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
706 		 0, 0, 0, 0, 0} },
707 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
708 	    resp_sync_cache, sync_cache_iarr,
709 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
710 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
711 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
712 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
713 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
714 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
715 	    resp_pre_fetch, pre_fetch_iarr,
716 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
718 
719 /* 30 */
720 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
721 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
722 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
723 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
724 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
725 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
726 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
727 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
728 /* sentinel */
729 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
730 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
731 };
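
/*
 * Editorial sketch (not part of the driver): resolving a cdb to its
 * opcode_info_t entry. The first cdb byte indexes opcode_ind_arr[] to give
 * an SDEB_I_* value, which in turn indexes opcode_info_arr[]; on a service
 * action mismatch the attached overflow array (arrp) is scanned. The real
 * lookup in the queuecommand path has the same shape.
 */
static inline const struct opcode_info_t *sdeb_lookup_opcode(const u8 *cmd)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[cmd[0]]];

	if (oip->flags & FF_SA) {
		u16 sa = (oip->flags & F_SA_LOW) ? (cmd[1] & 0x1f) :
						   get_unaligned_be16(cmd + 8);
		const struct opcode_info_t *r = oip->arrp;
		int k;

		if (sa == oip->sa)
			return oip;
		for (k = 0; k < oip->num_attached; ++k, ++r) {
			if (sa == r->sa)
				return r;
		}
		return NULL;	/* opcode known but service action is not */
	}
	return oip;
}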
732 
733 static int sdebug_num_hosts;
734 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
735 static int sdebug_ato = DEF_ATO;
736 static int sdebug_cdb_len = DEF_CDB_LEN;
737 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
738 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
739 static int sdebug_dif = DEF_DIF;
740 static int sdebug_dix = DEF_DIX;
741 static int sdebug_dsense = DEF_D_SENSE;
742 static int sdebug_every_nth = DEF_EVERY_NTH;
743 static int sdebug_fake_rw = DEF_FAKE_RW;
744 static unsigned int sdebug_guard = DEF_GUARD;
745 static int sdebug_host_max_queue;	/* per host */
746 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
747 static int sdebug_max_luns = DEF_MAX_LUNS;
748 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
749 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
750 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
751 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
752 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
753 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
754 static int sdebug_no_uld;
755 static int sdebug_num_parts = DEF_NUM_PARTS;
756 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
757 static int sdebug_opt_blks = DEF_OPT_BLKS;
758 static int sdebug_opts = DEF_OPTS;
759 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
760 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
761 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
762 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
763 static int sdebug_sector_size = DEF_SECTOR_SIZE;
764 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
765 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
766 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
767 static unsigned int sdebug_lbpu = DEF_LBPU;
768 static unsigned int sdebug_lbpws = DEF_LBPWS;
769 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
770 static unsigned int sdebug_lbprz = DEF_LBPRZ;
771 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
772 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
773 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
774 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
775 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
776 static int sdebug_uuid_ctl = DEF_UUID_CTL;
777 static bool sdebug_random = DEF_RANDOM;
778 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
779 static bool sdebug_removable = DEF_REMOVABLE;
780 static bool sdebug_clustering;
781 static bool sdebug_host_lock = DEF_HOST_LOCK;
782 static bool sdebug_strict = DEF_STRICT;
783 static bool sdebug_any_injecting_opt;
784 static bool sdebug_verbose;
785 static bool have_dif_prot;
786 static bool write_since_sync;
787 static bool sdebug_statistics = DEF_STATISTICS;
788 static bool sdebug_wp;
789 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
790 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
791 static char *sdeb_zbc_model_s;
792 
793 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
794 			  SAM_LUN_AM_FLAT = 0x1,
795 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
796 			  SAM_LUN_AM_EXTENDED = 0x3};
797 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
798 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
799 
800 static unsigned int sdebug_store_sectors;
801 static sector_t sdebug_capacity;	/* in sectors */
802 
803 /* Legacy BIOS geometry; the kernel may drop these fields but some mode
804    sense pages may still need them */
805 static int sdebug_heads;		/* heads per disk */
806 static int sdebug_cylinders_per;	/* cylinders per surface */
807 static int sdebug_sectors_per;		/* sectors per cylinder */
808 
809 static LIST_HEAD(sdebug_host_list);
810 static DEFINE_SPINLOCK(sdebug_host_list_lock);
811 
812 static struct xarray per_store_arr;
813 static struct xarray *per_store_ap = &per_store_arr;
814 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
815 static int sdeb_most_recent_idx = -1;
816 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
817 
818 static unsigned long map_size;
819 static int num_aborts;
820 static int num_dev_resets;
821 static int num_target_resets;
822 static int num_bus_resets;
823 static int num_host_resets;
824 static int dix_writes;
825 static int dix_reads;
826 static int dif_errors;
827 
828 /* ZBC global data */
829 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
830 static int sdeb_zbc_zone_size_mb;
831 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
832 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
833 
834 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
835 static int poll_queues; /* io_uring iopoll interface */
836 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
837 
838 static DEFINE_RWLOCK(atomic_rw);
839 static DEFINE_RWLOCK(atomic_rw2);
840 
841 static rwlock_t *ramdisk_lck_a[2];
842 
843 static char sdebug_proc_name[] = MY_NAME;
844 static const char *my_name = MY_NAME;
845 
846 static struct bus_type pseudo_lld_bus;
847 
848 static struct device_driver sdebug_driverfs_driver = {
849 	.name 		= sdebug_proc_name,
850 	.bus		= &pseudo_lld_bus,
851 };
852 
853 static const int check_condition_result =
854 	SAM_STAT_CHECK_CONDITION;
855 
856 static const int illegal_condition_result =
857 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
858 
859 static const int device_qfull_result =
860 	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
861 
862 static const int condition_met_result = SAM_STAT_CONDITION_MET;
863 
864 
865 /* Only do the extra work involved in logical block provisioning if one or
866  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
867  * real reads and writes (i.e. not skipping them for speed).
868  */
869 static inline bool scsi_debug_lbp(void)
870 {
871 	return 0 == sdebug_fake_rw &&
872 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
873 }
874 
875 static void *lba2fake_store(struct sdeb_store_info *sip,
876 			    unsigned long long lba)
877 {
878 	struct sdeb_store_info *lsip = sip;
879 
880 	lba = do_div(lba, sdebug_store_sectors);
881 	if (!sip || !sip->storep) {
882 		WARN_ON_ONCE(true);
883 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
884 	}
885 	return lsip->storep + lba * sdebug_sector_size;
886 }
887 
888 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
889 				      sector_t sector)
890 {
891 	sector = sector_div(sector, sdebug_store_sectors);
892 
893 	return sip->dif_storep + sector;
894 }
895 
896 static void sdebug_max_tgts_luns(void)
897 {
898 	struct sdebug_host_info *sdbg_host;
899 	struct Scsi_Host *hpnt;
900 
901 	spin_lock(&sdebug_host_list_lock);
902 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
903 		hpnt = sdbg_host->shost;
904 		if ((hpnt->this_id >= 0) &&
905 		    (sdebug_num_tgts > hpnt->this_id))
906 			hpnt->max_id = sdebug_num_tgts + 1;
907 		else
908 			hpnt->max_id = sdebug_num_tgts;
909 		/* sdebug_max_luns; */
910 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
911 	}
912 	spin_unlock(&sdebug_host_list_lock);
913 }
914 
915 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
916 
917 /* Set in_bit to -1 to indicate no bit position of invalid field */
918 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
919 				 enum sdeb_cmd_data c_d,
920 				 int in_byte, int in_bit)
921 {
922 	unsigned char *sbuff;
923 	u8 sks[4];
924 	int sl, asc;
925 
926 	sbuff = scp->sense_buffer;
927 	if (!sbuff) {
928 		sdev_printk(KERN_ERR, scp->device,
929 			    "%s: sense_buffer is NULL\n", __func__);
930 		return;
931 	}
932 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
933 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
934 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
935 	memset(sks, 0, sizeof(sks));
936 	sks[0] = 0x80;
937 	if (c_d)
938 		sks[0] |= 0x40;
939 	if (in_bit >= 0) {
940 		sks[0] |= 0x8;
941 		sks[0] |= 0x7 & in_bit;
942 	}
943 	put_unaligned_be16(in_byte, sks + 1);
944 	if (sdebug_dsense) {
945 		sl = sbuff[7] + 8;
946 		sbuff[7] = sl;
947 		sbuff[sl] = 0x2;
948 		sbuff[sl + 1] = 0x6;
949 		memcpy(sbuff + sl + 4, sks, 3);
950 	} else
951 		memcpy(sbuff + 15, sks, 3);
952 	if (sdebug_verbose)
953 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
954 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
955 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
956 }
957 
958 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
959 {
960 	if (!scp->sense_buffer) {
961 		sdev_printk(KERN_ERR, scp->device,
962 			    "%s: sense_buffer is NULL\n", __func__);
963 		return;
964 	}
965 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
966 
967 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
968 
969 	if (sdebug_verbose)
970 		sdev_printk(KERN_INFO, scp->device,
971 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
972 			    my_name, key, asc, asq);
973 }
974 
975 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
976 {
977 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
978 }
979 
980 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
981 			    void __user *arg)
982 {
983 	if (sdebug_verbose) {
984 		if (0x1261 == cmd)
985 			sdev_printk(KERN_INFO, dev,
986 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
987 		else if (0x5331 == cmd)
988 			sdev_printk(KERN_INFO, dev,
989 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
990 				    __func__);
991 		else
992 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
993 				    __func__, cmd);
994 	}
995 	return -EINVAL;
996 	/* return -ENOTTY; // correct return but upsets fdisk */
997 }
998 
999 static void config_cdb_len(struct scsi_device *sdev)
1000 {
1001 	switch (sdebug_cdb_len) {
1002 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1003 		sdev->use_10_for_rw = false;
1004 		sdev->use_16_for_rw = false;
1005 		sdev->use_10_for_ms = false;
1006 		break;
1007 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1008 		sdev->use_10_for_rw = true;
1009 		sdev->use_16_for_rw = false;
1010 		sdev->use_10_for_ms = false;
1011 		break;
1012 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1013 		sdev->use_10_for_rw = true;
1014 		sdev->use_16_for_rw = false;
1015 		sdev->use_10_for_ms = true;
1016 		break;
1017 	case 16:
1018 		sdev->use_10_for_rw = false;
1019 		sdev->use_16_for_rw = true;
1020 		sdev->use_10_for_ms = true;
1021 		break;
1022 	case 32: /* No knobs to suggest this so same as 16 for now */
1023 		sdev->use_10_for_rw = false;
1024 		sdev->use_16_for_rw = true;
1025 		sdev->use_10_for_ms = true;
1026 		break;
1027 	default:
1028 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1029 			sdebug_cdb_len);
1030 		sdev->use_10_for_rw = true;
1031 		sdev->use_16_for_rw = false;
1032 		sdev->use_10_for_ms = false;
1033 		sdebug_cdb_len = 10;
1034 		break;
1035 	}
1036 }
1037 
1038 static void all_config_cdb_len(void)
1039 {
1040 	struct sdebug_host_info *sdbg_host;
1041 	struct Scsi_Host *shost;
1042 	struct scsi_device *sdev;
1043 
1044 	spin_lock(&sdebug_host_list_lock);
1045 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1046 		shost = sdbg_host->shost;
1047 		shost_for_each_device(sdev, shost) {
1048 			config_cdb_len(sdev);
1049 		}
1050 	}
1051 	spin_unlock(&sdebug_host_list_lock);
1052 }
1053 
1054 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1055 {
1056 	struct sdebug_host_info *sdhp;
1057 	struct sdebug_dev_info *dp;
1058 
1059 	spin_lock(&sdebug_host_list_lock);
1060 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1061 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1062 			if ((devip->sdbg_host == dp->sdbg_host) &&
1063 			    (devip->target == dp->target))
1064 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1065 		}
1066 	}
1067 	spin_unlock(&sdebug_host_list_lock);
1068 }
1069 
1070 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1071 {
1072 	int k;
1073 
1074 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1075 	if (k != SDEBUG_NUM_UAS) {
1076 		const char *cp = NULL;
1077 
1078 		switch (k) {
1079 		case SDEBUG_UA_POR:
1080 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1081 					POWER_ON_RESET_ASCQ);
1082 			if (sdebug_verbose)
1083 				cp = "power on reset";
1084 			break;
1085 		case SDEBUG_UA_BUS_RESET:
1086 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1087 					BUS_RESET_ASCQ);
1088 			if (sdebug_verbose)
1089 				cp = "bus reset";
1090 			break;
1091 		case SDEBUG_UA_MODE_CHANGED:
1092 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1093 					MODE_CHANGED_ASCQ);
1094 			if (sdebug_verbose)
1095 				cp = "mode parameters changed";
1096 			break;
1097 		case SDEBUG_UA_CAPACITY_CHANGED:
1098 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1099 					CAPACITY_CHANGED_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "capacity data changed";
1102 			break;
1103 		case SDEBUG_UA_MICROCODE_CHANGED:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION,
1105 					TARGET_CHANGED_ASC,
1106 					MICROCODE_CHANGED_ASCQ);
1107 			if (sdebug_verbose)
1108 				cp = "microcode has been changed";
1109 			break;
1110 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1111 			mk_sense_buffer(scp, UNIT_ATTENTION,
1112 					TARGET_CHANGED_ASC,
1113 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1114 			if (sdebug_verbose)
1115 				cp = "microcode has been changed without reset";
1116 			break;
1117 		case SDEBUG_UA_LUNS_CHANGED:
1118 			/*
1119 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1120 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1121 			 * on the target, until a REPORT LUNS command is
1122 			 * received.  SPC-4 behavior is to report it only once.
1123 			 * NOTE:  sdebug_scsi_level does not use the same
1124 			 * values as struct scsi_device->scsi_level.
1125 			 */
1126 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1127 				clear_luns_changed_on_target(devip);
1128 			mk_sense_buffer(scp, UNIT_ATTENTION,
1129 					TARGET_CHANGED_ASC,
1130 					LUNS_CHANGED_ASCQ);
1131 			if (sdebug_verbose)
1132 				cp = "reported luns data has changed";
1133 			break;
1134 		default:
1135 			pr_warn("unexpected unit attention code=%d\n", k);
1136 			if (sdebug_verbose)
1137 				cp = "unknown";
1138 			break;
1139 		}
1140 		clear_bit(k, devip->uas_bm);
1141 		if (sdebug_verbose)
1142 			sdev_printk(KERN_INFO, scp->device,
1143 				   "%s reports: Unit attention: %s\n",
1144 				   my_name, cp);
1145 		return check_condition_result;
1146 	}
1147 	return 0;
1148 }
1149 
1150 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1151 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1152 				int arr_len)
1153 {
1154 	int act_len;
1155 	struct scsi_data_buffer *sdb = &scp->sdb;
1156 
1157 	if (!sdb->length)
1158 		return 0;
1159 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1160 		return DID_ERROR << 16;
1161 
1162 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1163 				      arr, arr_len);
1164 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1165 
1166 	return 0;
1167 }
1168 
1169 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1170  * (DID_ERROR << 16). Writes at the given offset into the data-in buffer;
1171  * multiple calls need not write in ascending offset order. Assumes resid
1172  * was set to scsi_bufflen() prior to any calls.
1173  */
1174 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1175 				  int arr_len, unsigned int off_dst)
1176 {
1177 	unsigned int act_len, n;
1178 	struct scsi_data_buffer *sdb = &scp->sdb;
1179 	off_t skip = off_dst;
1180 
1181 	if (sdb->length <= off_dst)
1182 		return 0;
1183 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1184 		return DID_ERROR << 16;
1185 
1186 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1187 				       arr, arr_len, skip);
1188 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1189 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1190 		 scsi_get_resid(scp));
1191 	n = scsi_bufflen(scp) - (off_dst + act_len);
1192 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1193 	return 0;
1194 }
1195 
1196 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1197  * 'arr' or -1 if error.
1198  */
1199 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1200 			       int arr_len)
1201 {
1202 	if (!scsi_bufflen(scp))
1203 		return 0;
1204 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1205 		return -1;
1206 
1207 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1208 }
1209 
1210 
1211 static char sdebug_inq_vendor_id[9] = "Linux   ";
1212 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1213 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1214 /* Use some locally assigned NAAs for SAS addresses. */
1215 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1216 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1217 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1218 
1219 /* Device identification VPD page. Returns number of bytes placed in arr */
1220 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1221 			  int target_dev_id, int dev_id_num,
1222 			  const char *dev_id_str, int dev_id_str_len,
1223 			  const uuid_t *lu_name)
1224 {
1225 	int num, port_a;
1226 	char b[32];
1227 
1228 	port_a = target_dev_id + 1;
1229 	/* T10 vendor identifier field format (faked) */
1230 	arr[0] = 0x2;	/* ASCII */
1231 	arr[1] = 0x1;
1232 	arr[2] = 0x0;
1233 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1234 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1235 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1236 	num = 8 + 16 + dev_id_str_len;
1237 	arr[3] = num;
1238 	num += 4;
1239 	if (dev_id_num >= 0) {
1240 		if (sdebug_uuid_ctl) {
1241 			/* Locally assigned UUID */
1242 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1243 			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
1244 			arr[num++] = 0x0;
1245 			arr[num++] = 0x12;
1246 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1247 			arr[num++] = 0x0;
1248 			memcpy(arr + num, lu_name, 16);
1249 			num += 16;
1250 		} else {
1251 			/* NAA-3, Logical unit identifier (binary) */
1252 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1253 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1254 			arr[num++] = 0x0;
1255 			arr[num++] = 0x8;
1256 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1257 			num += 8;
1258 		}
1259 		/* Target relative port number */
1260 		arr[num++] = 0x61;	/* proto=sas, binary */
1261 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1262 		arr[num++] = 0x0;	/* reserved */
1263 		arr[num++] = 0x4;	/* length */
1264 		arr[num++] = 0x0;	/* reserved */
1265 		arr[num++] = 0x0;	/* reserved */
1266 		arr[num++] = 0x0;
1267 		arr[num++] = 0x1;	/* relative port A */
1268 	}
1269 	/* NAA-3, Target port identifier */
1270 	arr[num++] = 0x61;	/* proto=sas, binary */
1271 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1272 	arr[num++] = 0x0;
1273 	arr[num++] = 0x8;
1274 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1275 	num += 8;
1276 	/* NAA-3, Target port group identifier */
1277 	arr[num++] = 0x61;	/* proto=sas, binary */
1278 	arr[num++] = 0x95;	/* piv=1, target port group id */
1279 	arr[num++] = 0x0;
1280 	arr[num++] = 0x4;
1281 	arr[num++] = 0;
1282 	arr[num++] = 0;
1283 	put_unaligned_be16(port_group_id, arr + num);
1284 	num += 2;
1285 	/* NAA-3, Target device identifier */
1286 	arr[num++] = 0x61;	/* proto=sas, binary */
1287 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1288 	arr[num++] = 0x0;
1289 	arr[num++] = 0x8;
1290 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1291 	num += 8;
1292 	/* SCSI name string: Target device identifier */
1293 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1294 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1295 	arr[num++] = 0x0;
1296 	arr[num++] = 24;
1297 	memcpy(arr + num, "naa.32222220", 12);
1298 	num += 12;
1299 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1300 	memcpy(arr + num, b, 8);
1301 	num += 8;
1302 	memset(arr + num, 0, 4);
1303 	num += 4;
1304 	return num;
1305 }
1306 
1307 static unsigned char vpd84_data[] = {
1308 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1309     0x22,0x22,0x22,0x0,0xbb,0x1,
1310     0x22,0x22,0x22,0x0,0xbb,0x2,
1311 };
1312 
1313 /*  Software interface identification VPD page */
1314 static int inquiry_vpd_84(unsigned char *arr)
1315 {
1316 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1317 	return sizeof(vpd84_data);
1318 }
1319 
1320 /* Management network addresses VPD page */
1321 static int inquiry_vpd_85(unsigned char *arr)
1322 {
1323 	int num = 0;
1324 	const char *na1 = "https://www.kernel.org/config";
1325 	const char *na2 = "http://www.kernel.org/log";
1326 	int plen, olen;
1327 
1328 	arr[num++] = 0x1;	/* lu, storage config */
1329 	arr[num++] = 0x0;	/* reserved */
1330 	arr[num++] = 0x0;
1331 	olen = strlen(na1);
1332 	plen = olen + 1;
1333 	if (plen % 4)
1334 		plen = ((plen / 4) + 1) * 4;
1335 	arr[num++] = plen;	/* length, null terminated, padded */
1336 	memcpy(arr + num, na1, olen);
1337 	memset(arr + num + olen, 0, plen - olen);
1338 	num += plen;
1339 
1340 	arr[num++] = 0x4;	/* lu, logging */
1341 	arr[num++] = 0x0;	/* reserved */
1342 	arr[num++] = 0x0;
1343 	olen = strlen(na2);
1344 	plen = olen + 1;
1345 	if (plen % 4)
1346 		plen = ((plen / 4) + 1) * 4;
1347 	arr[num++] = plen;	/* length, null terminated, padded */
1348 	memcpy(arr + num, na2, olen);
1349 	memset(arr + num + olen, 0, plen - olen);
1350 	num += plen;
1351 
1352 	return num;
1353 }
1354 
1355 /* SCSI ports VPD page */
1356 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1357 {
1358 	int num = 0;
1359 	int port_a, port_b;
1360 
1361 	port_a = target_dev_id + 1;
1362 	port_b = port_a + 1;
1363 	arr[num++] = 0x0;	/* reserved */
1364 	arr[num++] = 0x0;	/* reserved */
1365 	arr[num++] = 0x0;
1366 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1367 	memset(arr + num, 0, 6);
1368 	num += 6;
1369 	arr[num++] = 0x0;
1370 	arr[num++] = 12;	/* length tp descriptor */
1371 	/* naa-3 target port identifier (A) */
1372 	arr[num++] = 0x61;	/* proto=sas, binary */
1373 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1374 	arr[num++] = 0x0;	/* reserved */
1375 	arr[num++] = 0x8;	/* length */
1376 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1377 	num += 8;
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;	/* reserved */
1380 	arr[num++] = 0x0;
1381 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1382 	memset(arr + num, 0, 6);
1383 	num += 6;
1384 	arr[num++] = 0x0;
1385 	arr[num++] = 12;	/* length tp descriptor */
1386 	/* naa-3 target port identifier (B) */
1387 	arr[num++] = 0x61;	/* proto=sas, binary */
1388 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1389 	arr[num++] = 0x0;	/* reserved */
1390 	arr[num++] = 0x8;	/* length */
1391 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1392 	num += 8;
1393 
1394 	return num;
1395 }
1396 
1397 
1398 static unsigned char vpd89_data[] = {
1399 /* from 4th byte */ 0,0,0,0,
1400 'l','i','n','u','x',' ',' ',' ',
1401 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1402 '1','2','3','4',
1403 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1404 0xec,0,0,0,
1405 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1406 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1407 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1408 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1409 0x53,0x41,
1410 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1411 0x20,0x20,
1412 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1413 0x10,0x80,
1414 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1415 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1416 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1417 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1418 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1419 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1420 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1424 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1425 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1426 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1427 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1440 };
1441 
1442 /* ATA Information VPD page */
1443 static int inquiry_vpd_89(unsigned char *arr)
1444 {
1445 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1446 	return sizeof(vpd89_data);
1447 }
1448 
1449 
1450 static unsigned char vpdb0_data[] = {
1451 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1452 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1455 };
1456 
1457 /* Block limits VPD page (SBC-3) */
1458 static int inquiry_vpd_b0(unsigned char *arr)
1459 {
1460 	unsigned int gran;
1461 
1462 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1463 
1464 	/* Optimal transfer length granularity */
1465 	if (sdebug_opt_xferlen_exp != 0 &&
1466 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1467 		gran = 1 << sdebug_opt_xferlen_exp;
1468 	else
1469 		gran = 1 << sdebug_physblk_exp;
1470 	put_unaligned_be16(gran, arr + 2);
1471 
1472 	/* Maximum Transfer Length */
1473 	if (sdebug_store_sectors > 0x400)
1474 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1475 
1476 	/* Optimal Transfer Length */
1477 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1478 
1479 	if (sdebug_lbpu) {
1480 		/* Maximum Unmap LBA Count */
1481 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1482 
1483 		/* Maximum Unmap Block Descriptor Count */
1484 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1485 	}
1486 
1487 	/* Unmap Granularity Alignment */
1488 	if (sdebug_unmap_alignment) {
1489 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1490 		arr[28] |= 0x80; /* UGAVALID */
1491 	}
1492 
1493 	/* Optimal Unmap Granularity */
1494 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1495 
1496 	/* Maximum WRITE SAME Length */
1497 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1498 
1499 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1502 }
1503 
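/*
 * Editor's sketch of the granularity rule above, using hypothetical
 * module parameters physblk_exp=3 and opt_xferlen_exp=4:
 *
 *	opt_xferlen_exp != 0 and physblk_exp (3) < opt_xferlen_exp (4),
 *	so gran = 1 << 4 = 16 logical blocks.
 *
 * With the default opt_xferlen_exp=0, the physical block exponent wins
 * and gran = 1 << 3 = 8 logical blocks.
 */
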
1504 /* Block device characteristics VPD page (SBC-3) */
1505 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1506 {
1507 	memset(arr, 0, 0x3c);
1508 	arr[0] = 0;
1509 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1510 	arr[2] = 0;
1511 	arr[3] = 5;	/* less than 1.8" */
1512 	if (devip->zmodel == BLK_ZONED_HA)
1513 		arr[4] = 1 << 4;	/* zoned field = 01b */
1514 
1515 	return 0x3c;
1516 }
1517 
1518 /* Logical block provisioning VPD page (SBC-4) */
1519 static int inquiry_vpd_b2(unsigned char *arr)
1520 {
1521 	memset(arr, 0, 0x4);
1522 	arr[0] = 0;			/* threshold exponent */
1523 	if (sdebug_lbpu)
1524 		arr[1] = 1 << 7;
1525 	if (sdebug_lbpws)
1526 		arr[1] |= 1 << 6;
1527 	if (sdebug_lbpws10)
1528 		arr[1] |= 1 << 5;
1529 	if (sdebug_lbprz && scsi_debug_lbp())
1530 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1531 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1532 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1533 	/* threshold_percentage=0 */
1534 	return 0x4;
1535 }
1536 
1537 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1538 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1539 {
1540 	memset(arr, 0, 0x3c);
1541 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1542 	/*
1543 	 * Set Optimal number of open sequential write preferred zones and
1544 	 * Optimal number of non-sequentially written sequential write
1545 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1546 	 * fields set to zero, apart from Max. number of open swrz_s field.
1547 	 */
1548 	put_unaligned_be32(0xffffffff, &arr[4]);
1549 	put_unaligned_be32(0xffffffff, &arr[8]);
1550 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1551 		put_unaligned_be32(devip->max_open, &arr[12]);
1552 	else
1553 		put_unaligned_be32(0xffffffff, &arr[12]);
1554 	return 0x3c;
1555 }
1556 
1557 #define SDEBUG_LONG_INQ_SZ 96
1558 #define SDEBUG_MAX_INQ_ARR_SZ 584
1559 
1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1561 {
1562 	unsigned char pq_pdt;
1563 	unsigned char *arr;
1564 	unsigned char *cmd = scp->cmnd;
1565 	u32 alloc_len, n;
1566 	int ret;
1567 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1568 
1569 	alloc_len = get_unaligned_be16(cmd + 3);
1570 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 	if (!arr)
1572 		return DID_REQUEUE << 16;
1573 	is_disk = (sdebug_ptype == TYPE_DISK);
1574 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1575 	is_disk_zbc = (is_disk || is_zbc);
1576 	have_wlun = scsi_is_wlun(scp->device->lun);
1577 	if (have_wlun)
1578 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1579 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1580 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1581 	else
1582 		pq_pdt = (sdebug_ptype & 0x1f);
1583 	arr[0] = pq_pdt;
1584 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1585 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1586 		kfree(arr);
1587 		return check_condition_result;
1588 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1589 		int lu_id_num, port_group_id, target_dev_id;
1590 		u32 len;
1591 		char lu_id_str[6];
1592 		int host_no = devip->sdbg_host->shost->host_no;
1593 
1594 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1595 		    (devip->channel & 0x7f);
1596 		if (sdebug_vpd_use_hostno == 0)
1597 			host_no = 0;
1598 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1599 			    (devip->target * 1000) + devip->lun);
1600 		target_dev_id = ((host_no + 1) * 2000) +
1601 				 (devip->target * 1000) - 3;
1602 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1603 		if (0 == cmd[2]) { /* supported vital product data pages */
1604 			arr[1] = cmd[2];	/* sanity */
1605 			n = 4;
1606 			arr[n++] = 0x0;   /* this page */
1607 			arr[n++] = 0x80;  /* unit serial number */
1608 			arr[n++] = 0x83;  /* device identification */
1609 			arr[n++] = 0x84;  /* software interface ident. */
1610 			arr[n++] = 0x85;  /* management network addresses */
1611 			arr[n++] = 0x86;  /* extended inquiry */
1612 			arr[n++] = 0x87;  /* mode page policy */
1613 			arr[n++] = 0x88;  /* SCSI ports */
1614 			if (is_disk_zbc) {	  /* SBC or ZBC */
1615 				arr[n++] = 0x89;  /* ATA information */
1616 				arr[n++] = 0xb0;  /* Block limits */
1617 				arr[n++] = 0xb1;  /* Block characteristics */
1618 				if (is_disk)
1619 					arr[n++] = 0xb2;  /* LB Provisioning */
1620 				if (is_zbc)
1621 					arr[n++] = 0xb6;  /* ZB dev. char. */
1622 			}
1623 			arr[3] = n - 4;	  /* number of supported VPD pages */
1624 		} else if (0x80 == cmd[2]) { /* unit serial number */
1625 			arr[1] = cmd[2];	/* sanity */
1626 			arr[3] = len;
1627 			memcpy(&arr[4], lu_id_str, len);
1628 		} else if (0x83 == cmd[2]) { /* device identification */
1629 			arr[1] = cmd[2];	/* sanity */
1630 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1631 						target_dev_id, lu_id_num,
1632 						lu_id_str, len,
1633 						&devip->lu_name);
1634 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1635 			arr[1] = cmd[2];	/* sanity */
1636 			arr[3] = inquiry_vpd_84(&arr[4]);
1637 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1638 			arr[1] = cmd[2];	/* sanity */
1639 			arr[3] = inquiry_vpd_85(&arr[4]);
1640 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1641 			arr[1] = cmd[2];	/* sanity */
1642 			arr[3] = 0x3c;	/* number of following entries */
1643 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1644 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1645 			else if (have_dif_prot)
1646 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1647 			else
1648 				arr[4] = 0x0;   /* no protection stuff */
1649 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1650 		} else if (0x87 == cmd[2]) { /* mode page policy */
1651 			arr[1] = cmd[2];	/* sanity */
1652 			arr[3] = 0x8;	/* number of following entries */
1653 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1654 			arr[6] = 0x80;	/* mlus, shared */
1655 			arr[8] = 0x18;	 /* protocol specific lu */
1656 			arr[10] = 0x82;	 /* mlus, per initiator port */
1657 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1658 			arr[1] = cmd[2];	/* sanity */
1659 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1660 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1661 			arr[1] = cmd[2];        /* sanity */
1662 			n = inquiry_vpd_89(&arr[4]);
1663 			put_unaligned_be16(n, arr + 2);
1664 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1665 			arr[1] = cmd[2];        /* sanity */
1666 			arr[3] = inquiry_vpd_b0(&arr[4]);
1667 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1668 			arr[1] = cmd[2];        /* sanity */
1669 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1670 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1671 			arr[1] = cmd[2];        /* sanity */
1672 			arr[3] = inquiry_vpd_b2(&arr[4]);
1673 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1674 			arr[1] = cmd[2];        /* sanity */
1675 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1676 		} else {
1677 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1678 			kfree(arr);
1679 			return check_condition_result;
1680 		}
1681 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1682 		ret = fill_from_dev_buffer(scp, arr,
1683 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1684 		kfree(arr);
1685 		return ret;
1686 	}
1687 	/* drops through here for a standard inquiry */
1688 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1689 	arr[2] = sdebug_scsi_level;
1690 	arr[3] = 2;    /* response_data_format==2 */
1691 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1692 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1693 	if (sdebug_vpd_use_hostno == 0)
1694 		arr[5] |= 0x10; /* claim: implicit TPGS */
1695 	arr[6] = 0x10; /* claim: MultiP */
1696 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1697 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1698 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1699 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1700 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1701 	/* Use Vendor Specific area to place driver date in ASCII */
1702 	memcpy(&arr[36], sdebug_version_date, 8);
1703 	/* version descriptors (2 bytes each) follow */
1704 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1705 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1706 	n = 62;
1707 	if (is_disk) {		/* SBC-4 no version claimed */
1708 		put_unaligned_be16(0x600, arr + n);
1709 		n += 2;
1710 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1711 		put_unaligned_be16(0x525, arr + n);
1712 		n += 2;
1713 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1714 		put_unaligned_be16(0x624, arr + n);
1715 		n += 2;
1716 	}
1717 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1718 	ret = fill_from_dev_buffer(scp, arr,
1719 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1720 	kfree(arr);
1721 	return ret;
1722 }
1723 
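/*
 * Editor's sketch (hypothetical values): a 6-byte CDB that selects the
 * EVPD branch above, requesting the device identification page (0x83)
 * with a 252-byte allocation length:
 *
 *	u8 cdb[6] = { 0x12, 0x01, 0x83, 0x00, 0xfc, 0x00 };
 *
 * cmd[1] bit 0 (EVPD) routes into the VPD dispatch, cmd[2] is the page
 * code, and get_unaligned_be16(cmd + 3) yields alloc_len = 0xfc = 252.
 */
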
1724 /* See resp_iec_m_pg() for how this data is manipulated */
1725 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1726 				   0, 0, 0x0, 0x0};
1727 
1728 static int resp_requests(struct scsi_cmnd *scp,
1729 			 struct sdebug_dev_info *devip)
1730 {
1731 	unsigned char *cmd = scp->cmnd;
1732 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1733 	bool dsense = !!(cmd[1] & 1);
1734 	u32 alloc_len = cmd[4];
1735 	u32 len = 18;
1736 	int stopped_state = atomic_read(&devip->stopped);
1737 
1738 	memset(arr, 0, sizeof(arr));
1739 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1740 		if (dsense) {
1741 			arr[0] = 0x72;
1742 			arr[1] = NOT_READY;
1743 			arr[2] = LOGICAL_UNIT_NOT_READY;
1744 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1745 			len = 8;
1746 		} else {
1747 			arr[0] = 0x70;
1748 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1749 			arr[7] = 0xa;			/* 18 byte sense buffer */
1750 			arr[12] = LOGICAL_UNIT_NOT_READY;
1751 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1752 		}
1753 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1755 		if (dsense) {
1756 			arr[0] = 0x72;
1757 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1758 			arr[2] = THRESHOLD_EXCEEDED;
1759 			arr[3] = 0xff;		/* Failure prediction(false) */
1760 			len = 8;
1761 		} else {
1762 			arr[0] = 0x70;
1763 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1764 			arr[7] = 0xa;	/* 18 byte sense buffer */
1765 			arr[12] = THRESHOLD_EXCEEDED;
1766 			arr[13] = 0xff;		/* Failure prediction(false) */
1767 		}
1768 	} else {	/* nothing to report */
1769 		if (dsense) {
1770 			len = 8;
1771 			memset(arr, 0, len);
1772 			arr[0] = 0x72;
1773 		} else {
1774 			memset(arr, 0, len);
1775 			arr[0] = 0x70;
1776 			arr[7] = 0xa;
1777 		}
1778 	}
1779 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1780 }
1781 
1782 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1783 {
1784 	unsigned char *cmd = scp->cmnd;
1785 	int power_cond, want_stop, stopped_state;
1786 	bool changing;
1787 
1788 	power_cond = (cmd[4] & 0xf0) >> 4;
1789 	if (power_cond) {
1790 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1791 		return check_condition_result;
1792 	}
1793 	want_stop = !(cmd[4] & 1);
1794 	stopped_state = atomic_read(&devip->stopped);
1795 	if (stopped_state == 2) {
1796 		ktime_t now_ts = ktime_get_boottime();
1797 
1798 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1800 
1801 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 				/* tur_ms_to_ready timer has expired */
1803 				atomic_set(&devip->stopped, 0);
1804 				stopped_state = 0;
1805 			}
1806 		}
1807 		if (stopped_state == 2) {
1808 			if (want_stop) {
1809 				stopped_state = 1;	/* dummy up success */
1810 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1811 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 				return check_condition_result;
1813 			}
1814 		}
1815 	}
1816 	changing = (stopped_state != want_stop);
1817 	if (changing)
1818 		atomic_xchg(&devip->stopped, want_stop);
1819 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1820 		return SDEG_RES_IMMED_MASK;
1821 	else
1822 		return 0;
1823 }
1824 
1825 static sector_t get_sdebug_capacity(void)
1826 {
1827 	static const unsigned int gibibyte = 1073741824;
1828 
1829 	if (sdebug_virtual_gb > 0)
1830 		return (sector_t)sdebug_virtual_gb *
1831 			(gibibyte / sdebug_sector_size);
1832 	else
1833 		return sdebug_store_sectors;
1834 }
1835 
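/*
 * Editor's example: with virtual_gb=4 and the default 512-byte sector
 * size, get_sdebug_capacity() reports
 *
 *	4 * (1073741824 / 512) = 4 * 2097152 = 8388608 sectors,
 *
 * regardless of the (possibly smaller) backing store; accesses are then
 * taken modulo sdebug_store_sectors (see do_device_access()).
 */
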
1836 #define SDEBUG_READCAP_ARR_SZ 8
1837 static int resp_readcap(struct scsi_cmnd *scp,
1838 			struct sdebug_dev_info *devip)
1839 {
1840 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1841 	unsigned int capac;
1842 
1843 	/* following just in case virtual_gb changed */
1844 	sdebug_capacity = get_sdebug_capacity();
1845 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1846 	if (sdebug_capacity < 0xffffffff) {
1847 		capac = (unsigned int)sdebug_capacity - 1;
1848 		put_unaligned_be32(capac, arr + 0);
1849 	} else
1850 		put_unaligned_be32(0xffffffff, arr + 0);
1851 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1852 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1853 }
1854 
1855 #define SDEBUG_READCAP16_ARR_SZ 32
1856 static int resp_readcap16(struct scsi_cmnd *scp,
1857 			  struct sdebug_dev_info *devip)
1858 {
1859 	unsigned char *cmd = scp->cmnd;
1860 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1861 	u32 alloc_len;
1862 
1863 	alloc_len = get_unaligned_be32(cmd + 10);
1864 	/* following just in case virtual_gb changed */
1865 	sdebug_capacity = get_sdebug_capacity();
1866 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1867 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 	arr[13] = sdebug_physblk_exp & 0xf;
1870 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1871 
1872 	if (scsi_debug_lbp()) {
1873 		arr[14] |= 0x80; /* LBPME */
1874 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 		 * in the wider field maps to 0 in this field.
1877 		 */
1878 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1879 			arr[14] |= 0x40;
1880 	}
1881 
1882 	arr[15] = sdebug_lowest_aligned & 0xff;
1883 
1884 	if (have_dif_prot) {
1885 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1886 		arr[12] |= 1; /* PROT_EN */
1887 	}
1888 
1889 	return fill_from_dev_buffer(scp, arr,
1890 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1891 }
1892 
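/*
 * Editor's sketch of the parameter data built above, assuming a
 * hypothetical 8388608-sector device with 512-byte logical blocks:
 *
 *	bytes  0..7	0x00000000007fffff	returned LBA = capacity - 1
 *	bytes  8..11	0x00000200		logical block length = 512
 *	byte  13	physblk_exp (logical blocks per physical, as 2^n)
 *	bytes 14..15	LBPME/LBPRZ flags plus the lowest aligned LBA
 */
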
1893 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1894 
1895 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1896 			      struct sdebug_dev_info *devip)
1897 {
1898 	unsigned char *cmd = scp->cmnd;
1899 	unsigned char *arr;
1900 	int host_no = devip->sdbg_host->shost->host_no;
1901 	int port_group_a, port_group_b, port_a, port_b;
1902 	u32 alen, n, rlen;
1903 	int ret;
1904 
1905 	alen = get_unaligned_be32(cmd + 6);
1906 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1907 	if (!arr)
1908 		return DID_REQUEUE << 16;
1909 	/*
1910 	 * EVPD page 0x88 states we have two ports, one
1911 	 * real and a fake port with no device connected.
1912 	 * So we create two port groups with one port each
1913 	 * and set the group with port B to unavailable.
1914 	 */
1915 	port_a = 0x1; /* relative port A */
1916 	port_b = 0x2; /* relative port B */
1917 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 			(devip->channel & 0x7f);
1919 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 			(devip->channel & 0x7f) + 0x80;
1921 
1922 	/*
1923 	 * The asymmetric access state is cycled according to the host_id.
1924 	 */
1925 	n = 4;
1926 	if (sdebug_vpd_use_hostno == 0) {
1927 		arr[n++] = host_no % 3; /* Asymm access state */
1928 		arr[n++] = 0x0F; /* claim: all states are supported */
1929 	} else {
1930 		arr[n++] = 0x0; /* Active/Optimized path */
1931 		arr[n++] = 0x01; /* only support active/optimized paths */
1932 	}
1933 	put_unaligned_be16(port_group_a, arr + n);
1934 	n += 2;
1935 	arr[n++] = 0;    /* Reserved */
1936 	arr[n++] = 0;    /* Status code */
1937 	arr[n++] = 0;    /* Vendor unique */
1938 	arr[n++] = 0x1;  /* One port per group */
1939 	arr[n++] = 0;    /* Reserved */
1940 	arr[n++] = 0;    /* Reserved */
1941 	put_unaligned_be16(port_a, arr + n);
1942 	n += 2;
1943 	arr[n++] = 3;    /* Port unavailable */
1944 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1945 	put_unaligned_be16(port_group_b, arr + n);
1946 	n += 2;
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Status code */
1949 	arr[n++] = 0;    /* Vendor unique */
1950 	arr[n++] = 0x1;  /* One port per group */
1951 	arr[n++] = 0;    /* Reserved */
1952 	arr[n++] = 0;    /* Reserved */
1953 	put_unaligned_be16(port_b, arr + n);
1954 	n += 2;
1955 
1956 	rlen = n - 4;
1957 	put_unaligned_be32(rlen, arr + 0);
1958 
1959 	/*
1960 	 * Return the smallest of:
1961 	 * - the allocated length (alen)
1962 	 * - the constructed response length (n)
1963 	 * - the maximum array size
1964 	 */
1965 	rlen = min(alen, n);
1966 	ret = fill_from_dev_buffer(scp, arr,
1967 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1968 	kfree(arr);
1969 	return ret;
1970 }
1971 
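/*
 * Editor's sketch of one target port group descriptor as built above
 * (an 8-byte header plus 4 bytes per port; one port per group here):
 *
 *	byte  0		asymmetric access state (0x0 act/opt ... 0x3 unavail)
 *	byte  1		supported-states bitmap
 *	bytes 2..3	target port group identifier (big endian)
 *	byte  5		status code, byte 7 = port count
 *	bytes 8..11	port entry: 2 reserved bytes, then be16 relative id
 */
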
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 			     struct sdebug_dev_info *devip)
1974 {
1975 	bool rctd;
1976 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1977 	u16 req_sa, u;
1978 	u32 alloc_len, a_len;
1979 	int k, offset, len, errsts, count, bump, na;
1980 	const struct opcode_info_t *oip;
1981 	const struct opcode_info_t *r_oip;
1982 	u8 *arr;
1983 	u8 *cmd = scp->cmnd;
1984 
1985 	rctd = !!(cmd[2] & 0x80);
1986 	reporting_opts = cmd[2] & 0x7;
1987 	req_opcode = cmd[3];
1988 	req_sa = get_unaligned_be16(cmd + 4);
1989 	alloc_len = get_unaligned_be32(cmd + 6);
1990 	if (alloc_len < 4 || alloc_len > 0xffff) {
1991 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 		return check_condition_result;
1993 	}
1994 	if (alloc_len > 8192)
1995 		a_len = 8192;
1996 	else
1997 		a_len = alloc_len;
1998 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1999 	if (NULL == arr) {
2000 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2001 				INSUFF_RES_ASCQ);
2002 		return check_condition_result;
2003 	}
2004 	switch (reporting_opts) {
2005 	case 0:	/* all commands */
2006 		/* count number of commands */
2007 		for (count = 0, oip = opcode_info_arr;
2008 		     oip->num_attached != 0xff; ++oip) {
2009 			if (F_INV_OP & oip->flags)
2010 				continue;
2011 			count += (oip->num_attached + 1);
2012 		}
2013 		bump = rctd ? 20 : 8;
2014 		put_unaligned_be32(count * bump, arr);
2015 		for (offset = 4, oip = opcode_info_arr;
2016 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 			if (F_INV_OP & oip->flags)
2018 				continue;
2019 			na = oip->num_attached;
2020 			arr[offset] = oip->opcode;
2021 			put_unaligned_be16(oip->sa, arr + offset + 2);
2022 			if (rctd)
2023 				arr[offset + 5] |= 0x2;
2024 			if (FF_SA & oip->flags)
2025 				arr[offset + 5] |= 0x1;
2026 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2027 			if (rctd)
2028 				put_unaligned_be16(0xa, arr + offset + 8);
2029 			r_oip = oip;
2030 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 				if (F_INV_OP & oip->flags)
2032 					continue;
2033 				offset += bump;
2034 				arr[offset] = oip->opcode;
2035 				put_unaligned_be16(oip->sa, arr + offset + 2);
2036 				if (rctd)
2037 					arr[offset + 5] |= 0x2;
2038 				if (FF_SA & oip->flags)
2039 					arr[offset + 5] |= 0x1;
2040 				put_unaligned_be16(oip->len_mask[0],
2041 						   arr + offset + 6);
2042 				if (rctd)
2043 					put_unaligned_be16(0xa,
2044 							   arr + offset + 8);
2045 			}
2046 			oip = r_oip;
2047 			offset += bump;
2048 		}
2049 		break;
2050 	case 1:	/* one command: opcode only */
2051 	case 2:	/* one command: opcode plus service action */
2052 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2053 		sdeb_i = opcode_ind_arr[req_opcode];
2054 		oip = &opcode_info_arr[sdeb_i];
2055 		if (F_INV_OP & oip->flags) {
2056 			supp = 1;
2057 			offset = 4;
2058 		} else {
2059 			if (1 == reporting_opts) {
2060 				if (FF_SA & oip->flags) {
2061 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 							     2, 2);
2063 					kfree(arr);
2064 					return check_condition_result;
2065 				}
2066 				req_sa = 0;
2067 			} else if (2 == reporting_opts &&
2068 				   0 == (FF_SA & oip->flags)) {
2069 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2070 				kfree(arr);
2071 				return check_condition_result;
2072 			}
2073 			if (0 == (FF_SA & oip->flags) &&
2074 			    req_opcode == oip->opcode)
2075 				supp = 3;
2076 			else if (0 == (FF_SA & oip->flags)) {
2077 				na = oip->num_attached;
2078 				for (k = 0, oip = oip->arrp; k < na;
2079 				     ++k, ++oip) {
2080 					if (req_opcode == oip->opcode)
2081 						break;
2082 				}
2083 				supp = (k >= na) ? 1 : 3;
2084 			} else if (req_sa != oip->sa) {
2085 				na = oip->num_attached;
2086 				for (k = 0, oip = oip->arrp; k < na;
2087 				     ++k, ++oip) {
2088 					if (req_sa == oip->sa)
2089 						break;
2090 				}
2091 				supp = (k >= na) ? 1 : 3;
2092 			} else
2093 				supp = 3;
2094 			if (3 == supp) {
2095 				u = oip->len_mask[0];
2096 				put_unaligned_be16(u, arr + 2);
2097 				arr[4] = oip->opcode;
2098 				for (k = 1; k < u; ++k)
2099 					arr[4 + k] = (k < 16) ?
2100 						 oip->len_mask[k] : 0xff;
2101 				offset = 4 + u;
2102 			} else
2103 				offset = 4;
2104 		}
2105 		arr[1] = (rctd ? 0x80 : 0) | supp;
2106 		if (rctd) {
2107 			put_unaligned_be16(0xa, arr + offset);
2108 			offset += 12;
2109 		}
2110 		break;
2111 	default:
2112 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2113 		kfree(arr);
2114 		return check_condition_result;
2115 	}
2116 	offset = (offset < a_len) ? offset : a_len;
2117 	len = (offset < alloc_len) ? offset : alloc_len;
2118 	errsts = fill_from_dev_buffer(scp, arr, len);
2119 	kfree(arr);
2120 	return errsts;
2121 }
2122 
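/*
 * Editor's sketch (hypothetical values): a MAINTENANCE IN CDB carrying
 * the REPORT SUPPORTED OPERATION CODES service action (0x0c) as parsed
 * above, asking about READ(10) with command timeout descriptors:
 *
 *	u8 cdb[12] = { 0xa3, 0x0c, 0x81, 0x28, 0x00, 0x00,
 *		       0x00, 0x00, 0x04, 0x00, 0x00, 0x00 };
 *
 * cmd[2] = 0x81 sets RCTD (bit 7) with reporting_opts = 1 (opcode
 * only); cmd[3] = 0x28 is the opcode; alloc_len = 0x400 bytes.
 */
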
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 			  struct sdebug_dev_info *devip)
2125 {
2126 	bool repd;
2127 	u32 alloc_len, len;
2128 	u8 arr[16];
2129 	u8 *cmd = scp->cmnd;
2130 
2131 	memset(arr, 0, sizeof(arr));
2132 	repd = !!(cmd[2] & 0x80);
2133 	alloc_len = get_unaligned_be32(cmd + 6);
2134 	if (alloc_len < 4) {
2135 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 		return check_condition_result;
2137 	}
2138 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2139 	arr[1] = 0x1;		/* ITNRS */
2140 	if (repd) {
2141 		arr[3] = 0xc;
2142 		len = 16;
2143 	} else
2144 		len = 4;
2145 
2146 	len = (len < alloc_len) ? len : alloc_len;
2147 	return fill_from_dev_buffer(scp, arr, len);
2148 }
2149 
2150 /* <<Following mode page info copied from ST318451LW>> */
2151 
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 {	/* Read-Write Error Recovery page for mode_sense */
2154 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 					5, 0, 0xff, 0xff};
2156 
2157 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2158 	if (1 == pcontrol)
2159 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 	return sizeof(err_recov_pg);
2161 }
2162 
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { 	/* Disconnect-Reconnect page for mode_sense */
2165 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 					 0, 0, 0, 0, 0, 0, 0, 0};
2167 
2168 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2169 	if (1 == pcontrol)
2170 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 	return sizeof(disconnect_pg);
2172 }
2173 
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 {       /* Format device page for mode_sense */
2176 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 				     0, 0, 0, 0, 0, 0, 0, 0,
2178 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2179 
2180 	memcpy(p, format_pg, sizeof(format_pg));
2181 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 	put_unaligned_be16(sdebug_sector_size, p + 12);
2183 	if (sdebug_removable)
2184 		p[20] |= 0x20; /* should agree with INQUIRY */
2185 	if (1 == pcontrol)
2186 		memset(p + 2, 0, sizeof(format_pg) - 2);
2187 	return sizeof(format_pg);
2188 }
2189 
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 				     0, 0, 0, 0};
2193 
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { 	/* Caching page for mode_sense */
2196 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2200 
2201 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2203 	memcpy(p, caching_pg, sizeof(caching_pg));
2204 	if (1 == pcontrol)
2205 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 	else if (2 == pcontrol)
2207 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 	return sizeof(caching_pg);
2209 }
2210 
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 				    0, 0, 0x2, 0x4b};
2213 
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { 	/* Control mode page for mode_sense */
2216 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2217 					0, 0, 0, 0};
2218 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2219 				     0, 0, 0x2, 0x4b};
2220 
2221 	if (sdebug_dsense)
2222 		ctrl_m_pg[2] |= 0x4;
2223 	else
2224 		ctrl_m_pg[2] &= ~0x4;
2225 
2226 	if (sdebug_ato)
2227 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2228 
2229 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2230 	if (1 == pcontrol)
2231 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 	else if (2 == pcontrol)
2233 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 	return sizeof(ctrl_m_pg);
2235 }
2236 
2237 
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 {	/* Informational Exceptions control mode page for mode_sense */
2240 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2241 				       0, 0, 0x0, 0x0};
2242 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 				      0, 0, 0x0, 0x0};
2244 
2245 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2246 	if (1 == pcontrol)
2247 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 	else if (2 == pcontrol)
2249 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 	return sizeof(iec_m_pg);
2251 }
2252 
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 {	/* SAS SSP mode page - short format for mode_sense */
2255 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2257 
2258 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2259 	if (1 == pcontrol)
2260 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 	return sizeof(sas_sf_m_pg);
2262 }
2263 
2264 
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2266 			      int target_dev_id)
2267 {	/* SAS phy control and discover mode page for mode_sense */
2268 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2271 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2272 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2273 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 		    0, 0, 0, 0, 0, 0, 0, 0,
2275 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2277 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2278 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2279 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 		    0, 0, 0, 0, 0, 0, 0, 0,
2281 		};
2282 	int port_a, port_b;
2283 
2284 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 	port_a = target_dev_id + 1;
2289 	port_b = port_a + 1;
2290 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 	put_unaligned_be32(port_a, p + 20);
2292 	put_unaligned_be32(port_b, p + 48 + 20);
2293 	if (1 == pcontrol)
2294 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 	return sizeof(sas_pcd_m_pg);
2296 }
2297 
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 {	/* SAS SSP shared protocol specific port mode subpage */
2300 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 		    0, 0, 0, 0, 0, 0, 0, 0,
2302 		};
2303 
2304 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2305 	if (1 == pcontrol)
2306 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 	return sizeof(sas_sha_m_pg);
2308 }
2309 
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2311 
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 			   struct sdebug_dev_info *devip)
2314 {
2315 	int pcontrol, pcode, subpcode, bd_len;
2316 	unsigned char dev_spec;
2317 	u32 alloc_len, offset, len;
2318 	int target_dev_id;
2319 	int target = scp->device->id;
2320 	unsigned char *ap;
2321 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2322 	unsigned char *cmd = scp->cmnd;
2323 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2324 
2325 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2326 	pcontrol = (cmd[2] & 0xc0) >> 6;
2327 	pcode = cmd[2] & 0x3f;
2328 	subpcode = cmd[3];
2329 	msense_6 = (MODE_SENSE == cmd[0]);
2330 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2331 	is_disk = (sdebug_ptype == TYPE_DISK);
2332 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2333 	if ((is_disk || is_zbc) && !dbd)
2334 		bd_len = llbaa ? 16 : 8;
2335 	else
2336 		bd_len = 0;
2337 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2338 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2339 	if (0x3 == pcontrol) {  /* Saving values not supported */
2340 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2341 		return check_condition_result;
2342 	}
2343 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2344 			(devip->target * 1000) - 3;
2345 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2346 	if (is_disk || is_zbc) {
2347 		dev_spec = 0x10;	/* DPOFUA; becomes 0x90 when WP=1 (read-only) */
2348 		if (sdebug_wp)
2349 			dev_spec |= 0x80;
2350 	} else
2351 		dev_spec = 0x0;
2352 	if (msense_6) {
2353 		arr[2] = dev_spec;
2354 		arr[3] = bd_len;
2355 		offset = 4;
2356 	} else {
2357 		arr[3] = dev_spec;
2358 		if (16 == bd_len)
2359 			arr[4] = 0x1;	/* set LONGLBA bit */
2360 		arr[7] = bd_len;	/* assume 255 or less */
2361 		offset = 8;
2362 	}
2363 	ap = arr + offset;
2364 	if ((bd_len > 0) && (!sdebug_capacity))
2365 		sdebug_capacity = get_sdebug_capacity();
2366 
2367 	if (8 == bd_len) {
2368 		if (sdebug_capacity > 0xfffffffe)
2369 			put_unaligned_be32(0xffffffff, ap + 0);
2370 		else
2371 			put_unaligned_be32(sdebug_capacity, ap + 0);
2372 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2373 		offset += bd_len;
2374 		ap = arr + offset;
2375 	} else if (16 == bd_len) {
2376 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2377 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2378 		offset += bd_len;
2379 		ap = arr + offset;
2380 	}
2381 
2382 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2383 		/* TODO: Control Extension page */
2384 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2385 		return check_condition_result;
2386 	}
2387 	bad_pcode = false;
2388 
2389 	switch (pcode) {
2390 	case 0x1:	/* Read-Write error recovery page, direct access */
2391 		len = resp_err_recov_pg(ap, pcontrol, target);
2392 		offset += len;
2393 		break;
2394 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2395 		len = resp_disconnect_pg(ap, pcontrol, target);
2396 		offset += len;
2397 		break;
2398 	case 0x3:       /* Format device page, direct access */
2399 		if (is_disk) {
2400 			len = resp_format_pg(ap, pcontrol, target);
2401 			offset += len;
2402 		} else
2403 			bad_pcode = true;
2404 		break;
2405 	case 0x8:	/* Caching page, direct access */
2406 		if (is_disk || is_zbc) {
2407 			len = resp_caching_pg(ap, pcontrol, target);
2408 			offset += len;
2409 		} else
2410 			bad_pcode = true;
2411 		break;
2412 	case 0xa:	/* Control Mode page, all devices */
2413 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2414 		offset += len;
2415 		break;
2416 	case 0x19:	/* Protocol specific port; for SAS: phy control+discover */
2417 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2418 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2419 			return check_condition_result;
2420 		}
2421 		len = 0;
2422 		if ((0x0 == subpcode) || (0xff == subpcode))
2423 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2424 		if ((0x1 == subpcode) || (0xff == subpcode))
2425 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2426 						  target_dev_id);
2427 		if ((0x2 == subpcode) || (0xff == subpcode))
2428 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2429 		offset += len;
2430 		break;
2431 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2432 		len = resp_iec_m_pg(ap, pcontrol, target);
2433 		offset += len;
2434 		break;
2435 	case 0x3f:	/* Read all Mode pages */
2436 		if ((0 == subpcode) || (0xff == subpcode)) {
2437 			len = resp_err_recov_pg(ap, pcontrol, target);
2438 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2439 			if (is_disk) {
2440 				len += resp_format_pg(ap + len, pcontrol,
2441 						      target);
2442 				len += resp_caching_pg(ap + len, pcontrol,
2443 						       target);
2444 			} else if (is_zbc) {
2445 				len += resp_caching_pg(ap + len, pcontrol,
2446 						       target);
2447 			}
2448 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2449 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2450 			if (0xff == subpcode) {
2451 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2452 						  target, target_dev_id);
2453 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 			}
2455 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2456 			offset += len;
2457 		} else {
2458 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2459 			return check_condition_result;
2460 		}
2461 		break;
2462 	default:
2463 		bad_pcode = true;
2464 		break;
2465 	}
2466 	if (bad_pcode) {
2467 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2468 		return check_condition_result;
2469 	}
2470 	if (msense_6)
2471 		arr[0] = offset - 1;
2472 	else
2473 		put_unaligned_be16((offset - 2), arr + 0);
2474 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2475 }
2476 
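/*
 * Editor's sketch (hypothetical values): a MODE SENSE(10) CDB asking
 * for the current values of the Caching mode page handled above:
 *
 *	u8 cdb[10] = { 0x5a, 0x00, 0x08, 0x00, 0x00,
 *		       0x00, 0x00, 0x00, 0xfc, 0x00 };
 *
 * cmd[2] = 0x08 encodes PC = 0 (current) in bits 7..6 and page code
 * 0x08 in bits 5..0; cmd[3] = 0 selects subpage 0; the allocation
 * length at cmd[7..8] is 0xfc = 252 bytes.
 */
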
2477 #define SDEBUG_MAX_MSELECT_SZ 512
2478 
2479 static int resp_mode_select(struct scsi_cmnd *scp,
2480 			    struct sdebug_dev_info *devip)
2481 {
2482 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2483 	int param_len, res, mpage;
2484 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2485 	unsigned char *cmd = scp->cmnd;
2486 	int mselect6 = (MODE_SELECT == cmd[0]);
2487 
2488 	memset(arr, 0, sizeof(arr));
2489 	pf = cmd[1] & 0x10;
2490 	sp = cmd[1] & 0x1;
2491 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2492 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2493 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2494 		return check_condition_result;
2495 	}
2496 	res = fetch_to_dev_buffer(scp, arr, param_len);
2497 	if (-1 == res)
2498 		return DID_ERROR << 16;
2499 	else if (sdebug_verbose && (res < param_len))
2500 		sdev_printk(KERN_INFO, scp->device,
2501 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2502 			    __func__, param_len, res);
2503 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2504 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2505 	off = bd_len + (mselect6 ? 4 : 8);
2506 	if (md_len > 2 || off >= res) {
2507 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2508 		return check_condition_result;
2509 	}
2510 	mpage = arr[off] & 0x3f;
2511 	ps = !!(arr[off] & 0x80);
2512 	if (ps) {
2513 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2514 		return check_condition_result;
2515 	}
2516 	spf = !!(arr[off] & 0x40);
2517 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2518 		       (arr[off + 1] + 2);
2519 	if ((pg_len + off) > param_len) {
2520 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2521 				PARAMETER_LIST_LENGTH_ERR, 0);
2522 		return check_condition_result;
2523 	}
2524 	switch (mpage) {
2525 	case 0x8:      /* Caching Mode page */
2526 		if (caching_pg[1] == arr[off + 1]) {
2527 			memcpy(caching_pg + 2, arr + off + 2,
2528 			       sizeof(caching_pg) - 2);
2529 			goto set_mode_changed_ua;
2530 		}
2531 		break;
2532 	case 0xa:      /* Control Mode page */
2533 		if (ctrl_m_pg[1] == arr[off + 1]) {
2534 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2535 			       sizeof(ctrl_m_pg) - 2);
2536 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2540 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2541 			goto set_mode_changed_ua;
2542 		}
2543 		break;
2544 	case 0x1c:      /* Informational Exceptions Mode page */
2545 		if (iec_m_pg[1] == arr[off + 1]) {
2546 			memcpy(iec_m_pg + 2, arr + off + 2,
2547 			       sizeof(iec_m_pg) - 2);
2548 			goto set_mode_changed_ua;
2549 		}
2550 		break;
2551 	default:
2552 		break;
2553 	}
2554 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2555 	return check_condition_result;
2556 set_mode_changed_ua:
2557 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2558 	return 0;
2559 }
2560 
2561 static int resp_temp_l_pg(unsigned char *arr)
2562 {
2563 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2564 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2565 		};
2566 
2567 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2568 	return sizeof(temp_l_pg);
2569 }
2570 
2571 static int resp_ie_l_pg(unsigned char *arr)
2572 {
2573 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2574 		};
2575 
2576 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2577 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2578 		arr[4] = THRESHOLD_EXCEEDED;
2579 		arr[5] = 0xff;
2580 	}
2581 	return sizeof(ie_l_pg);
2582 }
2583 
2584 #define SDEBUG_MAX_LSENSE_SZ 512
2585 
2586 static int resp_log_sense(struct scsi_cmnd *scp,
2587 			  struct sdebug_dev_info *devip)
2588 {
2589 	int ppc, sp, pcode, subpcode;
2590 	u32 alloc_len, len, n;
2591 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2592 	unsigned char *cmd = scp->cmnd;
2593 
2594 	memset(arr, 0, sizeof(arr));
2595 	ppc = cmd[1] & 0x2;
2596 	sp = cmd[1] & 0x1;
2597 	if (ppc || sp) {
2598 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2599 		return check_condition_result;
2600 	}
2601 	pcode = cmd[2] & 0x3f;
2602 	subpcode = cmd[3] & 0xff;
2603 	alloc_len = get_unaligned_be16(cmd + 7);
2604 	arr[0] = pcode;
2605 	if (0 == subpcode) {
2606 		switch (pcode) {
2607 		case 0x0:	/* Supported log pages log page */
2608 			n = 4;
2609 			arr[n++] = 0x0;		/* this page */
2610 			arr[n++] = 0xd;		/* Temperature */
2611 			arr[n++] = 0x2f;	/* Informational exceptions */
2612 			arr[3] = n - 4;
2613 			break;
2614 		case 0xd:	/* Temperature log page */
2615 			arr[3] = resp_temp_l_pg(arr + 4);
2616 			break;
2617 		case 0x2f:	/* Informational exceptions log page */
2618 			arr[3] = resp_ie_l_pg(arr + 4);
2619 			break;
2620 		default:
2621 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2622 			return check_condition_result;
2623 		}
2624 	} else if (0xff == subpcode) {
2625 		arr[0] |= 0x40;
2626 		arr[1] = subpcode;
2627 		switch (pcode) {
2628 		case 0x0:	/* Supported log pages and subpages log page */
2629 			n = 4;
2630 			arr[n++] = 0x0;
2631 			arr[n++] = 0x0;		/* 0,0 page */
2632 			arr[n++] = 0x0;
2633 			arr[n++] = 0xff;	/* this page */
2634 			arr[n++] = 0xd;
2635 			arr[n++] = 0x0;		/* Temperature */
2636 			arr[n++] = 0x2f;
2637 			arr[n++] = 0x0;	/* Informational exceptions */
2638 			arr[3] = n - 4;
2639 			break;
2640 		case 0xd:	/* Temperature subpages */
2641 			n = 4;
2642 			arr[n++] = 0xd;
2643 			arr[n++] = 0x0;		/* Temperature */
2644 			arr[3] = n - 4;
2645 			break;
2646 		case 0x2f:	/* Informational exceptions subpages */
2647 			n = 4;
2648 			arr[n++] = 0x2f;
2649 			arr[n++] = 0x0;		/* Informational exceptions */
2650 			arr[3] = n - 4;
2651 			break;
2652 		default:
2653 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 			return check_condition_result;
2655 		}
2656 	} else {
2657 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2658 		return check_condition_result;
2659 	}
2660 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2661 	return fill_from_dev_buffer(scp, arr,
2662 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2663 }
2664 
2665 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2666 {
2667 	return devip->nr_zones != 0;
2668 }
2669 
2670 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2671 					unsigned long long lba)
2672 {
2673 	return &devip->zstate[lba >> devip->zsize_shift];
2674 }
2675 
2676 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2677 {
2678 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2679 }
2680 
2681 static void zbc_close_zone(struct sdebug_dev_info *devip,
2682 			   struct sdeb_zone_state *zsp)
2683 {
2684 	enum sdebug_z_cond zc;
2685 
2686 	if (zbc_zone_is_conv(zsp))
2687 		return;
2688 
2689 	zc = zsp->z_cond;
2690 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2691 		return;
2692 
2693 	if (zc == ZC2_IMPLICIT_OPEN)
2694 		devip->nr_imp_open--;
2695 	else
2696 		devip->nr_exp_open--;
2697 
2698 	if (zsp->z_wp == zsp->z_start) {
2699 		zsp->z_cond = ZC1_EMPTY;
2700 	} else {
2701 		zsp->z_cond = ZC4_CLOSED;
2702 		devip->nr_closed++;
2703 	}
2704 }
2705 
2706 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2707 {
2708 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2709 	unsigned int i;
2710 
2711 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2712 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2713 			zbc_close_zone(devip, zsp);
2714 			return;
2715 		}
2716 	}
2717 }
2718 
2719 static void zbc_open_zone(struct sdebug_dev_info *devip,
2720 			  struct sdeb_zone_state *zsp, bool explicit)
2721 {
2722 	enum sdebug_z_cond zc;
2723 
2724 	if (zbc_zone_is_conv(zsp))
2725 		return;
2726 
2727 	zc = zsp->z_cond;
2728 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2729 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2730 		return;
2731 
2732 	/* Close an implicit open zone if necessary */
2733 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2734 		zbc_close_zone(devip, zsp);
2735 	else if (devip->max_open &&
2736 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2737 		zbc_close_imp_open_zone(devip);
2738 
2739 	if (zsp->z_cond == ZC4_CLOSED)
2740 		devip->nr_closed--;
2741 	if (explicit) {
2742 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2743 		devip->nr_exp_open++;
2744 	} else {
2745 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2746 		devip->nr_imp_open++;
2747 	}
2748 }
2749 
2750 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2751 		       unsigned long long lba, unsigned int num)
2752 {
2753 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2754 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2755 
2756 	if (zbc_zone_is_conv(zsp))
2757 		return;
2758 
2759 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2760 		zsp->z_wp += num;
2761 		if (zsp->z_wp >= zend)
2762 			zsp->z_cond = ZC5_FULL;
2763 		return;
2764 	}
2765 
2766 	while (num) {
2767 		if (lba != zsp->z_wp)
2768 			zsp->z_non_seq_resource = true;
2769 
2770 		end = lba + num;
2771 		if (end >= zend) {
2772 			n = zend - lba;
2773 			zsp->z_wp = zend;
2774 		} else if (end > zsp->z_wp) {
2775 			n = num;
2776 			zsp->z_wp = end;
2777 		} else {
2778 			n = num;
2779 		}
2780 		if (zsp->z_wp >= zend)
2781 			zsp->z_cond = ZC5_FULL;
2782 
2783 		num -= n;
2784 		lba += n;
2785 		if (num) {
2786 			zsp++;
2787 			zend = zsp->z_start + zsp->z_size;
2788 		}
2789 	}
2790 }
2791 
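/*
 * Editor's walk-through of the sequential-write-preferred loop above,
 * assuming a hypothetical zone at z_start = 0x200 with z_size = 0x100
 * and z_wp = 0x240: a write of num = 0x20 at lba = 0x230 starts below
 * the write pointer, so z_non_seq_resource is set; end = 0x250 exceeds
 * z_wp, so the write pointer advances to 0x250. A write that reaches
 * zend marks the zone ZC5_FULL, and any remainder carries into the
 * next zone via the zsp++ at the bottom of the loop.
 */
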
2792 static int check_zbc_access_params(struct scsi_cmnd *scp,
2793 			unsigned long long lba, unsigned int num, bool write)
2794 {
2795 	struct scsi_device *sdp = scp->device;
2796 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2797 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2798 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2799 
2800 	if (!write) {
2801 		if (devip->zmodel == BLK_ZONED_HA)
2802 			return 0;
2803 		/* For host-managed, reads cannot cross zone type boundaries */
2804 		if (zsp_end != zsp &&
2805 		    zbc_zone_is_conv(zsp) &&
2806 		    !zbc_zone_is_conv(zsp_end)) {
2807 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2808 					LBA_OUT_OF_RANGE,
2809 					READ_INVDATA_ASCQ);
2810 			return check_condition_result;
2811 		}
2812 		return 0;
2813 	}
2814 
2815 	/* No restrictions for writes within conventional zones */
2816 	if (zbc_zone_is_conv(zsp)) {
2817 		if (!zbc_zone_is_conv(zsp_end)) {
2818 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2819 					LBA_OUT_OF_RANGE,
2820 					WRITE_BOUNDARY_ASCQ);
2821 			return check_condition_result;
2822 		}
2823 		return 0;
2824 	}
2825 
2826 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2827 		/* Writes cannot cross sequential zone boundaries */
2828 		if (zsp_end != zsp) {
2829 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2830 					LBA_OUT_OF_RANGE,
2831 					WRITE_BOUNDARY_ASCQ);
2832 			return check_condition_result;
2833 		}
2834 		/* Cannot write full zones */
2835 		if (zsp->z_cond == ZC5_FULL) {
2836 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 					INVALID_FIELD_IN_CDB, 0);
2838 			return check_condition_result;
2839 		}
2840 		/* Writes must be aligned to the zone WP */
2841 		if (lba != zsp->z_wp) {
2842 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2843 					LBA_OUT_OF_RANGE,
2844 					UNALIGNED_WRITE_ASCQ);
2845 			return check_condition_result;
2846 		}
2847 	}
2848 
2849 	/* Handle implicit open of closed and empty zones */
2850 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2851 		if (devip->max_open &&
2852 		    devip->nr_exp_open >= devip->max_open) {
2853 			mk_sense_buffer(scp, DATA_PROTECT,
2854 					INSUFF_RES_ASC,
2855 					INSUFF_ZONE_ASCQ);
2856 			return check_condition_result;
2857 		}
2858 		zbc_open_zone(devip, zsp, false);
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 static inline int check_device_access_params(struct scsi_cmnd *scp,
2865 			unsigned long long lba, unsigned int num, bool write)
2867 {
2868 	struct scsi_device *sdp = scp->device;
2869 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2870 
2871 	if (lba + num > sdebug_capacity) {
2872 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2873 		return check_condition_result;
2874 	}
2875 	/* transfer length excessive (tie in to block limits VPD page) */
2876 	if (num > sdebug_store_sectors) {
2877 		/* needs work to find which cdb byte 'num' comes from */
2878 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2879 		return check_condition_result;
2880 	}
2881 	if (write && unlikely(sdebug_wp)) {
2882 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2883 		return check_condition_result;
2884 	}
2885 	if (sdebug_dev_is_zoned(devip))
2886 		return check_zbc_access_params(scp, lba, num, write);
2887 
2888 	return 0;
2889 }
2890 
2891 /*
2892  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2893  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2894  * that access any of the "stores" in struct sdeb_store_info should call this
2895  * function with bug_if_fake_rw set to true.
2896  */
2897 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2898 						bool bug_if_fake_rw)
2899 {
2900 	if (sdebug_fake_rw) {
2901 		BUG_ON(bug_if_fake_rw);	/* See note above */
2902 		return NULL;
2903 	}
2904 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2905 }
2906 
2907 /* Returns number of bytes copied or -1 if error. */
2908 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2909 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2910 {
2911 	int ret;
2912 	u64 block, rest = 0;
2913 	enum dma_data_direction dir;
2914 	struct scsi_data_buffer *sdb = &scp->sdb;
2915 	u8 *fsp;
2916 
2917 	if (do_write) {
2918 		dir = DMA_TO_DEVICE;
2919 		write_since_sync = true;
2920 	} else {
2921 		dir = DMA_FROM_DEVICE;
2922 	}
2923 
2924 	if (!sdb->length || !sip)
2925 		return 0;
2926 	if (scp->sc_data_direction != dir)
2927 		return -1;
2928 	fsp = sip->storep;
2929 
2930 	block = do_div(lba, sdebug_store_sectors);
2931 	if (block + num > sdebug_store_sectors)
2932 		rest = block + num - sdebug_store_sectors;
2933 
2934 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2935 		   fsp + (block * sdebug_sector_size),
2936 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2937 	if (ret != (num - rest) * sdebug_sector_size)
2938 		return ret;
2939 
2940 	if (rest) {
2941 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2942 			    fsp, rest * sdebug_sector_size,
2943 			    sg_skip + ((num - rest) * sdebug_sector_size),
2944 			    do_write);
2945 	}
2946 
2947 	return ret;
2948 }
2949 
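/*
 * Editor's example of the wrap-around handling above, assuming a
 * hypothetical store of 0x3000 sectors: a read of num = 0x20 sectors
 * at lba = 0x2ff0 gives block = 0x2ff0 and rest = 0x10, so the first
 * sg_copy_buffer() call moves 0x10 sectors from the tail of the store
 * and the second moves the remaining 0x10 from its beginning.
 */
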
2950 /* Returns number of bytes copied or -1 if error. */
2951 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2952 {
2953 	struct scsi_data_buffer *sdb = &scp->sdb;
2954 
2955 	if (!sdb->length)
2956 		return 0;
2957 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2958 		return -1;
2959 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2960 			      num * sdebug_sector_size, 0, true);
2961 }
2962 
2963 /* If the num blocks at sip->storep+lba compare equal to the first num
2964  * blocks of arr, copy the top half of arr over them and return true.
2965  * If the comparison fails then return false. */
2966 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2967 			      const u8 *arr, bool compare_only)
2968 {
2969 	bool res;
2970 	u64 block, rest = 0;
2971 	u32 store_blks = sdebug_store_sectors;
2972 	u32 lb_size = sdebug_sector_size;
2973 	u8 *fsp = sip->storep;
2974 
2975 	block = do_div(lba, store_blks);
2976 	if (block + num > store_blks)
2977 		rest = block + num - store_blks;
2978 
2979 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2980 	if (!res)
2981 		return res;
2982 	if (rest)
2983 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2984 			     rest * lb_size);
2985 	if (!res)
2986 		return res;
2987 	if (compare_only)
2988 		return true;
2989 	arr += num * lb_size;
2990 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2991 	if (rest)
2992 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2993 	return res;
2994 }
2995 
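/*
 * Editor's sketch of the buffer layout comp_write_worker() expects,
 * for a hypothetical num = 2 with 512-byte blocks:
 *
 *	arr[0 .. 1023]		two blocks compared against the store
 *	arr[1024 .. 2047]	two blocks written back on a match
 *
 * With compare_only set, the top half is ignored and only the result
 * of the comparison is returned.
 */
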
2996 static __be16 dif_compute_csum(const void *buf, int len)
2997 {
2998 	__be16 csum;
2999 
3000 	if (sdebug_guard)
3001 		csum = (__force __be16)ip_compute_csum(buf, len);
3002 	else
3003 		csum = cpu_to_be16(crc_t10dif(buf, len));
3004 
3005 	return csum;
3006 }
3007 
3008 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3009 		      sector_t sector, u32 ei_lba)
3010 {
3011 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3012 
3013 	if (sdt->guard_tag != csum) {
3014 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3015 			(unsigned long)sector,
3016 			be16_to_cpu(sdt->guard_tag),
3017 			be16_to_cpu(csum));
3018 		return 0x01;
3019 	}
3020 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3021 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3022 		pr_err("REF check failed on sector %lu\n",
3023 			(unsigned long)sector);
3024 		return 0x03;
3025 	}
3026 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3027 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3028 		pr_err("REF check failed on sector %lu\n",
3029 			(unsigned long)sector);
3030 		return 0x03;
3031 	}
3032 	return 0;
3033 }
3034 
3035 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3036 			  unsigned int sectors, bool read)
3037 {
3038 	size_t resid;
3039 	void *paddr;
3040 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3041 						scp->device->hostdata, true);
3042 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3043 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3044 	struct sg_mapping_iter miter;
3045 
3046 	/* Bytes of protection data to copy into sgl */
3047 	resid = sectors * sizeof(*dif_storep);
3048 
3049 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3050 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3051 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3052 
3053 	while (sg_miter_next(&miter) && resid > 0) {
3054 		size_t len = min_t(size_t, miter.length, resid);
3055 		void *start = dif_store(sip, sector);
3056 		size_t rest = 0;
3057 
3058 		if (dif_store_end < start + len)
3059 			rest = start + len - dif_store_end;
3060 
3061 		paddr = miter.addr;
3062 
3063 		if (read)
3064 			memcpy(paddr, start, len - rest);
3065 		else
3066 			memcpy(start, paddr, len - rest);
3067 
3068 		if (rest) {
3069 			if (read)
3070 				memcpy(paddr + len - rest, dif_storep, rest);
3071 			else
3072 				memcpy(dif_storep, paddr + len - rest, rest);
3073 		}
3074 
3075 		sector += len / sizeof(*dif_storep);
3076 		resid -= len;
3077 	}
3078 	sg_miter_stop(&miter);
3079 }
3080 
3081 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3082 			    unsigned int sectors, u32 ei_lba)
3083 {
3084 	int ret = 0;
3085 	unsigned int i;
3086 	sector_t sector;
3087 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3088 						scp->device->hostdata, true);
3089 	struct t10_pi_tuple *sdt;
3090 
3091 	for (i = 0; i < sectors; i++, ei_lba++) {
3092 		sector = start_sec + i;
3093 		sdt = dif_store(sip, sector);
3094 
3095 		if (sdt->app_tag == cpu_to_be16(0xffff))
3096 			continue;
3097 
3098 		/*
3099 		 * Because scsi_debug acts as both initiator and
3100 		 * target, we proceed to verify the PI even if
3101 		 * RDPROTECT=3. This is done so the "initiator" knows
3102 		 * which type of error to return. Otherwise we would
3103 		 * have to iterate over the PI twice.
3104 		 */
3105 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3106 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3107 					 sector, ei_lba);
3108 			if (ret) {
3109 				dif_errors++;
3110 				break;
3111 			}
3112 		}
3113 	}
3114 
3115 	dif_copy_prot(scp, start_sec, sectors, true);
3116 	dix_reads++;
3117 
3118 	return ret;
3119 }
3120 
3121 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3122 {
3123 	bool check_prot;
3124 	u32 num;
3125 	u32 ei_lba;
3126 	int ret;
3127 	u64 lba;
3128 	struct sdeb_store_info *sip = devip2sip(devip, true);
3129 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3130 	u8 *cmd = scp->cmnd;
3131 
3132 	switch (cmd[0]) {
3133 	case READ_16:
3134 		ei_lba = 0;
3135 		lba = get_unaligned_be64(cmd + 2);
3136 		num = get_unaligned_be32(cmd + 10);
3137 		check_prot = true;
3138 		break;
3139 	case READ_10:
3140 		ei_lba = 0;
3141 		lba = get_unaligned_be32(cmd + 2);
3142 		num = get_unaligned_be16(cmd + 7);
3143 		check_prot = true;
3144 		break;
3145 	case READ_6:
3146 		ei_lba = 0;
3147 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3148 		      (u32)(cmd[1] & 0x1f) << 16;
3149 		num = (0 == cmd[4]) ? 256 : cmd[4];
3150 		check_prot = true;
3151 		break;
3152 	case READ_12:
3153 		ei_lba = 0;
3154 		lba = get_unaligned_be32(cmd + 2);
3155 		num = get_unaligned_be32(cmd + 6);
3156 		check_prot = true;
3157 		break;
3158 	case XDWRITEREAD_10:
3159 		ei_lba = 0;
3160 		lba = get_unaligned_be32(cmd + 2);
3161 		num = get_unaligned_be16(cmd + 7);
3162 		check_prot = false;
3163 		break;
3164 	default:	/* assume READ(32) */
3165 		lba = get_unaligned_be64(cmd + 12);
3166 		ei_lba = get_unaligned_be32(cmd + 20);
3167 		num = get_unaligned_be32(cmd + 28);
3168 		check_prot = false;
3169 		break;
3170 	}
3171 	if (unlikely(have_dif_prot && check_prot)) {
3172 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3173 		    (cmd[1] & 0xe0)) {
3174 			mk_sense_invalid_opcode(scp);
3175 			return check_condition_result;
3176 		}
3177 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3178 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3179 		    (cmd[1] & 0xe0) == 0)
3180 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3181 				    "to DIF device\n");
3182 	}
3183 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3184 		     atomic_read(&sdeb_inject_pending))) {
3185 		num /= 2;
3186 		atomic_set(&sdeb_inject_pending, 0);
3187 	}
3188 
3189 	ret = check_device_access_params(scp, lba, num, false);
3190 	if (ret)
3191 		return ret;
3192 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3193 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3194 		     ((lba + num) > sdebug_medium_error_start))) {
3195 		/* claim unrecoverable read error */
3196 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3197 		/* set info field and valid bit for fixed descriptor */
3198 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3199 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3200 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3201 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3202 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3203 		}
3204 		scsi_set_resid(scp, scsi_bufflen(scp));
3205 		return check_condition_result;
3206 	}
3207 
3208 	read_lock(macc_lckp);
3209 
3210 	/* DIX + T10 DIF */
3211 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3212 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3213 		case 1: /* Guard tag error */
3214 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3215 				read_unlock(macc_lckp);
3216 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3217 				return check_condition_result;
3218 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3219 				read_unlock(macc_lckp);
3220 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3221 				return illegal_condition_result;
3222 			}
3223 			break;
3224 		case 3: /* Reference tag error */
3225 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3226 				read_unlock(macc_lckp);
3227 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3228 				return check_condition_result;
3229 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3230 				read_unlock(macc_lckp);
3231 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3232 				return illegal_condition_result;
3233 			}
3234 			break;
3235 		}
3236 	}
3237 
3238 	ret = do_device_access(sip, scp, 0, lba, num, false);
3239 	read_unlock(macc_lckp);
3240 	if (unlikely(ret == -1))
3241 		return DID_ERROR << 16;
3242 
3243 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3244 
3245 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3246 		     atomic_read(&sdeb_inject_pending))) {
3247 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3248 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3249 			atomic_set(&sdeb_inject_pending, 0);
3250 			return check_condition_result;
3251 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3252 			/* Logical block guard check failed */
3253 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3254 			atomic_set(&sdeb_inject_pending, 0);
3255 			return illegal_condition_result;
3256 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3257 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3258 			atomic_set(&sdeb_inject_pending, 0);
3259 			return illegal_condition_result;
3260 		}
3261 	}
3262 	return 0;
3263 }
3264 
3265 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3266 			     unsigned int sectors, u32 ei_lba)
3267 {
3268 	int ret;
3269 	struct t10_pi_tuple *sdt;
3270 	void *daddr;
3271 	sector_t sector = start_sec;
3272 	int ppage_offset;
3273 	int dpage_offset;
3274 	struct sg_mapping_iter diter;
3275 	struct sg_mapping_iter piter;
3276 
3277 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3278 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3279 
3280 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3281 			scsi_prot_sg_count(SCpnt),
3282 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3283 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3284 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3285 
3286 	/* For each protection page */
3287 	while (sg_miter_next(&piter)) {
3288 		dpage_offset = 0;
3289 		if (WARN_ON(!sg_miter_next(&diter))) {
3290 			ret = 0x01;
3291 			goto out;
3292 		}
3293 
3294 		for (ppage_offset = 0; ppage_offset < piter.length;
3295 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3296 			/* If we're at the end of the current
3297 			 * data page, advance to the next one
3298 			 */
3299 			if (dpage_offset >= diter.length) {
3300 				if (WARN_ON(!sg_miter_next(&diter))) {
3301 					ret = 0x01;
3302 					goto out;
3303 				}
3304 				dpage_offset = 0;
3305 			}
3306 
3307 			sdt = piter.addr + ppage_offset;
3308 			daddr = diter.addr + dpage_offset;
3309 
3310 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3311 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3312 				if (ret)
3313 					goto out;
3314 			}
3315 
3316 			sector++;
3317 			ei_lba++;
3318 			dpage_offset += sdebug_sector_size;
3319 		}
3320 		diter.consumed = dpage_offset;
3321 		sg_miter_stop(&diter);
3322 	}
3323 	sg_miter_stop(&piter);
3324 
3325 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3326 	dix_writes++;
3327 
3328 	return 0;
3329 
3330 out:
3331 	dif_errors++;
3332 	sg_miter_stop(&diter);
3333 	sg_miter_stop(&piter);
3334 	return ret;
3335 }
3336 
3337 static unsigned long lba_to_map_index(sector_t lba)
3338 {
3339 	if (sdebug_unmap_alignment)
3340 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3341 	sector_div(lba, sdebug_unmap_granularity);
3342 	return lba;
3343 }
3344 
3345 static sector_t map_index_to_lba(unsigned long index)
3346 {
3347 	sector_t lba = index * sdebug_unmap_granularity;
3348 
3349 	if (sdebug_unmap_alignment)
3350 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3351 	return lba;
3352 }
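
/*
 * Example: with sdebug_unmap_granularity=4 and sdebug_unmap_alignment=1,
 * LBAs 1..4 map to index 1 and map_index_to_lba(1) returns 1, so unmap
 * grains start at LBAs 1, 5, 9, ... honouring the alignment.
 */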
3353 
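/*
 * Reports whether lba is mapped and, via *num, how many following blocks
 * share that state; resp_get_lba_status() below relies on this to build
 * its single LBA status descriptor.
 */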
3354 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3355 			      unsigned int *num)
3356 {
3357 	sector_t end;
3358 	unsigned int mapped;
3359 	unsigned long index;
3360 	unsigned long next;
3361 
3362 	index = lba_to_map_index(lba);
3363 	mapped = test_bit(index, sip->map_storep);
3364 
3365 	if (mapped)
3366 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3367 	else
3368 		next = find_next_bit(sip->map_storep, map_size, index);
3369 
3370 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3371 	*num = end - lba;
3372 	return mapped;
3373 }
3374 
3375 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3376 		       unsigned int len)
3377 {
3378 	sector_t end = lba + len;
3379 
3380 	while (lba < end) {
3381 		unsigned long index = lba_to_map_index(lba);
3382 
3383 		if (index < map_size)
3384 			set_bit(index, sip->map_storep);
3385 
3386 		lba = map_index_to_lba(index + 1);
3387 	}
3388 }
3389 
3390 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3391 			 unsigned int len)
3392 {
3393 	sector_t end = lba + len;
3394 	u8 *fsp = sip->storep;
3395 
3396 	while (lba < end) {
3397 		unsigned long index = lba_to_map_index(lba);
3398 
3399 		if (lba == map_index_to_lba(index) &&
3400 		    lba + sdebug_unmap_granularity <= end &&
3401 		    index < map_size) {
3402 			clear_bit(index, sip->map_storep);
3403 			if (sdebug_lbprz) {  /* LBPRZ==1: zero-fill, LBPRZ==2: 0xff-fill */
3404 				memset(fsp + lba * sdebug_sector_size,
3405 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3406 				       sdebug_sector_size *
3407 				       sdebug_unmap_granularity);
3408 			}
3409 			if (sip->dif_storep) {
3410 				memset(sip->dif_storep + lba, 0xff,
3411 				       sizeof(*sip->dif_storep) *
3412 				       sdebug_unmap_granularity);
3413 			}
3414 		}
3415 		lba = map_index_to_lba(index + 1);
3416 	}
3417 }
3418 
3419 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3420 {
3421 	bool check_prot;
3422 	u32 num;
3423 	u32 ei_lba;
3424 	int ret;
3425 	u64 lba;
3426 	struct sdeb_store_info *sip = devip2sip(devip, true);
3427 	rwlock_t *macc_lckp = &sip->macc_lck;
3428 	u8 *cmd = scp->cmnd;
3429 
3430 	switch (cmd[0]) {
3431 	case WRITE_16:
3432 		ei_lba = 0;
3433 		lba = get_unaligned_be64(cmd + 2);
3434 		num = get_unaligned_be32(cmd + 10);
3435 		check_prot = true;
3436 		break;
3437 	case WRITE_10:
3438 		ei_lba = 0;
3439 		lba = get_unaligned_be32(cmd + 2);
3440 		num = get_unaligned_be16(cmd + 7);
3441 		check_prot = true;
3442 		break;
3443 	case WRITE_6:
3444 		ei_lba = 0;
3445 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3446 		      (u32)(cmd[1] & 0x1f) << 16;
3447 		num = (0 == cmd[4]) ? 256 : cmd[4];
3448 		check_prot = true;
3449 		break;
3450 	case WRITE_12:
3451 		ei_lba = 0;
3452 		lba = get_unaligned_be32(cmd + 2);
3453 		num = get_unaligned_be32(cmd + 6);
3454 		check_prot = true;
3455 		break;
3456 	case 0x53:	/* XDWRITEREAD(10) */
3457 		ei_lba = 0;
3458 		lba = get_unaligned_be32(cmd + 2);
3459 		num = get_unaligned_be16(cmd + 7);
3460 		check_prot = false;
3461 		break;
3462 	default:	/* assume WRITE(32) */
3463 		lba = get_unaligned_be64(cmd + 12);
3464 		ei_lba = get_unaligned_be32(cmd + 20);
3465 		num = get_unaligned_be32(cmd + 28);
3466 		check_prot = false;
3467 		break;
3468 	}
3469 	if (unlikely(have_dif_prot && check_prot)) {
3470 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3471 		    (cmd[1] & 0xe0)) {
3472 			mk_sense_invalid_opcode(scp);
3473 			return check_condition_result;
3474 		}
3475 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3476 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3477 		    (cmd[1] & 0xe0) == 0)
3478 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3479 				    "to DIF device\n");
3480 	}
3481 
3482 	write_lock(macc_lckp);
3483 	ret = check_device_access_params(scp, lba, num, true);
3484 	if (ret) {
3485 		write_unlock(macc_lckp);
3486 		return ret;
3487 	}
3488 
3489 	/* DIX + T10 DIF */
3490 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3491 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3492 		case 1: /* Guard tag error */
3493 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3494 				write_unlock(macc_lckp);
3495 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3496 				return illegal_condition_result;
3497 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3498 				write_unlock(macc_lckp);
3499 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3500 				return check_condition_result;
3501 			}
3502 			break;
3503 		case 3: /* Reference tag error */
3504 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3505 				write_unlock(macc_lckp);
3506 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3507 				return illegal_condition_result;
3508 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3509 				write_unlock(macc_lckp);
3510 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3511 				return check_condition_result;
3512 			}
3513 			break;
3514 		}
3515 	}
3516 
3517 	ret = do_device_access(sip, scp, 0, lba, num, true);
3518 	if (unlikely(scsi_debug_lbp()))
3519 		map_region(sip, lba, num);
3520 	/* If ZBC zone then bump its write pointer */
3521 	if (sdebug_dev_is_zoned(devip))
3522 		zbc_inc_wp(devip, lba, num);
3523 	write_unlock(macc_lckp);
3524 	if (unlikely(-1 == ret))
3525 		return DID_ERROR << 16;
3526 	else if (unlikely(sdebug_verbose &&
3527 			  (ret < (num * sdebug_sector_size))))
3528 		sdev_printk(KERN_INFO, scp->device,
3529 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3530 			    my_name, num * sdebug_sector_size, ret);
3531 
3532 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3533 		     atomic_read(&sdeb_inject_pending))) {
3534 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3535 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3536 			atomic_set(&sdeb_inject_pending, 0);
3537 			return check_condition_result;
3538 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3539 			/* Logical block guard check failed */
3540 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3541 			atomic_set(&sdeb_inject_pending, 0);
3542 			return illegal_condition_result;
3543 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3544 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3545 			atomic_set(&sdeb_inject_pending, 0);
3546 			return illegal_condition_result;
3547 		}
3548 	}
3549 	return 0;
3550 }
3551 
3552 /*
3553  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3554  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3555  */
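/*
 * Each LBA range descriptor in the data-out buffer is 32 bytes and, as
 * parsed below, holds a be64 LBA at offset 0, a be32 number of logical
 * blocks at offset 8 and, for the 32 byte cdb variant, a be32 expected
 * initial reference tag at offset 12; the parameter list header occupies
 * the first 32 bytes.
 */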
3556 static int resp_write_scat(struct scsi_cmnd *scp,
3557 			   struct sdebug_dev_info *devip)
3558 {
3559 	u8 *cmd = scp->cmnd;
3560 	u8 *lrdp = NULL;
3561 	u8 *up;
3562 	struct sdeb_store_info *sip = devip2sip(devip, true);
3563 	rwlock_t *macc_lckp = &sip->macc_lck;
3564 	u8 wrprotect;
3565 	u16 lbdof, num_lrd, k;
3566 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3567 	u32 lb_size = sdebug_sector_size;
3568 	u32 ei_lba;
3569 	u64 lba;
3570 	int ret, res;
3571 	bool is_16;
3572 	static const u32 lrd_size = 32; /* descriptor size; header is also 32 bytes */
3573 
3574 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3575 		is_16 = false;
3576 		wrprotect = (cmd[10] >> 5) & 0x7;
3577 		lbdof = get_unaligned_be16(cmd + 12);
3578 		num_lrd = get_unaligned_be16(cmd + 16);
3579 		bt_len = get_unaligned_be32(cmd + 28);
3580 	} else {        /* that leaves WRITE SCATTERED(16) */
3581 		is_16 = true;
3582 		wrprotect = (cmd[2] >> 5) & 0x7;
3583 		lbdof = get_unaligned_be16(cmd + 4);
3584 		num_lrd = get_unaligned_be16(cmd + 8);
3585 		bt_len = get_unaligned_be32(cmd + 10);
3586 		if (unlikely(have_dif_prot)) {
3587 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3588 			    wrprotect) {
3589 				mk_sense_invalid_opcode(scp);
3590 				return illegal_condition_result;
3591 			}
3592 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3593 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3594 			     wrprotect == 0)
3595 				sdev_printk(KERN_ERR, scp->device,
3596 					    "Unprotected WR to DIF device\n");
3597 		}
3598 	}
3599 	if ((num_lrd == 0) || (bt_len == 0))
3600 		return 0;       /* T10 says these do-nothings are not errors */
3601 	if (lbdof == 0) {
3602 		if (sdebug_verbose)
3603 			sdev_printk(KERN_INFO, scp->device,
3604 				"%s: %s: LB Data Offset field bad\n",
3605 				my_name, __func__);
3606 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3607 		return illegal_condition_result;
3608 	}
3609 	lbdof_blen = lbdof * lb_size;
3610 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3611 		if (sdebug_verbose)
3612 			sdev_printk(KERN_INFO, scp->device,
3613 				"%s: %s: LBA range descriptors don't fit\n",
3614 				my_name, __func__);
3615 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3616 		return illegal_condition_result;
3617 	}
3618 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3619 	if (lrdp == NULL)
3620 		return SCSI_MLQUEUE_HOST_BUSY;
3621 	if (sdebug_verbose)
3622 		sdev_printk(KERN_INFO, scp->device,
3623 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3624 			my_name, __func__, lbdof_blen);
3625 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3626 	if (res == -1) {
3627 		ret = DID_ERROR << 16;
3628 		goto err_out;
3629 	}
3630 
3631 	write_lock(macc_lckp);
3632 	sg_off = lbdof_blen;
3633 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3634 	cum_lb = 0;
3635 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3636 		lba = get_unaligned_be64(up + 0);
3637 		num = get_unaligned_be32(up + 8);
3638 		if (sdebug_verbose)
3639 			sdev_printk(KERN_INFO, scp->device,
3640 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3641 				my_name, __func__, k, lba, num, sg_off);
3642 		if (num == 0)
3643 			continue;
3644 		ret = check_device_access_params(scp, lba, num, true);
3645 		if (ret)
3646 			goto err_out_unlock;
3647 		num_by = num * lb_size;
3648 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3649 
3650 		if ((cum_lb + num) > bt_len) {
3651 			if (sdebug_verbose)
3652 				sdev_printk(KERN_INFO, scp->device,
3653 				    "%s: %s: sum of blocks > data provided\n",
3654 				    my_name, __func__);
3655 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3656 					0);
3657 			ret = illegal_condition_result;
3658 			goto err_out_unlock;
3659 		}
3660 
3661 		/* DIX + T10 DIF */
3662 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3663 			int prot_ret = prot_verify_write(scp, lba, num,
3664 							 ei_lba);
3665 
3666 			if (prot_ret) {
3667 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3668 						prot_ret);
3669 				ret = illegal_condition_result;
3670 				goto err_out_unlock;
3671 			}
3672 		}
3673 
3674 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3675 		/* If ZBC zone then bump its write pointer */
3676 		if (sdebug_dev_is_zoned(devip))
3677 			zbc_inc_wp(devip, lba, num);
3678 		if (unlikely(scsi_debug_lbp()))
3679 			map_region(sip, lba, num);
3680 		if (unlikely(-1 == ret)) {
3681 			ret = DID_ERROR << 16;
3682 			goto err_out_unlock;
3683 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3684 			sdev_printk(KERN_INFO, scp->device,
3685 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3686 			    my_name, num_by, ret);
3687 
3688 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3689 			     atomic_read(&sdeb_inject_pending))) {
3690 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3691 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3692 				atomic_set(&sdeb_inject_pending, 0);
3693 				ret = check_condition_result;
3694 				goto err_out_unlock;
3695 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3696 				/* Logical block guard check failed */
3697 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3698 				atomic_set(&sdeb_inject_pending, 0);
3699 				ret = illegal_condition_result;
3700 				goto err_out_unlock;
3701 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3702 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3703 				atomic_set(&sdeb_inject_pending, 0);
3704 				ret = illegal_condition_result;
3705 				goto err_out_unlock;
3706 			}
3707 		}
3708 		sg_off += num_by;
3709 		cum_lb += num;
3710 	}
3711 	ret = 0;
3712 err_out_unlock:
3713 	write_unlock(macc_lckp);
3714 err_out:
3715 	kfree(lrdp);
3716 	return ret;
3717 }
3718 
3719 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3720 			   u32 ei_lba, bool unmap, bool ndob)
3721 {
3722 	struct scsi_device *sdp = scp->device;
3723 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3724 	unsigned long long i;
3725 	u64 block, lbaa;
3726 	u32 lb_size = sdebug_sector_size;
3727 	int ret;
3728 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3729 						scp->device->hostdata, true);
3730 	rwlock_t *macc_lckp = &sip->macc_lck;
3731 	u8 *fs1p;
3732 	u8 *fsp;
3733 
3734 	write_lock(macc_lckp);
3735 
3736 	ret = check_device_access_params(scp, lba, num, true);
3737 	if (ret) {
3738 		write_unlock(macc_lckp);
3739 		return ret;
3740 	}
3741 
3742 	if (unmap && scsi_debug_lbp()) {
3743 		unmap_region(sip, lba, num);
3744 		goto out;
3745 	}
3746 	lbaa = lba;
3747 	block = do_div(lbaa, sdebug_store_sectors);
3748 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3749 	fsp = sip->storep;
3750 	fs1p = fsp + (block * lb_size);
3751 	if (ndob) {
3752 		memset(fs1p, 0, lb_size);
3753 		ret = 0;
3754 	} else
3755 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3756 
3757 	if (-1 == ret) {
3758 		write_unlock(macc_lckp);
3759 		return DID_ERROR << 16;
3760 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3761 		sdev_printk(KERN_INFO, scp->device,
3762 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3763 			    my_name, "write same", lb_size, ret);
3764 
3765 	/* Copy first sector to remaining blocks */
3766 	for (i = 1 ; i < num ; i++) {
3767 		lbaa = lba + i;
3768 		block = do_div(lbaa, sdebug_store_sectors);
3769 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3770 	}
3771 	if (scsi_debug_lbp())
3772 		map_region(sip, lba, num);
3773 	/* If ZBC zone then bump its write pointer */
3774 	if (sdebug_dev_is_zoned(devip))
3775 		zbc_inc_wp(devip, lba, num);
3776 out:
3777 	write_unlock(macc_lckp);
3778 
3779 	return 0;
3780 }
3781 
3782 static int resp_write_same_10(struct scsi_cmnd *scp,
3783 			      struct sdebug_dev_info *devip)
3784 {
3785 	u8 *cmd = scp->cmnd;
3786 	u32 lba;
3787 	u16 num;
3788 	u32 ei_lba = 0;
3789 	bool unmap = false;
3790 
3791 	if (cmd[1] & 0x8) {
3792 		if (sdebug_lbpws10 == 0) {
3793 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3794 			return check_condition_result;
3795 		} else
3796 			unmap = true;
3797 	}
3798 	lba = get_unaligned_be32(cmd + 2);
3799 	num = get_unaligned_be16(cmd + 7);
3800 	if (num > sdebug_write_same_length) {
3801 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3802 		return check_condition_result;
3803 	}
3804 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3805 }
3806 
3807 static int resp_write_same_16(struct scsi_cmnd *scp,
3808 			      struct sdebug_dev_info *devip)
3809 {
3810 	u8 *cmd = scp->cmnd;
3811 	u64 lba;
3812 	u32 num;
3813 	u32 ei_lba = 0;
3814 	bool unmap = false;
3815 	bool ndob = false;
3816 
3817 	if (cmd[1] & 0x8) {	/* UNMAP */
3818 		if (sdebug_lbpws == 0) {
3819 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3820 			return check_condition_result;
3821 		} else
3822 			unmap = true;
3823 	}
3824 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3825 		ndob = true;
3826 	lba = get_unaligned_be64(cmd + 2);
3827 	num = get_unaligned_be32(cmd + 10);
3828 	if (num > sdebug_write_same_length) {
3829 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3830 		return check_condition_result;
3831 	}
3832 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3833 }
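
/*
 * A sketch of exercising the two handlers above from user space, assuming
 * the sg_write_same utility from sg3_utils and its documented option names:
 *   sg_write_same --16 --unmap --lba=0 --num=32 /dev/sg1
 *   sg_write_same --16 --ndob --lba=0 --num=32 /dev/sg1
 */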
3834 
3835 /* Note the mode field is in the same position as the (lower) service action
3836  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3837  * each mode of this command should be reported separately; left for future work. */
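/*
 * CDB byte 1, bits 4:0 carry the mode; the modes acted on below are:
 *   0x4  download microcode and activate
 *   0x5  download microcode, save and activate
 *   0x6  download microcode with offsets and activate
 *   0x7  download microcode with offsets, save and activate
 */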
3838 static int resp_write_buffer(struct scsi_cmnd *scp,
3839 			     struct sdebug_dev_info *devip)
3840 {
3841 	u8 *cmd = scp->cmnd;
3842 	struct scsi_device *sdp = scp->device;
3843 	struct sdebug_dev_info *dp;
3844 	u8 mode;
3845 
3846 	mode = cmd[1] & 0x1f;
3847 	switch (mode) {
3848 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3849 		/* set UAs on this device only */
3850 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3851 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3852 		break;
3853 	case 0x5:	/* download MC, save and ACT */
3854 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3855 		break;
3856 	case 0x6:	/* download MC with offsets and ACT */
3857 		/* set UAs on most devices (LUs) in this target */
3858 		list_for_each_entry(dp,
3859 				    &devip->sdbg_host->dev_info_list,
3860 				    dev_list)
3861 			if (dp->target == sdp->id) {
3862 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3863 				if (devip != dp)
3864 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3865 						dp->uas_bm);
3866 			}
3867 		break;
3868 	case 0x7:	/* download MC with offsets, save, and ACT */
3869 		/* set UA on all devices (LUs) in this target */
3870 		list_for_each_entry(dp,
3871 				    &devip->sdbg_host->dev_info_list,
3872 				    dev_list)
3873 			if (dp->target == sdp->id)
3874 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3875 					dp->uas_bm);
3876 		break;
3877 	default:
3878 		/* do nothing for this command for other mode values */
3879 		break;
3880 	}
3881 	return 0;
3882 }
3883 
3884 static int resp_comp_write(struct scsi_cmnd *scp,
3885 			   struct sdebug_dev_info *devip)
3886 {
3887 	u8 *cmd = scp->cmnd;
3888 	u8 *arr;
3889 	struct sdeb_store_info *sip = devip2sip(devip, true);
3890 	rwlock_t *macc_lckp = &sip->macc_lck;
3891 	u64 lba;
3892 	u32 dnum;
3893 	u32 lb_size = sdebug_sector_size;
3894 	u8 num;
3895 	int ret;
3896 	int retval = 0;
3897 
3898 	lba = get_unaligned_be64(cmd + 2);
3899 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3900 	if (0 == num)
3901 		return 0;	/* degenerate case, not an error */
3902 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3903 	    (cmd[1] & 0xe0)) {
3904 		mk_sense_invalid_opcode(scp);
3905 		return check_condition_result;
3906 	}
3907 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3908 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3909 	    (cmd[1] & 0xe0) == 0)
3910 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3911 			    "to DIF device\n");
3912 	ret = check_device_access_params(scp, lba, num, false);
3913 	if (ret)
3914 		return ret;
3915 	dnum = 2 * num;
3916 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3917 	if (NULL == arr) {
3918 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3919 				INSUFF_RES_ASCQ);
3920 		return check_condition_result;
3921 	}
3922 
3923 	write_lock(macc_lckp);
3924 
3925 	ret = do_dout_fetch(scp, dnum, arr);
3926 	if (ret == -1) {
3927 		retval = DID_ERROR << 16;
3928 		goto cleanup;
3929 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3930 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3931 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3932 			    dnum * lb_size, ret);
3933 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3934 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3935 		retval = check_condition_result;
3936 		goto cleanup;
3937 	}
3938 	if (scsi_debug_lbp())
3939 		map_region(sip, lba, num);
3940 cleanup:
3941 	write_unlock(macc_lckp);
3942 	kfree(arr);
3943 	return retval;
3944 }
3945 
3946 struct unmap_block_desc {
3947 	__be64	lba;
3948 	__be32	blocks;
3949 	__be32	__reserved;
3950 };
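
/*
 * The UNMAP parameter list starts with an 8 byte header: a be16 data
 * length (payload length - 2) at offset 0 and a be16 block descriptor
 * data length (16 * number of descriptors) at offset 2, followed by the
 * 16 byte descriptors above; the BUG_ON()s below check exactly that.
 */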
3951 
3952 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3953 {
3954 	unsigned char *buf;
3955 	struct unmap_block_desc *desc;
3956 	struct sdeb_store_info *sip = devip2sip(devip, true);
3957 	rwlock_t *macc_lckp = &sip->macc_lck;
3958 	unsigned int i, payload_len, descriptors;
3959 	int ret;
3960 
3961 	if (!scsi_debug_lbp())
3962 		return 0;	/* fib and say it's done */
3963 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3964 	BUG_ON(scsi_bufflen(scp) != payload_len);
3965 
3966 	descriptors = (payload_len - 8) / 16;
3967 	if (descriptors > sdebug_unmap_max_desc) {
3968 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3969 		return check_condition_result;
3970 	}
3971 
3972 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3973 	if (!buf) {
3974 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3975 				INSUFF_RES_ASCQ);
3976 		return check_condition_result;
3977 	}
3978 
3979 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3980 
3981 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3982 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3983 
3984 	desc = (void *)&buf[8];
3985 
3986 	write_lock(macc_lckp);
3987 
3988 	for (i = 0 ; i < descriptors ; i++) {
3989 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3990 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3991 
3992 		ret = check_device_access_params(scp, lba, num, true);
3993 		if (ret)
3994 			goto out;
3995 
3996 		unmap_region(sip, lba, num);
3997 	}
3998 
3999 	ret = 0;
4000 
4001 out:
4002 	write_unlock(macc_lckp);
4003 	kfree(buf);
4004 
4005 	return ret;
4006 }
4007 
4008 #define SDEBUG_GET_LBA_STATUS_LEN 32
4009 
4010 static int resp_get_lba_status(struct scsi_cmnd *scp,
4011 			       struct sdebug_dev_info *devip)
4012 {
4013 	u8 *cmd = scp->cmnd;
4014 	u64 lba;
4015 	u32 alloc_len, mapped, num;
4016 	int ret;
4017 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4018 
4019 	lba = get_unaligned_be64(cmd + 2);
4020 	alloc_len = get_unaligned_be32(cmd + 10);
4021 
4022 	if (alloc_len < 24)
4023 		return 0;
4024 
4025 	ret = check_device_access_params(scp, lba, 1, false);
4026 	if (ret)
4027 		return ret;
4028 
4029 	if (scsi_debug_lbp()) {
4030 		struct sdeb_store_info *sip = devip2sip(devip, true);
4031 
4032 		mapped = map_state(sip, lba, &num);
4033 	} else {
4034 		mapped = 1;
4035 		/* following just in case virtual_gb changed */
4036 		sdebug_capacity = get_sdebug_capacity();
4037 		if (sdebug_capacity - lba <= 0xffffffff)
4038 			num = sdebug_capacity - lba;
4039 		else
4040 			num = 0xffffffff;
4041 	}
4042 
4043 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4044 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4045 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4046 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4047 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4048 
4049 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4050 }
4051 
4052 static int resp_sync_cache(struct scsi_cmnd *scp,
4053 			   struct sdebug_dev_info *devip)
4054 {
4055 	int res = 0;
4056 	u64 lba;
4057 	u32 num_blocks;
4058 	u8 *cmd = scp->cmnd;
4059 
4060 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4061 		lba = get_unaligned_be32(cmd + 2);
4062 		num_blocks = get_unaligned_be16(cmd + 7);
4063 	} else {				/* SYNCHRONIZE_CACHE(16) */
4064 		lba = get_unaligned_be64(cmd + 2);
4065 		num_blocks = get_unaligned_be32(cmd + 10);
4066 	}
4067 	if (lba + num_blocks > sdebug_capacity) {
4068 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4069 		return check_condition_result;
4070 	}
4071 	if (!write_since_sync || (cmd[1] & 0x2))
4072 		res = SDEG_RES_IMMED_MASK;
4073 	else		/* delay if write_since_sync and IMMED clear */
4074 		write_since_sync = false;
4075 	return res;
4076 }
4077 
4078 /*
4079  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4080  * CONDITION MET if the specified blocks will fit in (or already occupy) the
4081  * cache, and a GOOD status otherwise. We model a disk with a big cache, so
4082  * always yield CONDITION MET. As a side effect, try to bring the addressed
4083  * range of the store into the cache(s) of the CPU(s).
4084  */
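/*
 * condition_met_result (defined earlier in this file) is assumed to carry
 * the SAM CONDITION MET status (0x4); SDEG_RES_IMMED_MASK is OR-ed in when
 * the IMMED bit of CDB byte 1 requests early completion.
 */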
4085 static int resp_pre_fetch(struct scsi_cmnd *scp,
4086 			  struct sdebug_dev_info *devip)
4087 {
4088 	int res = 0;
4089 	u64 lba;
4090 	u64 block, rest = 0;
4091 	u32 nblks;
4092 	u8 *cmd = scp->cmnd;
4093 	struct sdeb_store_info *sip = devip2sip(devip, true);
4094 	rwlock_t *macc_lckp = &sip->macc_lck;
4095 	u8 *fsp = sip->storep;
4096 
4097 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4098 		lba = get_unaligned_be32(cmd + 2);
4099 		nblks = get_unaligned_be16(cmd + 7);
4100 	} else {			/* PRE-FETCH(16) */
4101 		lba = get_unaligned_be64(cmd + 2);
4102 		nblks = get_unaligned_be32(cmd + 10);
4103 	}
4104 	if (lba + nblks > sdebug_capacity) {
4105 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4106 		return check_condition_result;
4107 	}
4108 	if (!fsp)
4109 		goto fini;
4110 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4111 	block = do_div(lba, sdebug_store_sectors);
4112 	if (block + nblks > sdebug_store_sectors)
4113 		rest = block + nblks - sdebug_store_sectors;
4114 
4115 	/* Try to bring the PRE-FETCH range into CPU's cache */
4116 	read_lock(macc_lckp);
4117 	prefetch_range(fsp + (sdebug_sector_size * block),
4118 		       (nblks - rest) * sdebug_sector_size);
4119 	if (rest)
4120 		prefetch_range(fsp, rest * sdebug_sector_size);
4121 	read_unlock(macc_lckp);
4122 fini:
4123 	if (cmd[1] & 0x2)
4124 		res = SDEG_RES_IMMED_MASK;
4125 	return res | condition_met_result;
4126 }
4127 
4128 #define RL_BUCKET_ELEMS 8
4129 
4130 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4131  * (W-LUN), the normal Linux scanning logic does not associate it with a
4132  * device (e.g. /dev/sg7). The following magic will make that association:
4133  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4134  * where <n> is a host number. If there are multiple targets in a host then
4135  * the above will associate a W-LUN to each target. To only get a W-LUN
4136  * for target 2, use "echo '- 2 49409' > scan" instead.
4137  */
4138 static int resp_report_luns(struct scsi_cmnd *scp,
4139 			    struct sdebug_dev_info *devip)
4140 {
4141 	unsigned char *cmd = scp->cmnd;
4142 	unsigned int alloc_len;
4143 	unsigned char select_report;
4144 	u64 lun;
4145 	struct scsi_lun *lun_p;
4146 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4147 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4148 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4149 	unsigned int tlun_cnt;	/* total LUN count */
4150 	unsigned int rlen;	/* response length (in bytes) */
4151 	int k, j, n, res;
4152 	unsigned int off_rsp = 0;
4153 	const int sz_lun = sizeof(struct scsi_lun);
4154 
4155 	clear_luns_changed_on_target(devip);
4156 
4157 	select_report = cmd[2];
4158 	alloc_len = get_unaligned_be32(cmd + 6);
4159 
4160 	if (alloc_len < 4) {
4161 		pr_err("alloc len too small %d\n", alloc_len);
4162 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4163 		return check_condition_result;
4164 	}
4165 
4166 	switch (select_report) {
4167 	case 0:		/* all LUNs apart from W-LUNs */
4168 		lun_cnt = sdebug_max_luns;
4169 		wlun_cnt = 0;
4170 		break;
4171 	case 1:		/* only W-LUNs */
4172 		lun_cnt = 0;
4173 		wlun_cnt = 1;
4174 		break;
4175 	case 2:		/* all LUNs */
4176 		lun_cnt = sdebug_max_luns;
4177 		wlun_cnt = 1;
4178 		break;
4179 	case 0x10:	/* only administrative LUs */
4180 	case 0x11:	/* see SPC-5 */
4181 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4182 	default:
4183 		pr_debug("select report invalid %d\n", select_report);
4184 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4185 		return check_condition_result;
4186 	}
4187 
4188 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4189 		--lun_cnt;
4190 
4191 	tlun_cnt = lun_cnt + wlun_cnt;
4192 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4193 	scsi_set_resid(scp, scsi_bufflen(scp));
4194 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4195 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4196 
4197 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4198 	lun = sdebug_no_lun_0 ? 1 : 0;
4199 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4200 		memset(arr, 0, sizeof(arr));
4201 		lun_p = (struct scsi_lun *)&arr[0];
4202 		if (k == 0) {
4203 			put_unaligned_be32(rlen, &arr[0]);
4204 			++lun_p;
4205 			j = 1;
4206 		}
4207 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4208 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4209 				break;
4210 			int_to_scsilun(lun++, lun_p);
4211 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4212 				lun_p->scsi_lun[0] |= 0x40;
4213 		}
4214 		if (j < RL_BUCKET_ELEMS)
4215 			break;
4216 		n = j * sz_lun;
4217 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4218 		if (res)
4219 			return res;
4220 		off_rsp += n;
4221 	}
4222 	if (wlun_cnt) {
4223 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4224 		++j;
4225 	}
4226 	if (j > 0)
4227 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4228 	return res;
4229 }
4230 
4231 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4232 {
4233 	bool is_bytchk3 = false;
4234 	u8 bytchk;
4235 	int ret, j;
4236 	u32 vnum, a_num, off;
4237 	const u32 lb_size = sdebug_sector_size;
4238 	u64 lba;
4239 	u8 *arr;
4240 	u8 *cmd = scp->cmnd;
4241 	struct sdeb_store_info *sip = devip2sip(devip, true);
4242 	rwlock_t *macc_lckp = &sip->macc_lck;
4243 
4244 	bytchk = (cmd[1] >> 1) & 0x3;
4245 	if (bytchk == 0) {
4246 		return 0;	/* always claim internal verify okay */
4247 	} else if (bytchk == 2) {
4248 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4249 		return check_condition_result;
4250 	} else if (bytchk == 3) {
4251 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4252 	}
4253 	switch (cmd[0]) {
4254 	case VERIFY_16:
4255 		lba = get_unaligned_be64(cmd + 2);
4256 		vnum = get_unaligned_be32(cmd + 10);
4257 		break;
4258 	case VERIFY:		/* is VERIFY(10) */
4259 		lba = get_unaligned_be32(cmd + 2);
4260 		vnum = get_unaligned_be16(cmd + 7);
4261 		break;
4262 	default:
4263 		mk_sense_invalid_opcode(scp);
4264 		return check_condition_result;
4265 	}
4266 	if (vnum == 0)
4267 		return 0;	/* not an error */
4268 	a_num = is_bytchk3 ? 1 : vnum;
4269 	/* Treat following check like one for read (i.e. no write) access */
4270 	ret = check_device_access_params(scp, lba, a_num, false);
4271 	if (ret)
4272 		return ret;
4273 
4274 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4275 	if (!arr) {
4276 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4277 				INSUFF_RES_ASCQ);
4278 		return check_condition_result;
4279 	}
4280 	/* Not changing store, so only need read access */
4281 	read_lock(macc_lckp);
4282 
4283 	ret = do_dout_fetch(scp, a_num, arr);
4284 	if (ret == -1) {
4285 		ret = DID_ERROR << 16;
4286 		goto cleanup;
4287 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4288 		sdev_printk(KERN_INFO, scp->device,
4289 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4290 			    my_name, __func__, a_num * lb_size, ret);
4291 	}
4292 	if (is_bytchk3) {
4293 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4294 			memcpy(arr + off, arr, lb_size);
4295 	}
4296 	ret = 0;
4297 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4298 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4299 		ret = check_condition_result;
4300 		goto cleanup;
4301 	}
4302 cleanup:
4303 	read_unlock(macc_lckp);
4304 	kfree(arr);
4305 	return ret;
4306 }
4307 
4308 #define RZONES_DESC_HD 64
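
/*
 * In the REPORT ZONES response both the header and each zone descriptor
 * are 64 bytes long, which is why the descriptor pointer below starts at
 * arr + 64 and advances in 64 byte steps.
 */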
4309 
4310 /* Report zones depending on start LBA and reporting options */
4311 static int resp_report_zones(struct scsi_cmnd *scp,
4312 			     struct sdebug_dev_info *devip)
4313 {
4314 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4315 	int ret = 0;
4316 	u32 alloc_len, rep_opts, rep_len;
4317 	bool partial;
4318 	u64 lba, zs_lba;
4319 	u8 *arr = NULL, *desc;
4320 	u8 *cmd = scp->cmnd;
4321 	struct sdeb_zone_state *zsp;
4322 	struct sdeb_store_info *sip = devip2sip(devip, false);
4323 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4324 
4325 	if (!sdebug_dev_is_zoned(devip)) {
4326 		mk_sense_invalid_opcode(scp);
4327 		return check_condition_result;
4328 	}
4329 	zs_lba = get_unaligned_be64(cmd + 2);
4330 	alloc_len = get_unaligned_be32(cmd + 10);
4331 	if (alloc_len == 0)
4332 		return 0;	/* not an error */
4333 	rep_opts = cmd[14] & 0x3f;
4334 	partial = cmd[14] & 0x80;
4335 
4336 	if (zs_lba >= sdebug_capacity) {
4337 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4338 		return check_condition_result;
4339 	}
4340 
4341 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4342 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4343 			    max_zones);
4344 
4345 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4346 	if (!arr) {
4347 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4348 				INSUFF_RES_ASCQ);
4349 		return check_condition_result;
4350 	}
4351 
4352 	read_lock(macc_lckp);
4353 
4354 	desc = arr + 64;
4355 	for (i = 0; i < max_zones; i++) {
4356 		lba = zs_lba + devip->zsize * i;
4357 		if (lba > sdebug_capacity)
4358 			break;
4359 		zsp = zbc_zone(devip, lba);
4360 		switch (rep_opts) {
4361 		case 0x00:
4362 			/* All zones */
4363 			break;
4364 		case 0x01:
4365 			/* Empty zones */
4366 			if (zsp->z_cond != ZC1_EMPTY)
4367 				continue;
4368 			break;
4369 		case 0x02:
4370 			/* Implicit open zones */
4371 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4372 				continue;
4373 			break;
4374 		case 0x03:
4375 			/* Explicit open zones */
4376 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4377 				continue;
4378 			break;
4379 		case 0x04:
4380 			/* Closed zones */
4381 			if (zsp->z_cond != ZC4_CLOSED)
4382 				continue;
4383 			break;
4384 		case 0x05:
4385 			/* Full zones */
4386 			if (zsp->z_cond != ZC5_FULL)
4387 				continue;
4388 			break;
4389 		case 0x06:
4390 		case 0x07:
4391 		case 0x10:
4392 			/*
4393 			 * Read-only, offline, reset WP recommended are
4394 			 * not emulated, so there are no zones to report.
4395 			 */
4396 			continue;
4397 		case 0x11:
4398 			/* non-seq-resource set */
4399 			if (!zsp->z_non_seq_resource)
4400 				continue;
4401 			break;
4402 		case 0x3f:
4403 			/* Not write pointer (conventional) zones */
4404 			if (!zbc_zone_is_conv(zsp))
4405 				continue;
4406 			break;
4407 		default:
4408 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4409 					INVALID_FIELD_IN_CDB, 0);
4410 			ret = check_condition_result;
4411 			goto fini;
4412 		}
4413 
4414 		if (nrz < rep_max_zones) {
4415 			/* Fill zone descriptor */
4416 			desc[0] = zsp->z_type;
4417 			desc[1] = zsp->z_cond << 4;
4418 			if (zsp->z_non_seq_resource)
4419 				desc[1] |= 1 << 1;
4420 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4421 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4422 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4423 			desc += 64;
4424 		}
4425 
4426 		if (partial && nrz >= rep_max_zones)
4427 			break;
4428 
4429 		nrz++;
4430 	}
4431 
4432 	/* Report header */
4433 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4434 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4435 
4436 	rep_len = (unsigned long)desc - (unsigned long)arr;
4437 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4438 
4439 fini:
4440 	read_unlock(macc_lckp);
4441 	kfree(arr);
4442 	return ret;
4443 }
4444 
4445 /* Logic transplanted from tcmu-runner, file_zbc.c */
4446 static void zbc_open_all(struct sdebug_dev_info *devip)
4447 {
4448 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4449 	unsigned int i;
4450 
4451 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4452 		if (zsp->z_cond == ZC4_CLOSED)
4453 			zbc_open_zone(devip, &devip->zstate[i], true);
4454 	}
4455 }
4456 
4457 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4458 {
4459 	int res = 0;
4460 	u64 z_id;
4461 	enum sdebug_z_cond zc;
4462 	u8 *cmd = scp->cmnd;
4463 	struct sdeb_zone_state *zsp;
4464 	bool all = cmd[14] & 0x01;
4465 	struct sdeb_store_info *sip = devip2sip(devip, false);
4466 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4467 
4468 	if (!sdebug_dev_is_zoned(devip)) {
4469 		mk_sense_invalid_opcode(scp);
4470 		return check_condition_result;
4471 	}
4472 
4473 	write_lock(macc_lckp);
4474 
4475 	if (all) {
4476 		/* Check if all closed zones can be opened */
4477 		if (devip->max_open &&
4478 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4479 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4480 					INSUFF_ZONE_ASCQ);
4481 			res = check_condition_result;
4482 			goto fini;
4483 		}
4484 		/* Open all closed zones */
4485 		zbc_open_all(devip);
4486 		goto fini;
4487 	}
4488 
4489 	/* Open the specified zone */
4490 	z_id = get_unaligned_be64(cmd + 2);
4491 	if (z_id >= sdebug_capacity) {
4492 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4493 		res = check_condition_result;
4494 		goto fini;
4495 	}
4496 
4497 	zsp = zbc_zone(devip, z_id);
4498 	if (z_id != zsp->z_start) {
4499 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4500 		res = check_condition_result;
4501 		goto fini;
4502 	}
4503 	if (zbc_zone_is_conv(zsp)) {
4504 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4505 		res = check_condition_result;
4506 		goto fini;
4507 	}
4508 
4509 	zc = zsp->z_cond;
4510 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4511 		goto fini;
4512 
4513 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4514 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4515 				INSUFF_ZONE_ASCQ);
4516 		res = check_condition_result;
4517 		goto fini;
4518 	}
4519 
4520 	zbc_open_zone(devip, zsp, true);
4521 fini:
4522 	write_unlock(macc_lckp);
4523 	return res;
4524 }
4525 
4526 static void zbc_close_all(struct sdebug_dev_info *devip)
4527 {
4528 	unsigned int i;
4529 
4530 	for (i = 0; i < devip->nr_zones; i++)
4531 		zbc_close_zone(devip, &devip->zstate[i]);
4532 }
4533 
4534 static int resp_close_zone(struct scsi_cmnd *scp,
4535 			   struct sdebug_dev_info *devip)
4536 {
4537 	int res = 0;
4538 	u64 z_id;
4539 	u8 *cmd = scp->cmnd;
4540 	struct sdeb_zone_state *zsp;
4541 	bool all = cmd[14] & 0x01;
4542 	struct sdeb_store_info *sip = devip2sip(devip, false);
4543 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4544 
4545 	if (!sdebug_dev_is_zoned(devip)) {
4546 		mk_sense_invalid_opcode(scp);
4547 		return check_condition_result;
4548 	}
4549 
4550 	write_lock(macc_lckp);
4551 
4552 	if (all) {
4553 		zbc_close_all(devip);
4554 		goto fini;
4555 	}
4556 
4557 	/* Close specified zone */
4558 	z_id = get_unaligned_be64(cmd + 2);
4559 	if (z_id >= sdebug_capacity) {
4560 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4561 		res = check_condition_result;
4562 		goto fini;
4563 	}
4564 
4565 	zsp = zbc_zone(devip, z_id);
4566 	if (z_id != zsp->z_start) {
4567 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4568 		res = check_condition_result;
4569 		goto fini;
4570 	}
4571 	if (zbc_zone_is_conv(zsp)) {
4572 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4573 		res = check_condition_result;
4574 		goto fini;
4575 	}
4576 
4577 	zbc_close_zone(devip, zsp);
4578 fini:
4579 	write_unlock(macc_lckp);
4580 	return res;
4581 }
4582 
4583 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4584 			    struct sdeb_zone_state *zsp, bool empty)
4585 {
4586 	enum sdebug_z_cond zc = zsp->z_cond;
4587 
4588 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4589 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4590 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4591 			zbc_close_zone(devip, zsp);
4592 		if (zsp->z_cond == ZC4_CLOSED)
4593 			devip->nr_closed--;
4594 		zsp->z_wp = zsp->z_start + zsp->z_size;
4595 		zsp->z_cond = ZC5_FULL;
4596 	}
4597 }
4598 
4599 static void zbc_finish_all(struct sdebug_dev_info *devip)
4600 {
4601 	unsigned int i;
4602 
4603 	for (i = 0; i < devip->nr_zones; i++)
4604 		zbc_finish_zone(devip, &devip->zstate[i], false);
4605 }
4606 
4607 static int resp_finish_zone(struct scsi_cmnd *scp,
4608 			    struct sdebug_dev_info *devip)
4609 {
4610 	struct sdeb_zone_state *zsp;
4611 	int res = 0;
4612 	u64 z_id;
4613 	u8 *cmd = scp->cmnd;
4614 	bool all = cmd[14] & 0x01;
4615 	struct sdeb_store_info *sip = devip2sip(devip, false);
4616 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4617 
4618 	if (!sdebug_dev_is_zoned(devip)) {
4619 		mk_sense_invalid_opcode(scp);
4620 		return check_condition_result;
4621 	}
4622 
4623 	write_lock(macc_lckp);
4624 
4625 	if (all) {
4626 		zbc_finish_all(devip);
4627 		goto fini;
4628 	}
4629 
4630 	/* Finish the specified zone */
4631 	z_id = get_unaligned_be64(cmd + 2);
4632 	if (z_id >= sdebug_capacity) {
4633 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4634 		res = check_condition_result;
4635 		goto fini;
4636 	}
4637 
4638 	zsp = zbc_zone(devip, z_id);
4639 	if (z_id != zsp->z_start) {
4640 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4641 		res = check_condition_result;
4642 		goto fini;
4643 	}
4644 	if (zbc_zone_is_conv(zsp)) {
4645 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4646 		res = check_condition_result;
4647 		goto fini;
4648 	}
4649 
4650 	zbc_finish_zone(devip, zsp, true);
4651 fini:
4652 	write_unlock(macc_lckp);
4653 	return res;
4654 }
4655 
4656 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4657 			 struct sdeb_zone_state *zsp)
4658 {
4659 	enum sdebug_z_cond zc;
4660 	struct sdeb_store_info *sip = devip2sip(devip, false);
4661 
4662 	if (zbc_zone_is_conv(zsp))
4663 		return;
4664 
4665 	zc = zsp->z_cond;
4666 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4667 		zbc_close_zone(devip, zsp);
4668 
4669 	if (zsp->z_cond == ZC4_CLOSED)
4670 		devip->nr_closed--;
4671 
4672 	if (zsp->z_wp > zsp->z_start)
4673 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4674 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4675 
4676 	zsp->z_non_seq_resource = false;
4677 	zsp->z_wp = zsp->z_start;
4678 	zsp->z_cond = ZC1_EMPTY;
4679 }
4680 
4681 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4682 {
4683 	unsigned int i;
4684 
4685 	for (i = 0; i < devip->nr_zones; i++)
4686 		zbc_rwp_zone(devip, &devip->zstate[i]);
4687 }
4688 
4689 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4690 {
4691 	struct sdeb_zone_state *zsp;
4692 	int res = 0;
4693 	u64 z_id;
4694 	u8 *cmd = scp->cmnd;
4695 	bool all = cmd[14] & 0x01;
4696 	struct sdeb_store_info *sip = devip2sip(devip, false);
4697 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4698 
4699 	if (!sdebug_dev_is_zoned(devip)) {
4700 		mk_sense_invalid_opcode(scp);
4701 		return check_condition_result;
4702 	}
4703 
4704 	write_lock(macc_lckp);
4705 
4706 	if (all) {
4707 		zbc_rwp_all(devip);
4708 		goto fini;
4709 	}
4710 
4711 	z_id = get_unaligned_be64(cmd + 2);
4712 	if (z_id >= sdebug_capacity) {
4713 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4714 		res = check_condition_result;
4715 		goto fini;
4716 	}
4717 
4718 	zsp = zbc_zone(devip, z_id);
4719 	if (z_id != zsp->z_start) {
4720 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4721 		res = check_condition_result;
4722 		goto fini;
4723 	}
4724 	if (zbc_zone_is_conv(zsp)) {
4725 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 		res = check_condition_result;
4727 		goto fini;
4728 	}
4729 
4730 	zbc_rwp_zone(devip, zsp);
4731 fini:
4732 	write_unlock(macc_lckp);
4733 	return res;
4734 }
4735 
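/*
 * blk_mq_unique_tag() encodes the hardware queue index in the upper 16
 * bits of the returned tag; blk_mq_unique_tag_to_hwq() extracts it, which
 * maps the command onto one of our submit_queues.
 */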
4736 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4737 {
4738 	u16 hwq;
4739 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4740 
4741 	hwq = blk_mq_unique_tag_to_hwq(tag);
4742 
4743 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4744 	if (WARN_ON_ONCE(hwq >= submit_queues))
4745 		hwq = 0;
4746 
4747 	return sdebug_q_arr + hwq;
4748 }
4749 
4750 static u32 get_tag(struct scsi_cmnd *cmnd)
4751 {
4752 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4753 }
4754 
4755 /* Queued (deferred) command completions converge here. */
4756 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4757 {
4758 	bool aborted = sd_dp->aborted;
4759 	int qc_idx;
4760 	int retiring = 0;
4761 	unsigned long iflags;
4762 	struct sdebug_queue *sqp;
4763 	struct sdebug_queued_cmd *sqcp;
4764 	struct scsi_cmnd *scp;
4765 	struct sdebug_dev_info *devip;
4766 
4767 	if (unlikely(aborted))
4768 		sd_dp->aborted = false;
4769 	qc_idx = sd_dp->qc_idx;
4770 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4771 	if (sdebug_statistics) {
4772 		atomic_inc(&sdebug_completions);
4773 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4774 			atomic_inc(&sdebug_miss_cpus);
4775 	}
4776 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4777 		pr_err("wild qc_idx=%d\n", qc_idx);
4778 		return;
4779 	}
4780 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4781 	sd_dp->defer_t = SDEB_DEFER_NONE;
4782 	sqcp = &sqp->qc_arr[qc_idx];
4783 	scp = sqcp->a_cmnd;
4784 	if (unlikely(scp == NULL)) {
4785 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4786 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4787 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4788 		return;
4789 	}
4790 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4791 	if (likely(devip))
4792 		atomic_dec(&devip->num_in_q);
4793 	else
4794 		pr_err("devip=NULL\n");
4795 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4796 		retiring = 1;
4797 
4798 	sqcp->a_cmnd = NULL;
4799 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4800 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4801 		pr_err("Unexpected completion\n");
4802 		return;
4803 	}
4804 
4805 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4806 		int k, retval;
4807 
4808 		retval = atomic_read(&retired_max_queue);
4809 		if (qc_idx >= retval) {
4810 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4811 			pr_err("index %d too large\n", retval);
4812 			return;
4813 		}
4814 		k = find_last_bit(sqp->in_use_bm, retval);
4815 		if ((k < sdebug_max_queue) || (k == retval))
4816 			atomic_set(&retired_max_queue, 0);
4817 		else
4818 			atomic_set(&retired_max_queue, k + 1);
4819 	}
4820 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4821 	if (unlikely(aborted)) {
4822 		if (sdebug_verbose)
4823 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4824 		return;
4825 	}
4826 	scsi_done(scp); /* callback to mid level */
4827 }
4828 
4829 /* When high resolution timer goes off this function is called. */
4830 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4831 {
4832 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4833 						  hrt);
4834 	sdebug_q_cmd_complete(sd_dp);
4835 	return HRTIMER_NORESTART;
4836 }
4837 
4838 /* When work queue schedules work, it calls this function. */
4839 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4840 {
4841 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4842 						  ew.work);
4843 	sdebug_q_cmd_complete(sd_dp);
4844 }
4845 
4846 static bool got_shared_uuid;
4847 static uuid_t shared_uuid;
4848 
4849 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4850 {
4851 	struct sdeb_zone_state *zsp;
4852 	sector_t capacity = get_sdebug_capacity();
4853 	sector_t zstart = 0;
4854 	unsigned int i;
4855 
4856 	/*
4857 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4858 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4859 	 * use the specified zone size, checking that at least 2 zones can be
4860 	 * created for the device.
4861 	 */
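	/*
	 * E.g. with 512 byte sectors, ilog2(sdebug_sector_size) is 9, so a
	 * 4 MiB zone works out to (4 << 20) >> 9 = 8192 blocks.
	 */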
4862 	if (!sdeb_zbc_zone_size_mb) {
4863 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4864 			>> ilog2(sdebug_sector_size);
4865 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4866 			devip->zsize >>= 1;
4867 		if (devip->zsize < 2) {
4868 			pr_err("Device capacity too small\n");
4869 			return -EINVAL;
4870 		}
4871 	} else {
4872 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4873 			pr_err("Zone size is not a power of 2\n");
4874 			return -EINVAL;
4875 		}
4876 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4877 			>> ilog2(sdebug_sector_size);
4878 		if (devip->zsize >= capacity) {
4879 			pr_err("Zone size too large for device capacity\n");
4880 			return -EINVAL;
4881 		}
4882 	}
4883 
4884 	devip->zsize_shift = ilog2(devip->zsize);
4885 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4886 
4887 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4888 		pr_err("Number of conventional zones too large\n");
4889 		return -EINVAL;
4890 	}
4891 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4892 
4893 	if (devip->zmodel == BLK_ZONED_HM) {
4894 		/* sdeb_zbc_max_open can be 0, meaning "not reported" (no limit) */
4895 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4896 			devip->max_open = (devip->nr_zones - 1) / 2;
4897 		else
4898 			devip->max_open = sdeb_zbc_max_open;
4899 	}
4900 
4901 	devip->zstate = kcalloc(devip->nr_zones,
4902 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4903 	if (!devip->zstate)
4904 		return -ENOMEM;
4905 
4906 	for (i = 0; i < devip->nr_zones; i++) {
4907 		zsp = &devip->zstate[i];
4908 
4909 		zsp->z_start = zstart;
4910 
4911 		if (i < devip->nr_conv_zones) {
4912 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4913 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4914 			zsp->z_wp = (sector_t)-1;
4915 		} else {
4916 			if (devip->zmodel == BLK_ZONED_HM)
4917 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4918 			else
4919 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4920 			zsp->z_cond = ZC1_EMPTY;
4921 			zsp->z_wp = zsp->z_start;
4922 		}
4923 
4924 		if (zsp->z_start + devip->zsize < capacity)
4925 			zsp->z_size = devip->zsize;
4926 		else
4927 			zsp->z_size = capacity - zsp->z_start;
4928 
4929 		zstart += zsp->z_size;
4930 	}
4931 
4932 	return 0;
4933 }
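
/*
 * Layout sketch (illustrative): with a capacity of 2097152 blocks (1 GiB
 * of 512-byte blocks), zone_size_mb=64 and zone_nr_conv=1, the loop above
 * produces 16 zones of 131072 blocks each; zone 0 is conventional
 * (ZBC_ZONE_TYPE_CNV, no write pointer) and zones 1-15 are sequential
 * with z_wp initialized to z_start.
 */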
4934 
4935 static struct sdebug_dev_info *sdebug_device_create(
4936 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4937 {
4938 	struct sdebug_dev_info *devip;
4939 
4940 	devip = kzalloc(sizeof(*devip), flags);
4941 	if (devip) {
4942 		if (sdebug_uuid_ctl == 1)
4943 			uuid_gen(&devip->lu_name);
4944 		else if (sdebug_uuid_ctl == 2) {
4945 			if (got_shared_uuid)
4946 				devip->lu_name = shared_uuid;
4947 			else {
4948 				uuid_gen(&shared_uuid);
4949 				got_shared_uuid = true;
4950 				devip->lu_name = shared_uuid;
4951 			}
4952 		}
4953 		devip->sdbg_host = sdbg_host;
4954 		if (sdeb_zbc_in_use) {
4955 			devip->zmodel = sdeb_zbc_model;
4956 			if (sdebug_device_create_zones(devip)) {
4957 				kfree(devip);
4958 				return NULL;
4959 			}
4960 		} else {
4961 			devip->zmodel = BLK_ZONED_NONE;
4962 		}
4964 		devip->create_ts = ktime_get_boottime();
4965 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4966 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4967 	}
4968 	return devip;
4969 }
4970 
4971 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4972 {
4973 	struct sdebug_host_info *sdbg_host;
4974 	struct sdebug_dev_info *open_devip = NULL;
4975 	struct sdebug_dev_info *devip;
4976 
4977 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4978 	if (!sdbg_host) {
4979 		pr_err("Host info NULL\n");
4980 		return NULL;
4981 	}
4982 
4983 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4984 		if (devip->used && devip->channel == sdev->channel &&
4985 		    devip->target == sdev->id &&
4986 		    devip->lun == sdev->lun)
4987 			return devip;
4988 		if (!devip->used && !open_devip)
4989 			open_devip = devip;
4992 	}
4993 	if (!open_devip) { /* try and make a new one */
4994 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4995 		if (!open_devip) {
4996 			pr_err("out of memory at line %d\n", __LINE__);
4997 			return NULL;
4998 		}
4999 	}
5000 
5001 	open_devip->channel = sdev->channel;
5002 	open_devip->target = sdev->id;
5003 	open_devip->lun = sdev->lun;
5004 	open_devip->sdbg_host = sdbg_host;
5005 	atomic_set(&open_devip->num_in_q, 0);
5006 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5007 	open_devip->used = true;
5008 	return open_devip;
5009 }
5010 
5011 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5012 {
5013 	if (sdebug_verbose)
5014 		pr_info("slave_alloc <%u %u %u %llu>\n",
5015 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5016 	return 0;
5017 }
5018 
5019 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5020 {
5021 	struct sdebug_dev_info *devip =
5022 			(struct sdebug_dev_info *)sdp->hostdata;
5023 
5024 	if (sdebug_verbose)
5025 		pr_info("slave_configure <%u %u %u %llu>\n",
5026 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5027 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5028 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5029 	if (devip == NULL) {
5030 		devip = find_build_dev_info(sdp);
5031 		if (devip == NULL)
5032 			return 1;  /* no resources, will be marked offline */
5033 	}
5034 	sdp->hostdata = devip;
5035 	if (sdebug_no_uld)
5036 		sdp->no_uld_attach = 1;
5037 	config_cdb_len(sdp);
5038 	return 0;
5039 }
5040 
5041 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5042 {
5043 	struct sdebug_dev_info *devip =
5044 		(struct sdebug_dev_info *)sdp->hostdata;
5045 
5046 	if (sdebug_verbose)
5047 		pr_info("slave_destroy <%u %u %u %llu>\n",
5048 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5049 	if (devip) {
5050 		/* make this slot available for re-use */
5051 		devip->used = false;
5052 		sdp->hostdata = NULL;
5053 	}
5054 }
5055 
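/*
 * Cancel the deferred completion of a queued command. Callers must drop
 * sqp->qc_lock before calling this helper: hrtimer_cancel() waits for a
 * running sdebug_q_cmd_complete() (which itself takes qc_lock) to finish,
 * and cancel_work_sync() may sleep, so calling either while holding the
 * spinlock could deadlock.
 */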
5056 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5057 			   enum sdeb_defer_type defer_t)
5058 {
5059 	if (!sd_dp)
5060 		return;
5061 	if (defer_t == SDEB_DEFER_HRT)
5062 		hrtimer_cancel(&sd_dp->hrt);
5063 	else if (defer_t == SDEB_DEFER_WQ)
5064 		cancel_work_sync(&sd_dp->ew.work);
5065 }
5066 
5067 /* If @cmnd is found, delete its timer or work queue entry and return
5068    true; else return false. */
5069 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5070 {
5071 	unsigned long iflags;
5072 	int j, k, qmax, r_qmax;
5073 	enum sdeb_defer_type l_defer_t;
5074 	struct sdebug_queue *sqp;
5075 	struct sdebug_queued_cmd *sqcp;
5076 	struct sdebug_dev_info *devip;
5077 	struct sdebug_defer *sd_dp;
5078 
5079 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5080 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5081 		qmax = sdebug_max_queue;
5082 		r_qmax = atomic_read(&retired_max_queue);
5083 		if (r_qmax > qmax)
5084 			qmax = r_qmax;
5085 		for (k = 0; k < qmax; ++k) {
5086 			if (test_bit(k, sqp->in_use_bm)) {
5087 				sqcp = &sqp->qc_arr[k];
5088 				if (cmnd != sqcp->a_cmnd)
5089 					continue;
5090 				/* found */
5091 				devip = (struct sdebug_dev_info *)
5092 						cmnd->device->hostdata;
5093 				if (devip)
5094 					atomic_dec(&devip->num_in_q);
5095 				sqcp->a_cmnd = NULL;
5096 				sd_dp = sqcp->sd_dp;
5097 				if (sd_dp) {
5098 					l_defer_t = sd_dp->defer_t;
5099 					sd_dp->defer_t = SDEB_DEFER_NONE;
5100 				} else
5101 					l_defer_t = SDEB_DEFER_NONE;
5102 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5103 				stop_qc_helper(sd_dp, l_defer_t);
5104 				clear_bit(k, sqp->in_use_bm);
5105 				return true;
5106 			}
5107 		}
5108 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5109 	}
5110 	return false;
5111 }
5112 
5113 /* Deletes (stops) timers or work queues of all queued commands */
5114 static void stop_all_queued(void)
5115 {
5116 	unsigned long iflags;
5117 	int j, k;
5118 	enum sdeb_defer_type l_defer_t;
5119 	struct sdebug_queue *sqp;
5120 	struct sdebug_queued_cmd *sqcp;
5121 	struct sdebug_dev_info *devip;
5122 	struct sdebug_defer *sd_dp;
5123 
5124 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5125 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5126 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5127 			if (test_bit(k, sqp->in_use_bm)) {
5128 				sqcp = &sqp->qc_arr[k];
5129 				if (sqcp->a_cmnd == NULL)
5130 					continue;
5131 				devip = (struct sdebug_dev_info *)
5132 					sqcp->a_cmnd->device->hostdata;
5133 				if (devip)
5134 					atomic_dec(&devip->num_in_q);
5135 				sqcp->a_cmnd = NULL;
5136 				sd_dp = sqcp->sd_dp;
5137 				if (sd_dp) {
5138 					l_defer_t = sd_dp->defer_t;
5139 					sd_dp->defer_t = SDEB_DEFER_NONE;
5140 				} else
5141 					l_defer_t = SDEB_DEFER_NONE;
5142 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5143 				stop_qc_helper(sd_dp, l_defer_t);
5144 				clear_bit(k, sqp->in_use_bm);
5145 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5146 			}
5147 		}
5148 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5149 	}
5150 }
5151 
5152 /* Free queued command memory on heap */
5153 static void free_all_queued(void)
5154 {
5155 	int j, k;
5156 	struct sdebug_queue *sqp;
5157 	struct sdebug_queued_cmd *sqcp;
5158 
5159 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5160 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5161 			sqcp = &sqp->qc_arr[k];
5162 			kfree(sqcp->sd_dp);
5163 			sqcp->sd_dp = NULL;
5164 		}
5165 	}
5166 }
5167 
5168 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5169 {
5170 	bool ok;
5171 
5172 	++num_aborts;
5173 	if (SCpnt) {
5174 		ok = stop_queued_cmnd(SCpnt);
5175 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5176 			sdev_printk(KERN_INFO, SCpnt->device,
5177 				    "%s: command%s found\n", __func__,
5178 				    ok ? "" : " not");
5179 	}
5180 	return SUCCESS;
5181 }
5182 
5183 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5184 {
5185 	++num_dev_resets;
5186 	if (SCpnt && SCpnt->device) {
5187 		struct scsi_device *sdp = SCpnt->device;
5188 		struct sdebug_dev_info *devip =
5189 				(struct sdebug_dev_info *)sdp->hostdata;
5190 
5191 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5192 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5193 		if (devip)
5194 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5195 	}
5196 	return SUCCESS;
5197 }
5198 
5199 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5200 {
5201 	struct sdebug_host_info *sdbg_host;
5202 	struct sdebug_dev_info *devip;
5203 	struct scsi_device *sdp;
5204 	struct Scsi_Host *hp;
5205 	int k = 0;
5206 
5207 	++num_target_resets;
5208 	if (!SCpnt)
5209 		goto lie;
5210 	sdp = SCpnt->device;
5211 	if (!sdp)
5212 		goto lie;
5213 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5214 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5215 	hp = sdp->host;
5216 	if (!hp)
5217 		goto lie;
5218 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5219 	if (sdbg_host) {
5220 		list_for_each_entry(devip,
5221 				    &sdbg_host->dev_info_list,
5222 				    dev_list)
5223 			if (devip->target == sdp->id) {
5224 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5225 				++k;
5226 			}
5227 	}
5228 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5229 		sdev_printk(KERN_INFO, sdp,
5230 			    "%s: %d device(s) found in target\n", __func__, k);
5231 lie:
5232 	return SUCCESS;
5233 }
5234 
5235 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5236 {
5237 	struct sdebug_host_info *sdbg_host;
5238 	struct sdebug_dev_info *devip;
5239 	struct scsi_device *sdp;
5240 	struct Scsi_Host *hp;
5241 	int k = 0;
5242 
5243 	++num_bus_resets;
5244 	if (!(SCpnt && SCpnt->device))
5245 		goto lie;
5246 	sdp = SCpnt->device;
5247 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5248 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5249 	hp = sdp->host;
5250 	if (hp) {
5251 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5252 		if (sdbg_host) {
5253 			list_for_each_entry(devip,
5254 					    &sdbg_host->dev_info_list,
5255 					    dev_list) {
5256 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5257 				++k;
5258 			}
5259 		}
5260 	}
5261 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5262 		sdev_printk(KERN_INFO, sdp,
5263 			    "%s: %d device(s) found in host\n", __func__, k);
5264 lie:
5265 	return SUCCESS;
5266 }
5267 
5268 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5269 {
5270 	struct sdebug_host_info *sdbg_host;
5271 	struct sdebug_dev_info *devip;
5272 	int k = 0;
5273 
5274 	++num_host_resets;
5275 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5276 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5277 	spin_lock(&sdebug_host_list_lock);
5278 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5279 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5280 				    dev_list) {
5281 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5282 			++k;
5283 		}
5284 	}
5285 	spin_unlock(&sdebug_host_list_lock);
5286 	stop_all_queued();
5287 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5288 		sdev_printk(KERN_INFO, SCpnt->device,
5289 			    "%s: %d device(s) found\n", __func__, k);
5290 	return SUCCESS;
5291 }
5292 
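/*
 * Write an MBR-style partition table into the first block of the ram
 * store. Worked example (illustrative geometry): with sdebug_heads=255
 * and sdebug_sectors_per=63, heads_by_sects is 16065, so each partition
 * start below is rounded down to a 16065-sector cylinder boundary before
 * its CHS fields are derived.
 */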
5293 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5294 {
5295 	struct msdos_partition *pp;
5296 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5297 	int sectors_per_part, num_sectors, k;
5298 	int heads_by_sects, start_sec, end_sec;
5299 
5300 	/* assume partition table already zeroed */
5301 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5302 		return;
5303 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5304 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5305 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5306 	}
5307 	num_sectors = (int)get_sdebug_capacity();
5308 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5309 			   / sdebug_num_parts;
5310 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5311 	starts[0] = sdebug_sectors_per;
5312 	max_part_secs = sectors_per_part;
5313 	for (k = 1; k < sdebug_num_parts; ++k) {
5314 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5315 			    * heads_by_sects;
5316 		if (starts[k] - starts[k - 1] < max_part_secs)
5317 			max_part_secs = starts[k] - starts[k - 1];
5318 	}
5319 	starts[sdebug_num_parts] = num_sectors;
5320 	starts[sdebug_num_parts + 1] = 0;
5321 
5322 	ramp[510] = 0x55;	/* MBR boot signature (magic) */
5323 	ramp[511] = 0xAA;
5324 	pp = (struct msdos_partition *)(ramp + 0x1be);
5325 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5326 		start_sec = starts[k];
5327 		end_sec = starts[k] + max_part_secs - 1;
5328 		pp->boot_ind = 0;
5329 
5330 		pp->cyl = start_sec / heads_by_sects;
5331 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5332 			   / sdebug_sectors_per;
5333 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5334 
5335 		pp->end_cyl = end_sec / heads_by_sects;
5336 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5337 			       / sdebug_sectors_per;
5338 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5339 
5340 		pp->start_sect = cpu_to_le32(start_sec);
5341 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5342 		pp->sys_ind = 0x83;	/* plain Linux partition */
5343 	}
5344 }
5345 
5346 static void block_unblock_all_queues(bool block)
5347 {
5348 	int j;
5349 	struct sdebug_queue *sqp;
5350 
5351 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5352 		atomic_set(&sqp->blocked, (int)block);
5353 }
5354 
5355 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5356  * commands will be processed normally before triggers occur.
5357  */
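/* Example (illustrative): with every_nth=100 and sdebug_cmnd_count at
 * 250, the count is rounded down to 200, so nearly a full cycle of
 * commands is processed before inject_on_this_cmd() next returns true.
 */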
5358 static void tweak_cmnd_count(void)
5359 {
5360 	int count, modulo;
5361 
5362 	modulo = abs(sdebug_every_nth);
5363 	if (modulo < 2)
5364 		return;
5365 	block_unblock_all_queues(true);
5366 	count = atomic_read(&sdebug_cmnd_count);
5367 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5368 	block_unblock_all_queues(false);
5369 }
5370 
5371 static void clear_queue_stats(void)
5372 {
5373 	atomic_set(&sdebug_cmnd_count, 0);
5374 	atomic_set(&sdebug_completions, 0);
5375 	atomic_set(&sdebug_miss_cpus, 0);
5376 	atomic_set(&sdebug_a_tsf, 0);
5377 }
5378 
5379 static bool inject_on_this_cmd(void)
5380 {
5381 	if (sdebug_every_nth == 0)
5382 		return false;
5383 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5384 }
5385 
5386 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5387 
5388 /* Complete the processing of the thread that queued a SCSI command to this
5389  * driver. It either completes the command by calling scsi_done() or
5390  * schedules an hrtimer or work queue item and then returns 0. Returns
5391  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5392  */
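/* Sketch of the deferral choices made below: delta_jiff == 0 responds in
 * the submitting thread; delta_jiff > 0 or ndelay > 0 arms an hrtimer
 * (or, for REQ_POLLED requests, parks the command as SDEB_DEFER_POLL);
 * otherwise (delta_jiff < 0) completion is deferred to a work queue.
 */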
5393 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5394 			 int scsi_result,
5395 			 int (*pfp)(struct scsi_cmnd *,
5396 				    struct sdebug_dev_info *),
5397 			 int delta_jiff, int ndelay)
5398 {
5399 	bool new_sd_dp;
5400 	bool inject = false;
5401 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5402 	int k, num_in_q, qdepth;
5403 	unsigned long iflags;
5404 	u64 ns_from_boot = 0;
5405 	struct sdebug_queue *sqp;
5406 	struct sdebug_queued_cmd *sqcp;
5407 	struct scsi_device *sdp;
5408 	struct sdebug_defer *sd_dp;
5409 
5410 	if (unlikely(devip == NULL)) {
5411 		if (scsi_result == 0)
5412 			scsi_result = DID_NO_CONNECT << 16;
5413 		goto respond_in_thread;
5414 	}
5415 	sdp = cmnd->device;
5416 
5417 	if (delta_jiff == 0)
5418 		goto respond_in_thread;
5419 
5420 	sqp = get_queue(cmnd);
5421 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5422 	if (unlikely(atomic_read(&sqp->blocked))) {
5423 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5424 		return SCSI_MLQUEUE_HOST_BUSY;
5425 	}
5426 	num_in_q = atomic_read(&devip->num_in_q);
5427 	qdepth = cmnd->device->queue_depth;
5428 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5429 		if (scsi_result) {
5430 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5431 			goto respond_in_thread;
5432 		} else
5433 			scsi_result = device_qfull_result;
5434 	} else if (unlikely(sdebug_every_nth &&
5435 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5436 			    (scsi_result == 0))) {
5437 		if ((num_in_q == (qdepth - 1)) &&
5438 		    (atomic_inc_return(&sdebug_a_tsf) >=
5439 		     abs(sdebug_every_nth))) {
5440 			atomic_set(&sdebug_a_tsf, 0);
5441 			inject = true;
5442 			scsi_result = device_qfull_result;
5443 		}
5444 	}
5445 
5446 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5447 	if (unlikely(k >= sdebug_max_queue)) {
5448 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5449 		if (scsi_result)
5450 			goto respond_in_thread;
5451 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5452 			scsi_result = device_qfull_result;
5453 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5454 			sdev_printk(KERN_INFO, sdp,
5455 				    "%s: max_queue=%d exceeded, %s\n",
5456 				    __func__, sdebug_max_queue,
5457 				    (scsi_result ?  "status: TASK SET FULL" :
5458 						    "report: host busy"));
5459 		if (scsi_result)
5460 			goto respond_in_thread;
5461 		else
5462 			return SCSI_MLQUEUE_HOST_BUSY;
5463 	}
5464 	set_bit(k, sqp->in_use_bm);
5465 	atomic_inc(&devip->num_in_q);
5466 	sqcp = &sqp->qc_arr[k];
5467 	sqcp->a_cmnd = cmnd;
5468 	cmnd->host_scribble = (unsigned char *)sqcp;
5469 	sd_dp = sqcp->sd_dp;
5470 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5471 
5472 	if (!sd_dp) {
5473 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5474 		if (!sd_dp) {
5475 			atomic_dec(&devip->num_in_q);
5476 			clear_bit(k, sqp->in_use_bm);
5477 			return SCSI_MLQUEUE_HOST_BUSY;
5478 		}
5479 		new_sd_dp = true;
5480 	} else {
5481 		new_sd_dp = false;
5482 	}
5483 
5484 	/* Set the hostwide tag */
5485 	if (sdebug_host_max_queue)
5486 		sd_dp->hc_idx = get_tag(cmnd);
5487 
5488 	if (polled)
5489 		ns_from_boot = ktime_get_boottime_ns();
5490 
5491 	/* one of the resp_*() response functions is called here */
5492 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5493 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5494 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5495 		delta_jiff = ndelay = 0;
5496 	}
5497 	if (cmnd->result == 0 && scsi_result != 0)
5498 		cmnd->result = scsi_result;
5499 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5500 		if (atomic_read(&sdeb_inject_pending)) {
5501 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5502 			atomic_set(&sdeb_inject_pending, 0);
5503 			cmnd->result = check_condition_result;
5504 		}
5505 	}
5506 
5507 	if (unlikely(sdebug_verbose && cmnd->result))
5508 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5509 			    __func__, cmnd->result);
5510 
5511 	if (delta_jiff > 0 || ndelay > 0) {
5512 		ktime_t kt;
5513 
5514 		if (delta_jiff > 0) {
5515 			u64 ns = jiffies_to_nsecs(delta_jiff);
5516 
5517 			if (sdebug_random && ns < U32_MAX) {
5518 				ns = prandom_u32_max((u32)ns);
5519 			} else if (sdebug_random) {
5520 				ns >>= 12;	/* scale to 4 usec precision */
5521 				if (ns < U32_MAX)	/* over 4 hours max */
5522 					ns = prandom_u32_max((u32)ns);
5523 				ns <<= 12;
5524 			}
5525 			kt = ns_to_ktime(ns);
5526 		} else {	/* ndelay has a 4.2 second max */
5527 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5528 					     (u32)ndelay;
5529 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5530 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5531 
5532 				if (kt <= d) {	/* elapsed duration >= kt */
5533 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5534 					sqcp->a_cmnd = NULL;
5535 					atomic_dec(&devip->num_in_q);
5536 					clear_bit(k, sqp->in_use_bm);
5537 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5538 					if (new_sd_dp)
5539 						kfree(sd_dp);
5540 					/* call scsi_done() from this thread */
5541 					scsi_done(cmnd);
5542 					return 0;
5543 				}
5544 				/* otherwise reduce kt by elapsed time */
5545 				kt -= d;
5546 			}
5547 		}
5548 		if (polled) {
5549 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5550 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5551 			if (!sd_dp->init_poll) {
5552 				sd_dp->init_poll = true;
5553 				sqcp->sd_dp = sd_dp;
5554 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5555 				sd_dp->qc_idx = k;
5556 			}
5557 			sd_dp->defer_t = SDEB_DEFER_POLL;
5558 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5559 		} else {
5560 			if (!sd_dp->init_hrt) {
5561 				sd_dp->init_hrt = true;
5562 				sqcp->sd_dp = sd_dp;
5563 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5564 					     HRTIMER_MODE_REL_PINNED);
5565 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5566 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5567 				sd_dp->qc_idx = k;
5568 			}
5569 			sd_dp->defer_t = SDEB_DEFER_HRT;
5570 			/* schedule the invocation of scsi_done() for a later time */
5571 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5572 		}
5573 		if (sdebug_statistics)
5574 			sd_dp->issuing_cpu = raw_smp_processor_id();
5575 	} else {	/* delta_jiff < 0, use work queue */
5576 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5577 			     atomic_read(&sdeb_inject_pending)))
5578 			sd_dp->aborted = true;
5579 		if (polled) {
5580 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5581 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5582 			if (!sd_dp->init_poll) {
5583 				sd_dp->init_poll = true;
5584 				sqcp->sd_dp = sd_dp;
5585 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5586 				sd_dp->qc_idx = k;
5587 			}
5588 			sd_dp->defer_t = SDEB_DEFER_POLL;
5589 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5590 		} else {
5591 			if (!sd_dp->init_wq) {
5592 				sd_dp->init_wq = true;
5593 				sqcp->sd_dp = sd_dp;
5594 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5595 				sd_dp->qc_idx = k;
5596 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5597 			}
5598 			sd_dp->defer_t = SDEB_DEFER_WQ;
5599 			schedule_work(&sd_dp->ew.work);
5600 		}
5601 		if (sdebug_statistics)
5602 			sd_dp->issuing_cpu = raw_smp_processor_id();
5603 		if (unlikely(sd_dp->aborted)) {
5604 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5605 				    scsi_cmd_to_rq(cmnd)->tag);
5606 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5607 			atomic_set(&sdeb_inject_pending, 0);
5608 			sd_dp->aborted = false;
5609 		}
5610 	}
5611 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5612 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5613 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5614 	return 0;
5615 
5616 respond_in_thread:	/* call back to mid-layer using invocation thread */
5617 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5618 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5619 	if (cmnd->result == 0 && scsi_result != 0)
5620 		cmnd->result = scsi_result;
5621 	scsi_done(cmnd);
5622 	return 0;
5623 }
5624 
5625 /* Note: The following macros create attribute files in the
5626    /sys/module/scsi_debug/parameters directory. Unfortunately this
5627    driver is not notified when such a file changes, so it cannot trigger
5628    auxiliary actions as it can when the corresponding attribute in the
5629    /sys/bus/pseudo/drivers/scsi_debug directory is changed. An example
5630    invocation is sketched after the parameter list below. */
5631 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5632 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5633 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5634 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5635 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5636 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5637 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5638 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5639 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5640 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5641 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5642 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5643 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5644 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5645 module_param_string(inq_product, sdebug_inq_product_id,
5646 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5647 module_param_string(inq_rev, sdebug_inq_product_rev,
5648 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5649 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5650 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5651 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5652 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5653 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5654 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5655 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5656 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5657 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5658 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5659 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5660 		   S_IRUGO | S_IWUSR);
5661 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5662 		   S_IRUGO | S_IWUSR);
5663 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5664 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5665 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5666 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5667 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5668 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5669 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5670 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5671 module_param_named(per_host_store, sdebug_per_host_store, bool,
5672 		   S_IRUGO | S_IWUSR);
5673 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5674 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5675 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5676 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5677 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5678 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5679 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5680 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5681 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5682 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5683 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5684 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5685 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5686 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5687 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5688 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5689 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5690 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5691 		   S_IRUGO | S_IWUSR);
5692 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5693 module_param_named(write_same_length, sdebug_write_same_length, int,
5694 		   S_IRUGO | S_IWUSR);
5695 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5696 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5697 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5698 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
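
/* Example invocation (hypothetical values, not the defaults):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 \
 *            zbc=managed zone_size_mb=128 zone_nr_conv=1
 * creates one host with two targets of four LUNs each, backed by a
 * shared 256 MiB ram store and presented as host-managed ZBC devices.
 */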
5699 
5700 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5701 MODULE_DESCRIPTION("SCSI debug adapter driver");
5702 MODULE_LICENSE("GPL");
5703 MODULE_VERSION(SDEBUG_VERSION);
5704 
5705 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5706 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5707 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5708 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5709 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5710 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5711 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5712 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5713 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5714 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5715 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5716 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5717 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5718 MODULE_PARM_DESC(host_max_queue,
5719 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5720 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5721 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5722 		 SDEBUG_VERSION "\")");
5723 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5724 MODULE_PARM_DESC(lbprz,
5725 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5726 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5727 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5728 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5729 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5730 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5731 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5732 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5733 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5734 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5735 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5736 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5737 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5738 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5739 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5740 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5741 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5742 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5743 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5744 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5745 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5746 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5747 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5748 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5749 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5750 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5751 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5752 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5753 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5754 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5755 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5756 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5757 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5758 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5759 MODULE_PARM_DESC(uuid_ctl,
5760 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5761 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5762 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5763 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5764 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5765 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5766 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5767 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5768 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5769 
5770 #define SDEBUG_INFO_LEN 256
5771 static char sdebug_info[SDEBUG_INFO_LEN];
5772 
5773 static const char *scsi_debug_info(struct Scsi_Host *shp)
5774 {
5775 	int k;
5776 
5777 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5778 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5779 	if (k >= (SDEBUG_INFO_LEN - 1))
5780 		return sdebug_info;
5781 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5782 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5783 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5784 		  "statistics", (int)sdebug_statistics);
5785 	return sdebug_info;
5786 }
5787 
5788 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5789 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5790 				 int length)
5791 {
5792 	char arr[16];
5793 	int opts;
5794 	int min_len = length > 15 ? 15 : length;
5795 
5796 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5797 		return -EACCES;
5798 	memcpy(arr, buffer, min_len);
5799 	arr[min_len] = '\0';
5800 	if (1 != sscanf(arr, "%d", &opts))
5801 		return -EINVAL;
5802 	sdebug_opts = opts;
5803 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5804 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5805 	if (sdebug_every_nth != 0)
5806 		tweak_cmnd_count();
5807 	return length;
5808 }
5809 
5810 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5811  * same for each scsi_debug host (if more than one). Some of the counters
5812  * output here are not atomic, so they may be inaccurate on a busy system. */
5813 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5814 {
5815 	int f, j, l;
5816 	struct sdebug_queue *sqp;
5817 	struct sdebug_host_info *sdhp;
5818 
5819 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5820 		   SDEBUG_VERSION, sdebug_version_date);
5821 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5822 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5823 		   sdebug_opts, sdebug_every_nth);
5824 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5825 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5826 		   sdebug_sector_size, "bytes");
5827 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5828 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5829 		   num_aborts);
5830 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5831 		   num_dev_resets, num_target_resets, num_bus_resets,
5832 		   num_host_resets);
5833 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5834 		   dix_reads, dix_writes, dif_errors);
5835 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5836 		   sdebug_statistics);
5837 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5838 		   atomic_read(&sdebug_cmnd_count),
5839 		   atomic_read(&sdebug_completions),
5840 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5841 		   atomic_read(&sdebug_a_tsf),
5842 		   atomic_read(&sdeb_mq_poll_count));
5843 
5844 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5845 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5846 		seq_printf(m, "  queue %d:\n", j);
5847 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5848 		if (f != sdebug_max_queue) {
5849 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5850 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5851 				   "first,last bits", f, l);
5852 		}
5853 	}
5854 
5855 	seq_printf(m, "this host_no=%d\n", host->host_no);
5856 	if (!xa_empty(per_store_ap)) {
5857 		bool niu;
5858 		int idx;
5859 		unsigned long l_idx;
5860 		struct sdeb_store_info *sip;
5861 
5862 		seq_puts(m, "\nhost list:\n");
5863 		j = 0;
5864 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5865 			idx = sdhp->si_idx;
5866 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5867 				   sdhp->shost->host_no, idx);
5868 			++j;
5869 		}
5870 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5871 			   sdeb_most_recent_idx);
5872 		j = 0;
5873 		xa_for_each(per_store_ap, l_idx, sip) {
5874 			niu = xa_get_mark(per_store_ap, l_idx,
5875 					  SDEB_XA_NOT_IN_USE);
5876 			idx = (int)l_idx;
5877 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5878 				   (niu ? "  not_in_use" : ""));
5879 			++j;
5880 		}
5881 	}
5882 	return 0;
5883 }
5884 
5885 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5886 {
5887 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5888 }
5889 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5890  * of delay is jiffies.
5891  */
5892 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5893 			   size_t count)
5894 {
5895 	int jdelay, res;
5896 
5897 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5898 		res = count;
5899 		if (sdebug_jdelay != jdelay) {
5900 			int j, k;
5901 			struct sdebug_queue *sqp;
5902 
5903 			block_unblock_all_queues(true);
5904 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5905 			     ++j, ++sqp) {
5906 				k = find_first_bit(sqp->in_use_bm,
5907 						   sdebug_max_queue);
5908 				if (k != sdebug_max_queue) {
5909 					res = -EBUSY;   /* queued commands */
5910 					break;
5911 				}
5912 			}
5913 			if (res > 0) {
5914 				sdebug_jdelay = jdelay;
5915 				sdebug_ndelay = 0;
5916 			}
5917 			block_unblock_all_queues(false);
5918 		}
5919 		return res;
5920 	}
5921 	return -EINVAL;
5922 }
5923 static DRIVER_ATTR_RW(delay);
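
/* Example (hypothetical): once the module is loaded,
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * requests a 2 jiffy response delay, and fails with EBUSY while any
 * commands are still queued (see delay_store() above).
 */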
5924 
5925 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5926 {
5927 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5928 }
5929 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5930 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5931 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5932 			    size_t count)
5933 {
5934 	int ndelay, res;
5935 
5936 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5937 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5938 		res = count;
5939 		if (sdebug_ndelay != ndelay) {
5940 			int j, k;
5941 			struct sdebug_queue *sqp;
5942 
5943 			block_unblock_all_queues(true);
5944 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5945 			     ++j, ++sqp) {
5946 				k = find_first_bit(sqp->in_use_bm,
5947 						   sdebug_max_queue);
5948 				if (k != sdebug_max_queue) {
5949 					res = -EBUSY;   /* queued commands */
5950 					break;
5951 				}
5952 			}
5953 			if (res > 0) {
5954 				sdebug_ndelay = ndelay;
5955 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5956 							: DEF_JDELAY;
5957 			}
5958 			block_unblock_all_queues(false);
5959 		}
5960 		return res;
5961 	}
5962 	return -EINVAL;
5963 }
5964 static DRIVER_ATTR_RW(ndelay);
5965 
5966 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5967 {
5968 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5969 }
5970 
5971 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5972 			  size_t count)
5973 {
5974 	int opts;
5975 	char work[20];
5976 
5977 	if (sscanf(buf, "%10s", work) == 1) {
5978 		if (strncasecmp(work, "0x", 2) == 0) {
5979 			if (kstrtoint(work + 2, 16, &opts) == 0)
5980 				goto opts_done;
5981 		} else {
5982 			if (kstrtoint(work, 10, &opts) == 0)
5983 				goto opts_done;
5984 		}
5985 	}
5986 	return -EINVAL;
5987 opts_done:
5988 	sdebug_opts = opts;
5989 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5990 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5991 	tweak_cmnd_count();
5992 	return count;
5993 }
5994 static DRIVER_ATTR_RW(opts);
5995 
5996 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5997 {
5998 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5999 }
6000 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6001 			   size_t count)
6002 {
6003 	int n;
6004 
6005 	/* Cannot change from or to TYPE_ZBC with sysfs */
6006 	if (sdebug_ptype == TYPE_ZBC)
6007 		return -EINVAL;
6008 
6009 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6010 		if (n == TYPE_ZBC)
6011 			return -EINVAL;
6012 		sdebug_ptype = n;
6013 		return count;
6014 	}
6015 	return -EINVAL;
6016 }
6017 static DRIVER_ATTR_RW(ptype);
6018 
6019 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6020 {
6021 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6022 }
6023 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6024 			    size_t count)
6025 {
6026 	int n;
6027 
6028 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6029 		sdebug_dsense = n;
6030 		return count;
6031 	}
6032 	return -EINVAL;
6033 }
6034 static DRIVER_ATTR_RW(dsense);
6035 
6036 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6037 {
6038 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6039 }
6040 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6041 			     size_t count)
6042 {
6043 	int n, idx;
6044 
6045 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6046 		bool want_store = (n == 0);
6047 		struct sdebug_host_info *sdhp;
6048 
6049 		n = (n > 0);
6050 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6051 		if (sdebug_fake_rw == n)
6052 			return count;	/* not transitioning so do nothing */
6053 
6054 		if (want_store) {	/* 1 --> 0 transition, set up store */
6055 			if (sdeb_first_idx < 0) {
6056 				idx = sdebug_add_store();
6057 				if (idx < 0)
6058 					return idx;
6059 			} else {
6060 				idx = sdeb_first_idx;
6061 				xa_clear_mark(per_store_ap, idx,
6062 					      SDEB_XA_NOT_IN_USE);
6063 			}
6064 			/* make all hosts use same store */
6065 			list_for_each_entry(sdhp, &sdebug_host_list,
6066 					    host_list) {
6067 				if (sdhp->si_idx != idx) {
6068 					xa_set_mark(per_store_ap, sdhp->si_idx,
6069 						    SDEB_XA_NOT_IN_USE);
6070 					sdhp->si_idx = idx;
6071 				}
6072 			}
6073 			sdeb_most_recent_idx = idx;
6074 		} else {	/* 0 --> 1 transition is trigger for shrink */
6075 			sdebug_erase_all_stores(true /* apart from first */);
6076 		}
6077 		sdebug_fake_rw = n;
6078 		return count;
6079 	}
6080 	return -EINVAL;
6081 }
6082 static DRIVER_ATTR_RW(fake_rw);
6083 
6084 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6085 {
6086 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6087 }
6088 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6089 			      size_t count)
6090 {
6091 	int n;
6092 
6093 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6094 		sdebug_no_lun_0 = n;
6095 		return count;
6096 	}
6097 	return -EINVAL;
6098 }
6099 static DRIVER_ATTR_RW(no_lun_0);
6100 
6101 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6102 {
6103 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6104 }
6105 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6106 			      size_t count)
6107 {
6108 	int n;
6109 
6110 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6111 		sdebug_num_tgts = n;
6112 		sdebug_max_tgts_luns();
6113 		return count;
6114 	}
6115 	return -EINVAL;
6116 }
6117 static DRIVER_ATTR_RW(num_tgts);
6118 
6119 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6120 {
6121 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6122 }
6123 static DRIVER_ATTR_RO(dev_size_mb);
6124 
6125 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6126 {
6127 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6128 }
6129 
6130 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6131 				    size_t count)
6132 {
6133 	bool v;
6134 
6135 	if (kstrtobool(buf, &v))
6136 		return -EINVAL;
6137 
6138 	sdebug_per_host_store = v;
6139 	return count;
6140 }
6141 static DRIVER_ATTR_RW(per_host_store);
6142 
6143 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6144 {
6145 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6146 }
6147 static DRIVER_ATTR_RO(num_parts);
6148 
6149 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6150 {
6151 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6152 }
6153 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6154 			       size_t count)
6155 {
6156 	int nth;
6157 	char work[20];
6158 
6159 	if (sscanf(buf, "%10s", work) == 1) {
6160 		if (strncasecmp(work, "0x", 2) == 0) {
6161 			if (kstrtoint(work + 2, 16, &nth) == 0)
6162 				goto every_nth_done;
6163 		} else {
6164 			if (kstrtoint(work, 10, &nth) == 0)
6165 				goto every_nth_done;
6166 		}
6167 	}
6168 	return -EINVAL;
6169 
6170 every_nth_done:
6171 	sdebug_every_nth = nth;
6172 	if (nth && !sdebug_statistics) {
6173 		pr_info("every_nth needs statistics=1, setting it\n");
6174 		sdebug_statistics = true;
6175 	}
6176 	tweak_cmnd_count();
6177 	return count;
6178 }
6179 static DRIVER_ATTR_RW(every_nth);
6180 
6181 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6182 {
6183 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6184 }
6185 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6186 				size_t count)
6187 {
6188 	int n;
6189 	bool changed;
6190 
6191 	if (kstrtoint(buf, 0, &n))
6192 		return -EINVAL;
6193 	if (n >= 0) {
6194 		if (n > (int)SAM_LUN_AM_FLAT) {
6195 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6196 			return -EINVAL;
6197 		}
6198 		changed = ((int)sdebug_lun_am != n);
6199 		sdebug_lun_am = n;
6200 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6201 			struct sdebug_host_info *sdhp;
6202 			struct sdebug_dev_info *dp;
6203 
6204 			spin_lock(&sdebug_host_list_lock);
6205 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6206 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6207 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6208 				}
6209 			}
6210 			spin_unlock(&sdebug_host_list_lock);
6211 		}
6212 		return count;
6213 	}
6214 	return -EINVAL;
6215 }
6216 static DRIVER_ATTR_RW(lun_format);
6217 
6218 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6219 {
6220 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6221 }
6222 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6223 			      size_t count)
6224 {
6225 	int n;
6226 	bool changed;
6227 
6228 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6229 		if (n > 256) {
6230 			pr_warn("max_luns can be no more than 256\n");
6231 			return -EINVAL;
6232 		}
6233 		changed = (sdebug_max_luns != n);
6234 		sdebug_max_luns = n;
6235 		sdebug_max_tgts_luns();
6236 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6237 			struct sdebug_host_info *sdhp;
6238 			struct sdebug_dev_info *dp;
6239 
6240 			spin_lock(&sdebug_host_list_lock);
6241 			list_for_each_entry(sdhp, &sdebug_host_list,
6242 					    host_list) {
6243 				list_for_each_entry(dp, &sdhp->dev_info_list,
6244 						    dev_list) {
6245 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6246 						dp->uas_bm);
6247 				}
6248 			}
6249 			spin_unlock(&sdebug_host_list_lock);
6250 		}
6251 		return count;
6252 	}
6253 	return -EINVAL;
6254 }
6255 static DRIVER_ATTR_RW(max_luns);
6256 
6257 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6258 {
6259 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6260 }
6261 /* N.B. max_queue can be changed while there are queued commands. In flight
6262  * commands beyond the new max_queue will be completed. */
6263 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6264 			       size_t count)
6265 {
6266 	int j, n, k, a;
6267 	struct sdebug_queue *sqp;
6268 
6269 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6270 	    (n <= SDEBUG_CANQUEUE) &&
6271 	    (sdebug_host_max_queue == 0)) {
6272 		block_unblock_all_queues(true);
6273 		k = 0;
6274 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6275 		     ++j, ++sqp) {
6276 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6277 			if (a > k)
6278 				k = a;
6279 		}
6280 		sdebug_max_queue = n;
6281 		if (k == SDEBUG_CANQUEUE)
6282 			atomic_set(&retired_max_queue, 0);
6283 		else if (k >= n)
6284 			atomic_set(&retired_max_queue, k + 1);
6285 		else
6286 			atomic_set(&retired_max_queue, 0);
6287 		block_unblock_all_queues(false);
6288 		return count;
6289 	}
6290 	return -EINVAL;
6291 }
6292 static DRIVER_ATTR_RW(max_queue);
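
/* Note: when max_queue is lowered below the highest in-use slot,
 * retired_max_queue is set to one past that slot; sdebug_q_cmd_complete()
 * shrinks it back toward 0 as the higher-numbered commands complete.
 */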
6293 
6294 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6295 {
6296 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6297 }
6298 
6299 /*
6300  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6301  * in range [0, sdebug_host_max_queue), we can't change it.
6302  */
6303 static DRIVER_ATTR_RO(host_max_queue);
6304 
6305 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6306 {
6307 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6308 }
6309 static DRIVER_ATTR_RO(no_uld);
6310 
6311 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6312 {
6313 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6314 }
6315 static DRIVER_ATTR_RO(scsi_level);
6316 
6317 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6318 {
6319 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6320 }
6321 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6322 				size_t count)
6323 {
6324 	int n;
6325 	bool changed;
6326 
6327 	/* Ignore capacity change for ZBC drives for now */
6328 	if (sdeb_zbc_in_use)
6329 		return -ENOTSUPP;
6330 
6331 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6332 		changed = (sdebug_virtual_gb != n);
6333 		sdebug_virtual_gb = n;
6334 		sdebug_capacity = get_sdebug_capacity();
6335 		if (changed) {
6336 			struct sdebug_host_info *sdhp;
6337 			struct sdebug_dev_info *dp;
6338 
6339 			spin_lock(&sdebug_host_list_lock);
6340 			list_for_each_entry(sdhp, &sdebug_host_list,
6341 					    host_list) {
6342 				list_for_each_entry(dp, &sdhp->dev_info_list,
6343 						    dev_list) {
6344 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6345 						dp->uas_bm);
6346 				}
6347 			}
6348 			spin_unlock(&sdebug_host_list_lock);
6349 		}
6350 		return count;
6351 	}
6352 	return -EINVAL;
6353 }
6354 static DRIVER_ATTR_RW(virtual_gb);
6355 
6356 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6357 {
6358 	/* absolute number of hosts currently active is what is shown */
6359 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6360 }
6361 
6362 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6363 			      size_t count)
6364 {
6365 	bool found;
6366 	unsigned long idx;
6367 	struct sdeb_store_info *sip;
6368 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6369 	int delta_hosts;
6370 
6371 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6372 		return -EINVAL;
6373 	if (delta_hosts > 0) {
6374 		do {
6375 			found = false;
6376 			if (want_phs) {
6377 				xa_for_each_marked(per_store_ap, idx, sip,
6378 						   SDEB_XA_NOT_IN_USE) {
6379 					sdeb_most_recent_idx = (int)idx;
6380 					found = true;
6381 					break;
6382 				}
6383 				if (found)	/* re-use case */
6384 					sdebug_add_host_helper((int)idx);
6385 				else
6386 					sdebug_do_add_host(true);
6387 			} else {
6388 				sdebug_do_add_host(false);
6389 			}
6390 		} while (--delta_hosts);
6391 	} else if (delta_hosts < 0) {
6392 		do {
6393 			sdebug_do_remove_host(false);
6394 		} while (++delta_hosts);
6395 	}
6396 	return count;
6397 }
6398 static DRIVER_ATTR_RW(add_host);
6399 
6400 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6401 {
6402 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6403 }
6404 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6405 				    size_t count)
6406 {
6407 	int n;
6408 
6409 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6410 		sdebug_vpd_use_hostno = n;
6411 		return count;
6412 	}
6413 	return -EINVAL;
6414 }
6415 static DRIVER_ATTR_RW(vpd_use_hostno);
6416 
6417 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6418 {
6419 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6420 }
6421 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6422 				size_t count)
6423 {
6424 	int n;
6425 
6426 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6427 		if (n > 0)
6428 			sdebug_statistics = true;
6429 		else {
6430 			clear_queue_stats();
6431 			sdebug_statistics = false;
6432 		}
6433 		return count;
6434 	}
6435 	return -EINVAL;
6436 }
6437 static DRIVER_ATTR_RW(statistics);
6438 
6439 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6440 {
6441 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6442 }
6443 static DRIVER_ATTR_RO(sector_size);
6444 
6445 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6446 {
6447 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6448 }
6449 static DRIVER_ATTR_RO(submit_queues);
6450 
6451 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6452 {
6453 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6454 }
6455 static DRIVER_ATTR_RO(dix);
6456 
6457 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6458 {
6459 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6460 }
6461 static DRIVER_ATTR_RO(dif);
6462 
6463 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6464 {
6465 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6466 }
6467 static DRIVER_ATTR_RO(guard);
6468 
6469 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6470 {
6471 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6472 }
6473 static DRIVER_ATTR_RO(ato);
6474 
6475 static ssize_t map_show(struct device_driver *ddp, char *buf)
6476 {
6477 	ssize_t count = 0;
6478 
6479 	if (!scsi_debug_lbp())
6480 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6481 				 sdebug_store_sectors);
6482 
6483 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6484 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6485 
6486 		if (sip)
6487 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6488 					  (int)map_size, sip->map_storep);
6489 	}
6490 	buf[count++] = '\n';
6491 	buf[count] = '\0';
6492 
6493 	return count;
6494 }
6495 static DRIVER_ATTR_RO(map);
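
/*
 * Reading the map attribute dumps the provisioning bitmap of the first
 * store via the "%pbl" printk format, i.e. as a bitmap range list. A
 * sketch of possible output (actual ranges depend on prior writes and
 * unmaps):
 *
 *   $ cat /sys/bus/pseudo/drivers/scsi_debug/map
 *   0-1,32-47,255
 */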
6496 
6497 static ssize_t random_show(struct device_driver *ddp, char *buf)
6498 {
6499 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6500 }
6501 
6502 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6503 			    size_t count)
6504 {
6505 	bool v;
6506 
6507 	if (kstrtobool(buf, &v))
6508 		return -EINVAL;
6509 
6510 	sdebug_random = v;
6511 	return count;
6512 }
6513 static DRIVER_ATTR_RW(random);
6514 
6515 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6516 {
6517 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6518 }
6519 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6520 			       size_t count)
6521 {
6522 	int n;
6523 
6524 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6525 		sdebug_removable = (n > 0);
6526 		return count;
6527 	}
6528 	return -EINVAL;
6529 }
6530 static DRIVER_ATTR_RW(removable);
6531 
6532 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6533 {
6534 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6535 }
6536 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6537 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6538 			       size_t count)
6539 {
6540 	int n;
6541 
6542 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6543 		sdebug_host_lock = (n > 0);
6544 		return count;
6545 	}
6546 	return -EINVAL;
6547 }
6548 static DRIVER_ATTR_RW(host_lock);
6549 
6550 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6551 {
6552 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6553 }
6554 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6555 			    size_t count)
6556 {
6557 	int n;
6558 
6559 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6560 		sdebug_strict = (n > 0);
6561 		return count;
6562 	}
6563 	return -EINVAL;
6564 }
6565 static DRIVER_ATTR_RW(strict);
6566 
6567 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6568 {
6569 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6570 }
6571 static DRIVER_ATTR_RO(uuid_ctl);
6572 
6573 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6574 {
6575 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6576 }
6577 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6578 			     size_t count)
6579 {
6580 	int ret, n;
6581 
6582 	ret = kstrtoint(buf, 0, &n);
6583 	if (ret)
6584 		return ret;
6585 	sdebug_cdb_len = n;
6586 	all_config_cdb_len();
6587 	return count;
6588 }
6589 static DRIVER_ATTR_RW(cdb_len);
6590 
6591 static const char * const zbc_model_strs_a[] = {
6592 	[BLK_ZONED_NONE] = "none",
6593 	[BLK_ZONED_HA]   = "host-aware",
6594 	[BLK_ZONED_HM]   = "host-managed",
6595 };
6596 
6597 static const char * const zbc_model_strs_b[] = {
6598 	[BLK_ZONED_NONE] = "no",
6599 	[BLK_ZONED_HA]   = "aware",
6600 	[BLK_ZONED_HM]   = "managed",
6601 };
6602 
6603 static const char * const zbc_model_strs_c[] = {
6604 	[BLK_ZONED_NONE] = "0",
6605 	[BLK_ZONED_HA]   = "1",
6606 	[BLK_ZONED_HM]   = "2",
6607 };
6608 
6609 static int sdeb_zbc_model_str(const char *cp)
6610 {
6611 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6612 
6613 	if (res < 0) {
6614 		res = sysfs_match_string(zbc_model_strs_b, cp);
6615 		if (res < 0) {
6616 			res = sysfs_match_string(zbc_model_strs_c, cp);
6617 			if (res < 0)
6618 				return -EINVAL;
6619 		}
6620 	}
6621 	return res;
6622 }
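
/*
 * sdeb_zbc_model_str() accepts any spelling from the three tables above;
 * for example "host-managed", "managed" and "2" all map to BLK_ZONED_HM,
 * while an unrecognized string yields -EINVAL.
 */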
6623 
6624 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6625 {
6626 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6627 			 zbc_model_strs_a[sdeb_zbc_model]);
6628 }
6629 static DRIVER_ATTR_RO(zbc);
6630 
6631 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6632 {
6633 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6634 }
6635 static DRIVER_ATTR_RO(tur_ms_to_ready);
6636 
6637 /*
6638  * Note: the following array creates attribute files in the
6639  * /sys/bus/pseudo/drivers/scsi_debug directory. Their advantage over the
6640  * files under /sys/module/scsi_debug/parameters is that auxiliary actions
6641  * can be triggered when an attribute is changed; see add_host_store() above.
6642  */
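
/*
 * For instance, both of the following writes change max_luns, but only
 * the driver-attribute form runs the store callback, which can also do
 * things like raising a "reported LUNs data has changed" unit attention:
 *
 *   echo 4 > /sys/module/scsi_debug/parameters/max_luns
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/max_luns
 */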
6643 
6644 static struct attribute *sdebug_drv_attrs[] = {
6645 	&driver_attr_delay.attr,
6646 	&driver_attr_opts.attr,
6647 	&driver_attr_ptype.attr,
6648 	&driver_attr_dsense.attr,
6649 	&driver_attr_fake_rw.attr,
6650 	&driver_attr_host_max_queue.attr,
6651 	&driver_attr_no_lun_0.attr,
6652 	&driver_attr_num_tgts.attr,
6653 	&driver_attr_dev_size_mb.attr,
6654 	&driver_attr_num_parts.attr,
6655 	&driver_attr_every_nth.attr,
6656 	&driver_attr_lun_format.attr,
6657 	&driver_attr_max_luns.attr,
6658 	&driver_attr_max_queue.attr,
6659 	&driver_attr_no_uld.attr,
6660 	&driver_attr_scsi_level.attr,
6661 	&driver_attr_virtual_gb.attr,
6662 	&driver_attr_add_host.attr,
6663 	&driver_attr_per_host_store.attr,
6664 	&driver_attr_vpd_use_hostno.attr,
6665 	&driver_attr_sector_size.attr,
6666 	&driver_attr_statistics.attr,
6667 	&driver_attr_submit_queues.attr,
6668 	&driver_attr_dix.attr,
6669 	&driver_attr_dif.attr,
6670 	&driver_attr_guard.attr,
6671 	&driver_attr_ato.attr,
6672 	&driver_attr_map.attr,
6673 	&driver_attr_random.attr,
6674 	&driver_attr_removable.attr,
6675 	&driver_attr_host_lock.attr,
6676 	&driver_attr_ndelay.attr,
6677 	&driver_attr_strict.attr,
6678 	&driver_attr_uuid_ctl.attr,
6679 	&driver_attr_cdb_len.attr,
6680 	&driver_attr_tur_ms_to_ready.attr,
6681 	&driver_attr_zbc.attr,
6682 	NULL,
6683 };
6684 ATTRIBUTE_GROUPS(sdebug_drv);
6685 
6686 static struct device *pseudo_primary;
6687 
6688 static int __init scsi_debug_init(void)
6689 {
6690 	bool want_store = (sdebug_fake_rw == 0);
6691 	unsigned long sz;
6692 	int k, ret, hosts_to_add;
6693 	int idx = -1;
6694 
6695 	ramdisk_lck_a[0] = &atomic_rw;
6696 	ramdisk_lck_a[1] = &atomic_rw2;
6697 	atomic_set(&retired_max_queue, 0);
6698 
6699 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6700 		pr_warn("ndelay must be less than 1 second, ignored\n");
6701 		sdebug_ndelay = 0;
6702 	} else if (sdebug_ndelay > 0)
6703 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6704 
6705 	switch (sdebug_sector_size) {
6706 	case  512:
6707 	case 1024:
6708 	case 2048:
6709 	case 4096:
6710 		break;
6711 	default:
6712 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6713 		return -EINVAL;
6714 	}
6715 
6716 	switch (sdebug_dif) {
6717 	case T10_PI_TYPE0_PROTECTION:
6718 		break;
6719 	case T10_PI_TYPE1_PROTECTION:
6720 	case T10_PI_TYPE2_PROTECTION:
6721 	case T10_PI_TYPE3_PROTECTION:
6722 		have_dif_prot = true;
6723 		break;
6724 
6725 	default:
6726 		pr_err("dif must be 0, 1, 2 or 3\n");
6727 		return -EINVAL;
6728 	}
6729 
6730 	if (sdebug_num_tgts < 0) {
6731 		pr_err("num_tgts must be >= 0\n");
6732 		return -EINVAL;
6733 	}
6734 
6735 	if (sdebug_guard > 1) {
6736 		pr_err("guard must be 0 or 1\n");
6737 		return -EINVAL;
6738 	}
6739 
6740 	if (sdebug_ato > 1) {
6741 		pr_err("ato must be 0 or 1\n");
6742 		return -EINVAL;
6743 	}
6744 
6745 	if (sdebug_physblk_exp > 15) {
6746 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6747 		return -EINVAL;
6748 	}
6749 
6750 	sdebug_lun_am = sdebug_lun_am_i;
6751 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6752 		pr_warn("Invalid LUN format %d, using default\n", (int)sdebug_lun_am);
6753 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6754 	}
6755 
6756 	if (sdebug_max_luns > 256) {
6757 		if (sdebug_max_luns > 16384) {
6758 			pr_warn("max_luns can be no more than 16384, using default\n");
6759 			sdebug_max_luns = DEF_MAX_LUNS;
6760 		}
6761 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6762 	}
6763 
6764 	if (sdebug_lowest_aligned > 0x3fff) {
6765 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6766 		return -EINVAL;
6767 	}
6768 
6769 	if (submit_queues < 1) {
6770 		pr_err("submit_queues must be 1 or more\n");
6771 		return -EINVAL;
6772 	}
6773 
6774 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6775 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6776 		return -EINVAL;
6777 	}
6778 
6779 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6780 	    (sdebug_host_max_queue < 0)) {
6781 		pr_err("host_max_queue must be in range [0, %d]\n",
6782 		       SDEBUG_CANQUEUE);
6783 		return -EINVAL;
6784 	}
6785 
6786 	if (sdebug_host_max_queue &&
6787 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6788 		sdebug_max_queue = sdebug_host_max_queue;
6789 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6790 			sdebug_max_queue);
6791 	}
6792 
6793 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6794 			       GFP_KERNEL);
6795 	if (!sdebug_q_arr)
6796 		return -ENOMEM;
6797 	for (k = 0; k < submit_queues; ++k)
6798 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6799 
6800 	/*
6801 	 * check for host managed zoned block device specified with
6802 	 * ptype=0x14 or zbc=XXX.
6803 	 */
6804 	if (sdebug_ptype == TYPE_ZBC) {
6805 		sdeb_zbc_model = BLK_ZONED_HM;
6806 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6807 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6808 		if (k < 0) {
6809 			ret = k;
6810 			goto free_q_arr;
6811 		}
6812 		sdeb_zbc_model = k;
6813 		switch (sdeb_zbc_model) {
6814 		case BLK_ZONED_NONE:
6815 		case BLK_ZONED_HA:
6816 			sdebug_ptype = TYPE_DISK;
6817 			break;
6818 		case BLK_ZONED_HM:
6819 			sdebug_ptype = TYPE_ZBC;
6820 			break;
6821 		default:
6822 			pr_err("Invalid ZBC model\n");
6823 			ret = -EINVAL;
6824 			goto free_q_arr;
6825 		}
6826 	}
6827 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6828 		sdeb_zbc_in_use = true;
6829 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6830 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6831 	}
6832 
6833 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6834 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6835 	if (sdebug_dev_size_mb < 1)
6836 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6837 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6838 	sdebug_store_sectors = sz / sdebug_sector_size;
6839 	sdebug_capacity = get_sdebug_capacity();
6840 
6841 	/* play around with geometry, don't waste too much on track 0 */
6842 	sdebug_heads = 8;
6843 	sdebug_sectors_per = 32;
6844 	if (sdebug_dev_size_mb >= 256)
6845 		sdebug_heads = 64;
6846 	else if (sdebug_dev_size_mb >= 16)
6847 		sdebug_heads = 32;
6848 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6849 			       (sdebug_sectors_per * sdebug_heads);
6850 	if (sdebug_cylinders_per >= 1024) {
6851 		/* other LLDs do this; implies >= 1GB ram disk ... */
6852 		sdebug_heads = 255;
6853 		sdebug_sectors_per = 63;
6854 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6855 			       (sdebug_sectors_per * sdebug_heads);
6856 	}
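
	/*
	 * Worked example of the heuristic above (illustration only): a 1 GB
	 * store with 512-byte sectors has 2097152 sectors; heads=64 and
	 * sectors_per=32 would give 1024 cylinders, so the branch above
	 * switches to heads=255 and sectors_per=63, about 130 cylinders.
	 */
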
6857 	if (scsi_debug_lbp()) {
6858 		sdebug_unmap_max_blocks =
6859 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6860 
6861 		sdebug_unmap_max_desc =
6862 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6863 
6864 		sdebug_unmap_granularity =
6865 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6866 
6867 		if (sdebug_unmap_alignment &&
6868 		    sdebug_unmap_granularity <=
6869 		    sdebug_unmap_alignment) {
6870 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6871 			ret = -EINVAL;
6872 			goto free_q_arr;
6873 		}
6874 	}
6875 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6876 	if (want_store) {
6877 		idx = sdebug_add_store();
6878 		if (idx < 0) {
6879 			ret = idx;
6880 			goto free_q_arr;
6881 		}
6882 	}
6883 
6884 	pseudo_primary = root_device_register("pseudo_0");
6885 	if (IS_ERR(pseudo_primary)) {
6886 		pr_warn("root_device_register() error\n");
6887 		ret = PTR_ERR(pseudo_primary);
6888 		goto free_vm;
6889 	}
6890 	ret = bus_register(&pseudo_lld_bus);
6891 	if (ret < 0) {
6892 		pr_warn("bus_register error: %d\n", ret);
6893 		goto dev_unreg;
6894 	}
6895 	ret = driver_register(&sdebug_driverfs_driver);
6896 	if (ret < 0) {
6897 		pr_warn("driver_register error: %d\n", ret);
6898 		goto bus_unreg;
6899 	}
6900 
6901 	hosts_to_add = sdebug_add_host;
6902 	sdebug_add_host = 0;
6903 
6904 	for (k = 0; k < hosts_to_add; k++) {
6905 		if (want_store && k == 0) {
6906 			ret = sdebug_add_host_helper(idx);
6907 			if (ret < 0) {
6908 				pr_err("add_host_helper k=%d, error=%d\n",
6909 				       k, -ret);
6910 				break;
6911 			}
6912 		} else {
6913 			ret = sdebug_do_add_host(want_store &&
6914 						 sdebug_per_host_store);
6915 			if (ret < 0) {
6916 				pr_err("add_host k=%d error=%d\n", k, -ret);
6917 				break;
6918 			}
6919 		}
6920 	}
6921 	if (sdebug_verbose)
6922 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6923 
6924 	return 0;
6925 
6926 bus_unreg:
6927 	bus_unregister(&pseudo_lld_bus);
6928 dev_unreg:
6929 	root_device_unregister(pseudo_primary);
6930 free_vm:
6931 	sdebug_erase_store(idx, NULL);
6932 free_q_arr:
6933 	kfree(sdebug_q_arr);
6934 	return ret;
6935 }
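
/*
 * A typical module load for manual testing (values are examples only;
 * the parameter names match the sdebug_* variables validated above):
 *
 *   modprobe scsi_debug dev_size_mb=256 sector_size=4096 num_tgts=2 \
 *            max_luns=4 add_host=2
 */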
6936 
6937 static void __exit scsi_debug_exit(void)
6938 {
6939 	int k = sdebug_num_hosts;
6940 
6941 	stop_all_queued();
6942 	for (; k; k--)
6943 		sdebug_do_remove_host(true);
6944 	free_all_queued();
6945 	driver_unregister(&sdebug_driverfs_driver);
6946 	bus_unregister(&pseudo_lld_bus);
6947 	root_device_unregister(pseudo_primary);
6948 
6949 	sdebug_erase_all_stores(false);
6950 	xa_destroy(per_store_ap);
6951 	kfree(sdebug_q_arr);
6952 }
6953 
6954 device_initcall(scsi_debug_init);
6955 module_exit(scsi_debug_exit);
6956 
6957 static void sdebug_release_adapter(struct device *dev)
6958 {
6959 	struct sdebug_host_info *sdbg_host;
6960 
6961 	sdbg_host = to_sdebug_host(dev);
6962 	kfree(sdbg_host);
6963 }
6964 
6965 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6966 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6967 {
6968 	if (idx < 0)
6969 		return;
6970 	if (!sip) {
6971 		if (xa_empty(per_store_ap))
6972 			return;
6973 		sip = xa_load(per_store_ap, idx);
6974 		if (!sip)
6975 			return;
6976 	}
6977 	vfree(sip->map_storep);
6978 	vfree(sip->dif_storep);
6979 	vfree(sip->storep);
6980 	xa_erase(per_store_ap, idx);
6981 	kfree(sip);
6982 }
6983 
6984 /* Assume apart_from_first==false only in shutdown case. */
6985 static void sdebug_erase_all_stores(bool apart_from_first)
6986 {
6987 	unsigned long idx;
6988 	struct sdeb_store_info *sip = NULL;
6989 
6990 	xa_for_each(per_store_ap, idx, sip) {
6991 		if (apart_from_first)
6992 			apart_from_first = false;
6993 		else
6994 			sdebug_erase_store(idx, sip);
6995 	}
6996 	if (apart_from_first)
6997 		sdeb_most_recent_idx = sdeb_first_idx;
6998 }
6999 
7000 /*
7001  * Returns the new element's index (idx) in the store xarray if >= 0;
7002  * otherwise a negated errno. At most 65536 stores are allowed.
7003  */
7004 static int sdebug_add_store(void)
7005 {
7006 	int res;
7007 	u32 n_idx;
7008 	unsigned long iflags;
7009 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7010 	struct sdeb_store_info *sip = NULL;
7011 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7012 
7013 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7014 	if (!sip)
7015 		return -ENOMEM;
7016 
7017 	xa_lock_irqsave(per_store_ap, iflags);
7018 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7019 	if (unlikely(res < 0)) {
7020 		xa_unlock_irqrestore(per_store_ap, iflags);
7021 		kfree(sip);
7022 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7023 		return res;
7024 	}
7025 	sdeb_most_recent_idx = n_idx;
7026 	if (sdeb_first_idx < 0)
7027 		sdeb_first_idx = n_idx;
7028 	xa_unlock_irqrestore(per_store_ap, iflags);
7029 
7030 	res = -ENOMEM;
7031 	sip->storep = vzalloc(sz);
7032 	if (!sip->storep) {
7033 		pr_err("user data oom\n");
7034 		goto err;
7035 	}
7036 	if (sdebug_num_parts > 0)
7037 		sdebug_build_parts(sip->storep, sz);
7038 
7039 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7040 	if (sdebug_dix) {
7041 		int dif_size;
7042 
7043 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7044 		sip->dif_storep = vmalloc(dif_size);
7045 
7046 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7047 			sip->dif_storep);
7048 
7049 		if (!sip->dif_storep) {
7050 			pr_err("DIX oom\n");
7051 			goto err;
7052 		}
7053 		memset(sip->dif_storep, 0xff, dif_size);
7054 	}
7055 	/* Logical Block Provisioning */
7056 	if (scsi_debug_lbp()) {
7057 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7058 		sip->map_storep = vmalloc(array_size(sizeof(long),
7059 						     BITS_TO_LONGS(map_size)));
7060 
7061 		pr_info("%lu provisioning blocks\n", map_size);
7062 
7063 		if (!sip->map_storep) {
7064 			pr_err("LBP map oom\n");
7065 			goto err;
7066 		}
7067 
7068 		bitmap_zero(sip->map_storep, map_size);
7069 
7070 		/* Map first 1KB for partition table */
7071 		if (sdebug_num_parts)
7072 			map_region(sip, 0, 2);
7073 	}
7074 
7075 	rwlock_init(&sip->macc_lck);
7076 	return (int)n_idx;
7077 err:
7078 	sdebug_erase_store((int)n_idx, sip);
7079 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7080 	return res;
7081 }
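
/*
 * Rough sizing of one store, as an example: a 16 MB store (dev_size_mb=16)
 * with 512-byte sectors holds 32768 sectors, so storep takes 16 MiB and,
 * when dix is set, dif_storep needs 32768 * sizeof(struct t10_pi_tuple)
 * = 32768 * 8 bytes = 256 KiB.
 */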
7082 
7083 static int sdebug_add_host_helper(int per_host_idx)
7084 {
7085 	int k, devs_per_host, idx;
7086 	int error = -ENOMEM;
7087 	struct sdebug_host_info *sdbg_host;
7088 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7089 
7090 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7091 	if (!sdbg_host)
7092 		return -ENOMEM;
7093 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7094 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7095 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7096 	sdbg_host->si_idx = idx;
7097 
7098 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7099 
7100 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7101 	for (k = 0; k < devs_per_host; k++) {
7102 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7103 		if (!sdbg_devinfo)
7104 			goto clean;
7105 	}
7106 
7107 	spin_lock(&sdebug_host_list_lock);
7108 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7109 	spin_unlock(&sdebug_host_list_lock);
7110 
7111 	sdbg_host->dev.bus = &pseudo_lld_bus;
7112 	sdbg_host->dev.parent = pseudo_primary;
7113 	sdbg_host->dev.release = &sdebug_release_adapter;
7114 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7115 
7116 	error = device_register(&sdbg_host->dev);
7117 	if (error)
7118 		goto clean;
7119 
7120 	++sdebug_num_hosts;
7121 	return 0;
7122 
7123 clean:
7124 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7125 				 dev_list) {
7126 		list_del(&sdbg_devinfo->dev_list);
7127 		kfree(sdbg_devinfo->zstate);
7128 		kfree(sdbg_devinfo);
7129 	}
7130 	kfree(sdbg_host);
7131 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7132 	return error;
7133 }
7134 
7135 static int sdebug_do_add_host(bool mk_new_store)
7136 {
7137 	int ph_idx = sdeb_most_recent_idx;
7138 
7139 	if (mk_new_store) {
7140 		ph_idx = sdebug_add_store();
7141 		if (ph_idx < 0)
7142 			return ph_idx;
7143 	}
7144 	return sdebug_add_host_helper(ph_idx);
7145 }
7146 
7147 static void sdebug_do_remove_host(bool the_end)
7148 {
7149 	int idx = -1;
7150 	struct sdebug_host_info *sdbg_host = NULL;
7151 	struct sdebug_host_info *sdbg_host2;
7152 
7153 	spin_lock(&sdebug_host_list_lock);
7154 	if (!list_empty(&sdebug_host_list)) {
7155 		sdbg_host = list_entry(sdebug_host_list.prev,
7156 				       struct sdebug_host_info, host_list);
7157 		idx = sdbg_host->si_idx;
7158 	}
7159 	if (!the_end && idx >= 0) {
7160 		bool unique = true;
7161 
7162 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7163 			if (sdbg_host2 == sdbg_host)
7164 				continue;
7165 			if (idx == sdbg_host2->si_idx) {
7166 				unique = false;
7167 				break;
7168 			}
7169 		}
7170 		if (unique) {
7171 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7172 			if (idx == sdeb_most_recent_idx)
7173 				--sdeb_most_recent_idx;
7174 		}
7175 	}
7176 	if (sdbg_host)
7177 		list_del(&sdbg_host->host_list);
7178 	spin_unlock(&sdebug_host_list_lock);
7179 
7180 	if (!sdbg_host)
7181 		return;
7182 
7183 	device_unregister(&sdbg_host->dev);
7184 	--sdebug_num_hosts;
7185 }
7186 
7187 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7188 {
7189 	int num_in_q = 0;
7190 	struct sdebug_dev_info *devip;
7191 
7192 	block_unblock_all_queues(true);
7193 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7194 	if (!devip) {
7195 		block_unblock_all_queues(false);
7196 		return	-ENODEV;
7197 	}
7198 	num_in_q = atomic_read(&devip->num_in_q);
7199 
7200 	if (qdepth > SDEBUG_CANQUEUE) {
7201 		qdepth = SDEBUG_CANQUEUE;
7202 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7203 			qdepth, SDEBUG_CANQUEUE);
7204 	}
7205 	if (qdepth < 1)
7206 		qdepth = 1;
7207 	if (qdepth != sdev->queue_depth)
7208 		scsi_change_queue_depth(sdev, qdepth);
7209 
7210 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7211 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7212 			    __func__, qdepth, num_in_q);
7213 	}
7214 	block_unblock_all_queues(false);
7215 	return sdev->queue_depth;
7216 }
7217 
7218 static bool fake_timeout(struct scsi_cmnd *scp)
7219 {
7220 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0) {
7221 		if (sdebug_every_nth < -1)
7222 			sdebug_every_nth = -1;
7223 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7224 			return true; /* ignore command causing timeout */
7225 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7226 			 scsi_medium_access_command(scp))
7227 			return true; /* time out reads and writes */
7228 	}
7229 	return false;
7230 }
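
/*
 * A sketch of how the above interacts with the every_nth parameter: with
 * every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every 100th command
 * is silently dropped so that mid-level timeout/abort handling can be
 * exercised; every_nth values below -1 are reset to -1 once they fire.
 */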
7231 
7232 /* Response to TUR or media access command when device stopped */
7233 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7234 {
7235 	int stopped_state;
7236 	u64 diff_ns = 0;
7237 	ktime_t now_ts = ktime_get_boottime();
7238 	struct scsi_device *sdp = scp->device;
7239 
7240 	stopped_state = atomic_read(&devip->stopped);
7241 	if (stopped_state == 2) {
7242 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7243 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7244 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7245 				/* tur_ms_to_ready timer extinguished */
7246 				atomic_set(&devip->stopped, 0);
7247 				return 0;
7248 			}
7249 		}
7250 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7251 		if (sdebug_verbose)
7252 			sdev_printk(KERN_INFO, sdp,
7253 				    "%s: Not ready: in process of becoming ready\n", my_name);
7254 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7255 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7256 
7257 			if (diff_ns <= tur_nanosecs_to_ready)
7258 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7259 			else
7260 				diff_ns = tur_nanosecs_to_ready;
7261 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7262 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7263 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7264 						   diff_ns);
7265 			return check_condition_result;
7266 		}
7267 	}
7268 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7269 	if (sdebug_verbose)
7270 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7271 			    my_name);
7272 	return check_condition_result;
7273 }
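
/*
 * Illustration of the above: with tur_ms_to_ready=5000, a TEST UNIT READY
 * issued 2 s after device creation is answered with NOT READY (ASC 0x4,
 * ASCQ 0x1: "in process of becoming ready") and roughly 3000 (ms) in the
 * sense-data information field, per the 20-061r2 proposal cited above.
 */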
7274 
7275 static int sdebug_map_queues(struct Scsi_Host *shost)
7276 {
7277 	int i, qoff;
7278 
7279 	if (shost->nr_hw_queues == 1)
7280 		return 0;
7281 
7282 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7283 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7284 
7285 		map->nr_queues  = 0;
7286 
7287 		if (i == HCTX_TYPE_DEFAULT)
7288 			map->nr_queues = submit_queues - poll_queues;
7289 		else if (i == HCTX_TYPE_POLL)
7290 			map->nr_queues = poll_queues;
7291 
7292 		if (!map->nr_queues) {
7293 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7294 			continue;
7295 		}
7296 
7297 		map->queue_offset = qoff;
7298 		blk_mq_map_queues(map);
7299 
7300 		qoff += map->nr_queues;
7301 	}
7302 
7303 	return 0;
7304 
7305 }
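
/*
 * Worked example of the mapping above: with submit_queues=4 and
 * poll_queues=1, HCTX_TYPE_DEFAULT gets 3 queues at offset 0 and
 * HCTX_TYPE_POLL gets 1 queue at offset 3; HCTX_TYPE_READ keeps
 * nr_queues == 0 and is skipped.
 */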
7306 
7307 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7308 {
7309 	bool first;
7310 	bool retiring = false;
7311 	int num_entries = 0;
7312 	unsigned int qc_idx = 0;
7313 	unsigned long iflags;
7314 	ktime_t kt_from_boot = ktime_get_boottime();
7315 	struct sdebug_queue *sqp;
7316 	struct sdebug_queued_cmd *sqcp;
7317 	struct scsi_cmnd *scp;
7318 	struct sdebug_dev_info *devip;
7319 	struct sdebug_defer *sd_dp;
7320 
7321 	sqp = sdebug_q_arr + queue_num;
7322 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7323 
7324 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7325 		if (first) {
7326 			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7327 			first = false;
7328 		} else {
7329 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7330 		}
7331 		if (unlikely(qc_idx >= sdebug_max_queue))
7332 			break;
7333 
7334 		sqcp = &sqp->qc_arr[qc_idx];
7335 		sd_dp = sqcp->sd_dp;
7336 		if (unlikely(!sd_dp))
7337 			continue;
7338 		scp = sqcp->a_cmnd;
7339 		if (unlikely(scp == NULL)) {
7340 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7341 			       queue_num, qc_idx, __func__);
7342 			break;
7343 		}
7344 		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7345 			if (kt_from_boot < sd_dp->cmpl_ts)
7346 				continue;
7347 
7348 	} else		/* ignore non-REQ_POLLED requests */
7349 			continue;
7350 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7351 		if (likely(devip))
7352 			atomic_dec(&devip->num_in_q);
7353 		else
7354 			pr_err("devip=NULL from %s\n", __func__);
7355 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7356 			retiring = true;
7357 
7358 		sqcp->a_cmnd = NULL;
7359 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7360 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7361 				sqp, queue_num, qc_idx, __func__);
7362 			break;
7363 		}
7364 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7365 			int k, retval;
7366 
7367 			retval = atomic_read(&retired_max_queue);
7368 			if (qc_idx >= retval) {
7369 				pr_err("index %u too large\n", qc_idx);
7370 				break;
7371 			}
7372 			k = find_last_bit(sqp->in_use_bm, retval);
7373 			if ((k < sdebug_max_queue) || (k == retval))
7374 				atomic_set(&retired_max_queue, 0);
7375 			else
7376 				atomic_set(&retired_max_queue, k + 1);
7377 		}
7378 		sd_dp->defer_t = SDEB_DEFER_NONE;
7379 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7380 		scsi_done(scp); /* callback to mid level */
7381 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7382 		num_entries++;
7383 	}
7384 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7385 	if (num_entries > 0)
7386 		atomic_add(num_entries, &sdeb_mq_poll_count);
7387 	return num_entries;
7388 }
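
/*
 * The poll path above only completes requests deferred as SDEB_DEFER_POLL,
 * i.e. those submitted with REQ_POLLED set. One way (an example, not the
 * only one) to generate such traffic is io_uring with polling enabled,
 * e.g. "fio --ioengine=io_uring --hipri ...", with poll_queues > 0.
 */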
7389 
7390 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7391 				   struct scsi_cmnd *scp)
7392 {
7393 	u8 sdeb_i;
7394 	struct scsi_device *sdp = scp->device;
7395 	const struct opcode_info_t *oip;
7396 	const struct opcode_info_t *r_oip;
7397 	struct sdebug_dev_info *devip;
7398 	u8 *cmd = scp->cmnd;
7399 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7400 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7401 	int k, na;
7402 	int errsts = 0;
7403 	u64 lun_index = sdp->lun & 0x3FFF;
7404 	u32 flags;
7405 	u16 sa;
7406 	u8 opcode = cmd[0];
7407 	bool has_wlun_rl;
7408 	bool inject_now;
7409 
7410 	scsi_set_resid(scp, 0);
7411 	if (sdebug_statistics) {
7412 		atomic_inc(&sdebug_cmnd_count);
7413 		inject_now = inject_on_this_cmd();
7414 	} else {
7415 		inject_now = false;
7416 	}
7417 	if (unlikely(sdebug_verbose &&
7418 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7419 		char b[120];
7420 		int n, len, sb;
7421 
7422 		len = scp->cmd_len;
7423 		sb = (int)sizeof(b);
7424 		if (len > 32)
7425 			strcpy(b, "too long, over 32 bytes");
7426 		else {
7427 			for (k = 0, n = 0; k < len && n < sb; ++k)
7428 				n += scnprintf(b + n, sb - n, "%02x ",
7429 					       (u32)cmd[k]);
7430 		}
7431 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7432 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7433 	}
7434 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7435 		return SCSI_MLQUEUE_HOST_BUSY;
7436 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7437 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7438 		goto err_out;
7439 
7440 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7441 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7442 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7443 	if (unlikely(!devip)) {
7444 		devip = find_build_dev_info(sdp);
7445 		if (!devip)
7446 			goto err_out;
7447 	}
7448 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7449 		atomic_set(&sdeb_inject_pending, 1);
7450 
7451 	na = oip->num_attached;
7452 	r_pfp = oip->pfp;
7453 	if (na) {	/* multiple commands with this opcode */
7454 		r_oip = oip;
7455 		if (FF_SA & r_oip->flags) {
7456 			if (F_SA_LOW & oip->flags)
7457 				sa = 0x1f & cmd[1];
7458 			else
7459 				sa = get_unaligned_be16(cmd + 8);
7460 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7461 				if (opcode == oip->opcode && sa == oip->sa)
7462 					break;
7463 			}
7464 		} else {   /* since no service action only check opcode */
7465 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7466 				if (opcode == oip->opcode)
7467 					break;
7468 			}
7469 		}
7470 		if (k > na) {
7471 			if (F_SA_LOW & r_oip->flags)
7472 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7473 			else if (F_SA_HIGH & r_oip->flags)
7474 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7475 			else
7476 				mk_sense_invalid_opcode(scp);
7477 			goto check_cond;
7478 		}
7479 	}	/* else (when na==0) we assume the oip is a match */
7480 	flags = oip->flags;
7481 	if (unlikely(F_INV_OP & flags)) {
7482 		mk_sense_invalid_opcode(scp);
7483 		goto check_cond;
7484 	}
7485 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7486 		if (sdebug_verbose)
7487 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7488 				    my_name, opcode);
7489 		mk_sense_invalid_opcode(scp);
7490 		goto check_cond;
7491 	}
7492 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7493 		u8 rem;
7494 		int j;
7495 
7496 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7497 			rem = ~oip->len_mask[k] & cmd[k];
7498 			if (rem) {
7499 				for (j = 7; j >= 0; --j, rem <<= 1) {
7500 					if (0x80 & rem)
7501 						break;
7502 				}
7503 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7504 				goto check_cond;
7505 			}
7506 		}
7507 	}
7508 	if (unlikely(!(F_SKIP_UA & flags) &&
7509 		     find_first_bit(devip->uas_bm,
7510 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7511 		errsts = make_ua(scp, devip);
7512 		if (errsts)
7513 			goto check_cond;
7514 	}
7515 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7516 		     atomic_read(&devip->stopped))) {
7517 		errsts = resp_not_ready(scp, devip);
7518 		if (errsts)
7519 			goto fini;
7520 	}
7521 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7522 		goto fini;
7523 	if (unlikely(sdebug_every_nth)) {
7524 		if (fake_timeout(scp))
7525 			return 0;	/* ignore command: make trouble */
7526 	}
7527 	if (likely(oip->pfp))
7528 		pfp = oip->pfp;	/* calls a resp_* function */
7529 	else
7530 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7531 
7532 fini:
7533 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7534 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7535 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7536 					    sdebug_ndelay > 10000)) {
7537 		/*
7538 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7539 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7540 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7541 		 * For Synchronize Cache want 1/20 of SSU's delay.
7542 		 */
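		/*
		 * For instance, assuming HZ=250 purely for illustration:
		 * jdelay=1 gives mult_frac(100, 250, 100) = 250 jiffies
		 * (1 s) for SSU and mult_frac(100, 250, 2000) = 12 jiffies
		 * (~50 ms) for SYNCHRONIZE CACHE (USER_HZ=100 assumed).
		 */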
7543 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7544 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7545 
7546 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7547 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7548 	} else
7549 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7550 				     sdebug_ndelay);
7551 check_cond:
7552 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7553 err_out:
7554 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7555 }
7556 
7557 static struct scsi_host_template sdebug_driver_template = {
7558 	.show_info =		scsi_debug_show_info,
7559 	.write_info =		scsi_debug_write_info,
7560 	.proc_name =		sdebug_proc_name,
7561 	.name =			"SCSI DEBUG",
7562 	.info =			scsi_debug_info,
7563 	.slave_alloc =		scsi_debug_slave_alloc,
7564 	.slave_configure =	scsi_debug_slave_configure,
7565 	.slave_destroy =	scsi_debug_slave_destroy,
7566 	.ioctl =		scsi_debug_ioctl,
7567 	.queuecommand =		scsi_debug_queuecommand,
7568 	.change_queue_depth =	sdebug_change_qdepth,
7569 	.map_queues =		sdebug_map_queues,
7570 	.mq_poll =		sdebug_blk_mq_poll,
7571 	.eh_abort_handler =	scsi_debug_abort,
7572 	.eh_device_reset_handler = scsi_debug_device_reset,
7573 	.eh_target_reset_handler = scsi_debug_target_reset,
7574 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7575 	.eh_host_reset_handler = scsi_debug_host_reset,
7576 	.can_queue =		SDEBUG_CANQUEUE,
7577 	.this_id =		7,
7578 	.sg_tablesize =		SG_MAX_SEGMENTS,
7579 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7580 	.max_sectors =		-1U,
7581 	.max_segment_size =	-1U,
7582 	.module =		THIS_MODULE,
7583 	.track_queue_depth =	1,
7584 };
7585 
7586 static int sdebug_driver_probe(struct device *dev)
7587 {
7588 	int error = 0;
7589 	struct sdebug_host_info *sdbg_host;
7590 	struct Scsi_Host *hpnt;
7591 	int hprot;
7592 
7593 	sdbg_host = to_sdebug_host(dev);
7594 
7595 	sdebug_driver_template.can_queue = sdebug_max_queue;
7596 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7597 	if (!sdebug_clustering)
7598 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7599 
7600 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7601 	if (!hpnt) {
7602 		pr_err("scsi_host_alloc failed\n");
7603 		error = -ENODEV;
7604 		return error;
7605 	}
7606 	if (submit_queues > nr_cpu_ids) {
7607 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7608 			my_name, submit_queues, nr_cpu_ids);
7609 		submit_queues = nr_cpu_ids;
7610 	}
7611 	/*
7612 	 * Decide whether to tell scsi subsystem that we want mq. The
7613 	 * following should give the same answer for each host.
7614 	 */
7615 	hpnt->nr_hw_queues = submit_queues;
7616 	if (sdebug_host_max_queue)
7617 		hpnt->host_tagset = 1;
7618 
7619 	/* poll queues are possible for nr_hw_queues > 1 */
7620 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7621 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7622 			 my_name, poll_queues, hpnt->nr_hw_queues);
7623 		poll_queues = 0;
7624 	}
7625 
7626 	/*
7627 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7628 	 * left over for non-polled I/O.
7629 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7630 	 */
7631 	if (poll_queues >= submit_queues) {
7632 		if (submit_queues < 3)
7633 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7634 		else
7635 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7636 				my_name, submit_queues - 1);
7637 		poll_queues = 1;
7638 	}
7639 	if (poll_queues)
7640 		hpnt->nr_maps = 3;
7641 
7642 	sdbg_host->shost = hpnt;
7643 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7644 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7645 		hpnt->max_id = sdebug_num_tgts + 1;
7646 	else
7647 		hpnt->max_id = sdebug_num_tgts;
7648 	/* max_lun was formerly set to sdebug_max_luns */
7649 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7650 
7651 	hprot = 0;
7652 
7653 	switch (sdebug_dif) {
7654 
7655 	case T10_PI_TYPE1_PROTECTION:
7656 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7657 		if (sdebug_dix)
7658 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7659 		break;
7660 
7661 	case T10_PI_TYPE2_PROTECTION:
7662 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7663 		if (sdebug_dix)
7664 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7665 		break;
7666 
7667 	case T10_PI_TYPE3_PROTECTION:
7668 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7669 		if (sdebug_dix)
7670 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7671 		break;
7672 
7673 	default:
7674 		if (sdebug_dix)
7675 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7676 		break;
7677 	}
7678 
7679 	scsi_host_set_prot(hpnt, hprot);
7680 
7681 	if (have_dif_prot || sdebug_dix)
7682 		pr_info("host protection%s%s%s%s%s%s%s\n",
7683 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7684 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7685 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7686 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7687 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7688 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7689 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7690 
7691 	if (sdebug_guard == 1)
7692 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7693 	else
7694 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7695 
7696 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7697 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7698 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7699 		sdebug_statistics = true;
7700 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7701 	if (error) {
7702 		pr_err("scsi_add_host failed\n");
7703 		error = -ENODEV;
7704 		scsi_host_put(hpnt);
7705 	} else {
7706 		scsi_scan_host(hpnt);
7707 	}
7708 
7709 	return error;
7710 }
7711 
7712 static void sdebug_driver_remove(struct device *dev)
7713 {
7714 	struct sdebug_host_info *sdbg_host;
7715 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7716 
7717 	sdbg_host = to_sdebug_host(dev);
7718 
7719 	scsi_remove_host(sdbg_host->shost);
7720 
7721 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7722 				 dev_list) {
7723 		list_del(&sdbg_devinfo->dev_list);
7724 		kfree(sdbg_devinfo->zstate);
7725 		kfree(sdbg_devinfo);
7726 	}
7727 
7728 	scsi_host_put(sdbg_host->shost);
7729 }
7730 
7731 static int pseudo_lld_bus_match(struct device *dev,
7732 				struct device_driver *dev_driver)
7733 {
7734 	return 1;
7735 }
7736 
7737 static struct bus_type pseudo_lld_bus = {
7738 	.name = "pseudo",
7739 	.match = pseudo_lld_bus_match,
7740 	.probe = sdebug_driver_probe,
7741 	.remove = sdebug_driver_remove,
7742 	.drv_groups = sdebug_drv_groups,
7743 };
7744