// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
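
/*
 * Illustrative sketch (not part of the driver): a UA is queued by setting
 * its bit in a device's uas_bm bitmap, and make_ua() further below drains
 * the lowest-numbered (highest priority) pending UA first:
 *
 *	set_bit(SDEBUG_UA_POR, devip->uas_bm);
 *	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	// k == SDEBUG_UA_POR, reported before any lower-priority UA
 */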

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the number of bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
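
/*
 * Worked example: on a typical 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 commands per submit queue (96 on a
 * 32-bit build where BITS_PER_LONG is 32).
 */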

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
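
/*
 * Illustrative sketch (hypothetical oip pointer into the opcode table
 * defined further below, not part of the driver): these flag groups are
 * tested with plain bitwise AND, e.g.:
 *
 *	if (oip->flags & FF_SA)		// SA in cdb byte 1 or bytes 8+9
 *		...
 *	if (sdebug_fake_rw && (oip->flags & F_FAKE_RW))
 *		...			// skip the resp_*() data phase
 */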

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
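
/*
 * Illustrative sketch (hypothetical helper, assuming strict checking):
 * len_mask[0] gives the cdb length and len_mask[1..] the bits that may
 * legally be set in each cdb byte, so an over-specified cdb could be
 * detected with something like:
 *
 *	for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
 *		if (cdb[k] & ~oip->len_mask[k])
 *			return k;	// first offending cdb byte
 */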

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
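
/*
 * Illustrative sketch (not part of the driver): command decode is a
 * two-step table walk. The cdb opcode selects an SDEB_I_* index, which
 * selects the leading entry in opcode_info_arr[] (defined below);
 * variants sharing that index hang off its arrp overflow array:
 *
 *	idx = opcode_ind_arr[cdb[0]];	// e.g. 0x28 -> SDEB_I_READ
 *	oip = &opcode_info_arr[idx];	// READ(16) is the preferred form
 *	// if cdb[0] != oip->opcode, scan oip->arrp for the exact variant
 */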

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for faster
 * command completion, they can OR their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
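
/*
 * Illustrative sketch (hypothetical response function; the IMMED bit
 * position varies per command, cmnd[1] bit 0 is an assumption here):
 *
 *	static int resp_example(struct scsi_cmnd *scp,
 *				struct sdebug_dev_info *devip)
 *	{
 *		if (scp->cmnd[1] & 0x1)	// IMMED: complete early
 *			return 0 | SDEG_RES_IMMED_MASK;
 *		return 0;
 *	}
 */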

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
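
/*
 * Example: MODE SENSE(10) (opcode 0x5a) is the preferred variant and sits
 * in opcode_info_arr[SDEB_I_MODE_SENSE]; MODE SENSE(6) (0x1a) lands on the
 * same index and is therefore parked in msense_iarr[] just below.
 */
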
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
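
/*
 * Worked example: when the advertised capacity exceeds the backing store
 * (e.g. virtual_gb set), LBAs wrap. With sdebug_store_sectors == 0x4000,
 * lba2fake_store(sip, 0x4005) maps to the same byte offset as LBA 0x5,
 * because do_div() above reduces the LBA modulo the store size.
 */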

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
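
/*
 * Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5) yields a
 * sense-key specific field of {0xcd, 0x00, 0x02}: 0x80 (SKSV) | 0x40
 * (C/D, error in the cdb) | 0x08 (BPV) | 0x05 (bit), followed by byte
 * position 2 big-endian. In fixed format this lands at sense bytes 15-17;
 * with descriptor sense it is appended as a type 0x2, length 0x6
 * descriptor.
 */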

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
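
/*
 * Worked example: with scsi_bufflen() == 512, a call writing 64 bytes at
 * off_dst == 128 leaves n = 512 - (128 + 64) = 320, so resid can only
 * shrink (min_t above) and out-of-order partial fills never inflate it.
 */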

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
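
/*
 * Worked example: na1 above is 29 characters, so plen = 30 is rounded up
 * to the next multiple of 4, giving a 32 byte null-terminated, padded
 * network address field.
 */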

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

1499 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1502 }
1503 
1504 /* Block device characteristics VPD page (SBC-3) */
1505 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1506 {
1507 	memset(arr, 0, 0x3c);
1508 	arr[0] = 0;
1509 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1510 	arr[2] = 0;
1511 	arr[3] = 5;	/* less than 1.8" */
1512 	if (devip->zmodel == BLK_ZONED_HA)
1513 		arr[4] = 1 << 4;	/* zoned field = 01b */
1514 
1515 	return 0x3c;
1516 }
1517 
1518 /* Logical block provisioning VPD page (SBC-4) */
1519 static int inquiry_vpd_b2(unsigned char *arr)
1520 {
1521 	memset(arr, 0, 0x4);
1522 	arr[0] = 0;			/* threshold exponent */
1523 	if (sdebug_lbpu)
1524 		arr[1] = 1 << 7;
1525 	if (sdebug_lbpws)
1526 		arr[1] |= 1 << 6;
1527 	if (sdebug_lbpws10)
1528 		arr[1] |= 1 << 5;
1529 	if (sdebug_lbprz && scsi_debug_lbp())
1530 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1531 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1532 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1533 	/* threshold_percentage=0 */
1534 	return 0x4;
1535 }
1536 
1537 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1538 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1539 {
1540 	memset(arr, 0, 0x3c);
1541 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1542 	/*
1543 	 * Set Optimal number of open sequential write preferred zones and
1544 	 * Optimal number of non-sequentially written sequential write
1545 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1546 	 * fields set to zero, apart from Max. number of open swrz_s field.
1547 	 */
1548 	put_unaligned_be32(0xffffffff, &arr[4]);
1549 	put_unaligned_be32(0xffffffff, &arr[8]);
1550 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1551 		put_unaligned_be32(devip->max_open, &arr[12]);
1552 	else
1553 		put_unaligned_be32(0xffffffff, &arr[12]);
1554 	return 0x3c;
1555 }
1556 
1557 #define SDEBUG_LONG_INQ_SZ 96
1558 #define SDEBUG_MAX_INQ_ARR_SZ 584
1559 
1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1561 {
1562 	unsigned char pq_pdt;
1563 	unsigned char *arr;
1564 	unsigned char *cmd = scp->cmnd;
1565 	u32 alloc_len, n;
1566 	int ret;
1567 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1568 
1569 	alloc_len = get_unaligned_be16(cmd + 3);
1570 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 	if (!arr)
1572 		return DID_REQUEUE << 16;
1573 	is_disk = (sdebug_ptype == TYPE_DISK);
1574 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1575 	is_disk_zbc = (is_disk || is_zbc);
1576 	have_wlun = scsi_is_wlun(scp->device->lun);
1577 	if (have_wlun)
1578 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1579 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1580 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1581 	else
1582 		pq_pdt = (sdebug_ptype & 0x1f);
1583 	arr[0] = pq_pdt;
1584 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1585 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1586 		kfree(arr);
1587 		return check_condition_result;
1588 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1589 		int lu_id_num, port_group_id, target_dev_id;
1590 		u32 len;
1591 		char lu_id_str[6];
1592 		int host_no = devip->sdbg_host->shost->host_no;
1593 
1594 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1595 		    (devip->channel & 0x7f);
1596 		if (sdebug_vpd_use_hostno == 0)
1597 			host_no = 0;
1598 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1599 			    (devip->target * 1000) + devip->lun);
1600 		target_dev_id = ((host_no + 1) * 2000) +
1601 				 (devip->target * 1000) - 3;
1602 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1603 		if (0 == cmd[2]) { /* supported vital product data pages */
1604 			arr[1] = cmd[2];	/* sanity */
1605 			n = 4;
1606 			arr[n++] = 0x0;   /* this page */
1607 			arr[n++] = 0x80;  /* unit serial number */
1608 			arr[n++] = 0x83;  /* device identification */
1609 			arr[n++] = 0x84;  /* software interface ident. */
1610 			arr[n++] = 0x85;  /* management network addresses */
1611 			arr[n++] = 0x86;  /* extended inquiry */
1612 			arr[n++] = 0x87;  /* mode page policy */
1613 			arr[n++] = 0x88;  /* SCSI ports */
1614 			if (is_disk_zbc) {	  /* SBC or ZBC */
1615 				arr[n++] = 0x89;  /* ATA information */
1616 				arr[n++] = 0xb0;  /* Block limits */
1617 				arr[n++] = 0xb1;  /* Block characteristics */
1618 				if (is_disk)
1619 					arr[n++] = 0xb2;  /* LB Provisioning */
1620 				if (is_zbc)
1621 					arr[n++] = 0xb6;  /* ZB dev. char. */
1622 			}
1623 			arr[3] = n - 4;	  /* number of supported VPD pages */
1624 		} else if (0x80 == cmd[2]) { /* unit serial number */
1625 			arr[1] = cmd[2];	/* sanity */
1626 			arr[3] = len;
1627 			memcpy(&arr[4], lu_id_str, len);
1628 		} else if (0x83 == cmd[2]) { /* device identification */
1629 			arr[1] = cmd[2];	/* sanity */
1630 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1631 						target_dev_id, lu_id_num,
1632 						lu_id_str, len,
1633 						&devip->lu_name);
1634 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1635 			arr[1] = cmd[2];	/* sanity */
1636 			arr[3] = inquiry_vpd_84(&arr[4]);
1637 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1638 			arr[1] = cmd[2];	/* sanity */
1639 			arr[3] = inquiry_vpd_85(&arr[4]);
1640 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1641 			arr[1] = cmd[2];	/* sanity */
1642 			arr[3] = 0x3c;	/* number of following entries */
1643 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1644 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1645 			else if (have_dif_prot)
1646 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1647 			else
1648 				arr[4] = 0x0;   /* no protection stuff */
1649 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1650 		} else if (0x87 == cmd[2]) { /* mode page policy */
1651 			arr[1] = cmd[2];	/* sanity */
1652 			arr[3] = 0x8;	/* number of following entries */
1653 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1654 			arr[6] = 0x80;	/* mlus, shared */
1655 			arr[8] = 0x18;	 /* protocol specific lu */
1656 			arr[10] = 0x82;	 /* mlus, per initiator port */
1657 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1658 			arr[1] = cmd[2];	/* sanity */
1659 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1660 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1661 			arr[1] = cmd[2];        /* sanity */
1662 			n = inquiry_vpd_89(&arr[4]);
1663 			put_unaligned_be16(n, arr + 2);
1664 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1665 			arr[1] = cmd[2];        /* sanity */
1666 			arr[3] = inquiry_vpd_b0(&arr[4]);
1667 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1668 			arr[1] = cmd[2];        /* sanity */
1669 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1670 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1671 			arr[1] = cmd[2];        /* sanity */
1672 			arr[3] = inquiry_vpd_b2(&arr[4]);
1673 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1674 			arr[1] = cmd[2];        /* sanity */
1675 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1676 		} else {
1677 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1678 			kfree(arr);
1679 			return check_condition_result;
1680 		}
1681 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1682 		ret = fill_from_dev_buffer(scp, arr,
1683 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1684 		kfree(arr);
1685 		return ret;
1686 	}
1687 	/* drops through here for a standard inquiry */
1688 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1689 	arr[2] = sdebug_scsi_level;
1690 	arr[3] = 2;    /* response_data_format==2 */
1691 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1692 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1693 	if (sdebug_vpd_use_hostno == 0)
1694 		arr[5] |= 0x10; /* claim: implicit TPGS */
1695 	arr[6] = 0x10; /* claim: MultiP */
1696 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1697 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1698 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1699 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1700 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1701 	/* Use Vendor Specific area to place driver date in ASCII */
1702 	memcpy(&arr[36], sdebug_version_date, 8);
1703 	/* version descriptors (2 bytes each) follow */
1704 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1705 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1706 	n = 62;
1707 	if (is_disk) {		/* SBC-4 no version claimed */
1708 		put_unaligned_be16(0x600, arr + n);
1709 		n += 2;
1710 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1711 		put_unaligned_be16(0x525, arr + n);
1712 		n += 2;
1713 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1714 		put_unaligned_be16(0x624, arr + n);
1715 		n += 2;
1716 	}
1717 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1718 	ret = fill_from_dev_buffer(scp, arr,
1719 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1720 	kfree(arr);
1721 	return ret;
1722 }
1723 
1724 /* See resp_iec_m_pg() for how this data is manipulated */
1725 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1726 				   0, 0, 0x0, 0x0};
1727 
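/*
 * REQUEST SENSE. The DESC bit in cmd[1] selects descriptor format sense
 * data (response code 0x72, 8 bytes here) over fixed format sense data
 * (response code 0x70, 18 bytes).
 */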
1728 static int resp_requests(struct scsi_cmnd *scp,
1729 			 struct sdebug_dev_info *devip)
1730 {
1731 	unsigned char *cmd = scp->cmnd;
1732 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1733 	bool dsense = !!(cmd[1] & 1);
1734 	u32 alloc_len = cmd[4];
1735 	u32 len = 18;
1736 	int stopped_state = atomic_read(&devip->stopped);
1737 
1738 	memset(arr, 0, sizeof(arr));
1739 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1740 		if (dsense) {
1741 			arr[0] = 0x72;
1742 			arr[1] = NOT_READY;
1743 			arr[2] = LOGICAL_UNIT_NOT_READY;
1744 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1745 			len = 8;
1746 		} else {
1747 			arr[0] = 0x70;
1748 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1749 			arr[7] = 0xa;			/* 18 byte sense buffer */
1750 			arr[12] = LOGICAL_UNIT_NOT_READY;
1751 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1752 		}
1753 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1755 		if (dsense) {
1756 			arr[0] = 0x72;
1757 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1758 			arr[2] = THRESHOLD_EXCEEDED;
1759 			arr[3] = 0xff;		/* Failure prediction(false) */
1760 			len = 8;
1761 		} else {
1762 			arr[0] = 0x70;
1763 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1764 			arr[7] = 0xa;			/* 18 byte sense buffer */
1765 			arr[12] = THRESHOLD_EXCEEDED;
1766 			arr[13] = 0xff;		/* Failure prediction(false) */
1767 		}
1768 	} else {	/* nothing to report */
1769 		if (dsense) {
1770 			len = 8;
1771 			memset(arr, 0, len);
1772 			arr[0] = 0x72;
1773 		} else {
1774 			memset(arr, 0, len);
1775 			arr[0] = 0x70;
1776 			arr[7] = 0xa;
1777 		}
1778 	}
1779 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1780 }
1781 
1782 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1783 {
1784 	unsigned char *cmd = scp->cmnd;
1785 	int power_cond, want_stop, stopped_state;
1786 	bool changing;
1787 
1788 	power_cond = (cmd[4] & 0xf0) >> 4;
1789 	if (power_cond) {
1790 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1791 		return check_condition_result;
1792 	}
1793 	want_stop = !(cmd[4] & 1);
1794 	stopped_state = atomic_read(&devip->stopped);
1795 	if (stopped_state == 2) {
1796 		ktime_t now_ts = ktime_get_boottime();
1797 
1798 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1800 
1801 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 				/* tur_ms_to_ready timer expired */
1803 				atomic_set(&devip->stopped, 0);
1804 				stopped_state = 0;
1805 			}
1806 		}
1807 		if (stopped_state == 2) {
1808 			if (want_stop) {
1809 				stopped_state = 1;	/* dummy up success */
1810 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1811 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 				return check_condition_result;
1813 			}
1814 		}
1815 	}
1816 	changing = (stopped_state != want_stop);
1817 	if (changing)
1818 		atomic_xchg(&devip->stopped, want_stop);
1819 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1820 		return SDEG_RES_IMMED_MASK;
1821 	else
1822 		return 0;
1823 }
1824 
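/*
 * Device capacity in sectors. When virtual_gb is set, the advertised
 * capacity may exceed the backing store; accesses are mapped back onto
 * the store modulo sdebug_store_sectors (see do_device_access()).
 */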
1825 static sector_t get_sdebug_capacity(void)
1826 {
1827 	static const unsigned int gibibyte = 1073741824;
1828 
1829 	if (sdebug_virtual_gb > 0)
1830 		return (sector_t)sdebug_virtual_gb *
1831 			(gibibyte / sdebug_sector_size);
1832 	else
1833 		return sdebug_store_sectors;
1834 }
1835 
1836 #define SDEBUG_READCAP_ARR_SZ 8
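/*
 * READ CAPACITY(10): report the last LBA and the logical block size. A
 * capacity of 2**32 blocks or more is reported as 0xffffffff, telling the
 * initiator to issue READ CAPACITY(16) instead (see resp_readcap16()).
 */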
1837 static int resp_readcap(struct scsi_cmnd *scp,
1838 			struct sdebug_dev_info *devip)
1839 {
1840 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1841 	unsigned int capac;
1842 
1843 	/* following just in case virtual_gb changed */
1844 	sdebug_capacity = get_sdebug_capacity();
1845 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1846 	if (sdebug_capacity < 0xffffffff) {
1847 		capac = (unsigned int)sdebug_capacity - 1;
1848 		put_unaligned_be32(capac, arr + 0);
1849 	} else
1850 		put_unaligned_be32(0xffffffff, arr + 0);
1851 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1852 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1853 }
1854 
1855 #define SDEBUG_READCAP16_ARR_SZ 32
1856 static int resp_readcap16(struct scsi_cmnd *scp,
1857 			  struct sdebug_dev_info *devip)
1858 {
1859 	unsigned char *cmd = scp->cmnd;
1860 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1861 	u32 alloc_len;
1862 
1863 	alloc_len = get_unaligned_be32(cmd + 10);
1864 	/* following just in case virtual_gb changed */
1865 	sdebug_capacity = get_sdebug_capacity();
1866 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1867 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 	arr[13] = sdebug_physblk_exp & 0xf;
1870 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1871 
1872 	if (scsi_debug_lbp()) {
1873 		arr[14] |= 0x80; /* LBPME */
1874 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 		 * in the wider field maps to 0 in this field.
1877 		 */
1878 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1879 			arr[14] |= 0x40;
1880 	}
1881 
1882 	arr[15] = sdebug_lowest_aligned & 0xff;
1883 
1884 	if (have_dif_prot) {
1885 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1886 		arr[12] |= 1; /* PROT_EN */
1887 	}
1888 
1889 	return fill_from_dev_buffer(scp, arr,
1890 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1891 }
1892 
1893 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1894 
1895 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1896 			      struct sdebug_dev_info *devip)
1897 {
1898 	unsigned char *cmd = scp->cmnd;
1899 	unsigned char *arr;
1900 	int host_no = devip->sdbg_host->shost->host_no;
1901 	int port_group_a, port_group_b, port_a, port_b;
1902 	u32 alen, n, rlen;
1903 	int ret;
1904 
1905 	alen = get_unaligned_be32(cmd + 6);
1906 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1907 	if (!arr)
1908 		return DID_REQUEUE << 16;
1909 	/*
1910 	 * The SCSI Ports VPD page (0x88) states we have two ports, one
1911 	 * real and one fake with no device connected.
1912 	 * So we create two port groups with one port each
1913 	 * and set the group with port B to unavailable.
1914 	 */
1915 	port_a = 0x1; /* relative port A */
1916 	port_b = 0x2; /* relative port B */
1917 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 			(devip->channel & 0x7f);
1919 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 			(devip->channel & 0x7f) + 0x80;
1921 
1922 	/*
1923 	 * The asymmetric access state is cycled according to the host_no.
1924 	 */
1925 	n = 4;
1926 	if (sdebug_vpd_use_hostno == 0) {
1927 		arr[n++] = host_no % 3; /* Asymm access state */
1928 		arr[n++] = 0x0F; /* claim: all states are supported */
1929 	} else {
1930 		arr[n++] = 0x0; /* Active/Optimized path */
1931 		arr[n++] = 0x01; /* only support active/optimized paths */
1932 	}
1933 	put_unaligned_be16(port_group_a, arr + n);
1934 	n += 2;
1935 	arr[n++] = 0;    /* Reserved */
1936 	arr[n++] = 0;    /* Status code */
1937 	arr[n++] = 0;    /* Vendor unique */
1938 	arr[n++] = 0x1;  /* One port per group */
1939 	arr[n++] = 0;    /* Reserved */
1940 	arr[n++] = 0;    /* Reserved */
1941 	put_unaligned_be16(port_a, arr + n);
1942 	n += 2;
1943 	arr[n++] = 3;    /* Port unavailable */
1944 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1945 	put_unaligned_be16(port_group_b, arr + n);
1946 	n += 2;
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Status code */
1949 	arr[n++] = 0;    /* Vendor unique */
1950 	arr[n++] = 0x1;  /* One port per group */
1951 	arr[n++] = 0;    /* Reserved */
1952 	arr[n++] = 0;    /* Reserved */
1953 	put_unaligned_be16(port_b, arr + n);
1954 	n += 2;
1955 
1956 	rlen = n - 4;
1957 	put_unaligned_be32(rlen, arr + 0);
1958 
1959 	/*
1960 	 * Return the smallest value of either
1961 	 * - The allocated length
1962 	 * - The constructed command length
1963 	 * - The maximum array size
1964 	 */
1965 	rlen = min(alen, n);
1966 	ret = fill_from_dev_buffer(scp, arr,
1967 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1968 	kfree(arr);
1969 	return ret;
1970 }
1971 
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 			     struct sdebug_dev_info *devip)
1974 {
1975 	bool rctd;
1976 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1977 	u16 req_sa, u;
1978 	u32 alloc_len, a_len;
1979 	int k, offset, len, errsts, count, bump, na;
1980 	const struct opcode_info_t *oip;
1981 	const struct opcode_info_t *r_oip;
1982 	u8 *arr;
1983 	u8 *cmd = scp->cmnd;
1984 
1985 	rctd = !!(cmd[2] & 0x80);
1986 	reporting_opts = cmd[2] & 0x7;
1987 	req_opcode = cmd[3];
1988 	req_sa = get_unaligned_be16(cmd + 4);
1989 	alloc_len = get_unaligned_be32(cmd + 6);
1990 	if (alloc_len < 4 || alloc_len > 0xffff) {
1991 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 		return check_condition_result;
1993 	}
1994 	if (alloc_len > 8192)
1995 		a_len = 8192;
1996 	else
1997 		a_len = alloc_len;
1998 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1999 	if (NULL == arr) {
2000 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2001 				INSUFF_RES_ASCQ);
2002 		return check_condition_result;
2003 	}
2004 	switch (reporting_opts) {
2005 	case 0:	/* all commands */
2006 		/* count number of commands */
2007 		for (count = 0, oip = opcode_info_arr;
2008 		     oip->num_attached != 0xff; ++oip) {
2009 			if (F_INV_OP & oip->flags)
2010 				continue;
2011 			count += (oip->num_attached + 1);
2012 		}
2013 		bump = rctd ? 20 : 8;
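		/* each command descriptor is 8 bytes; with RCTD=1 a 12 byte
		 * command timeouts descriptor follows each one (8 + 12 = 20) */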
2014 		put_unaligned_be32(count * bump, arr);
2015 		for (offset = 4, oip = opcode_info_arr;
2016 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 			if (F_INV_OP & oip->flags)
2018 				continue;
2019 			na = oip->num_attached;
2020 			arr[offset] = oip->opcode;
2021 			put_unaligned_be16(oip->sa, arr + offset + 2);
2022 			if (rctd)
2023 				arr[offset + 5] |= 0x2;
2024 			if (FF_SA & oip->flags)
2025 				arr[offset + 5] |= 0x1;
2026 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2027 			if (rctd)
2028 				put_unaligned_be16(0xa, arr + offset + 8);
2029 			r_oip = oip;
2030 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 				if (F_INV_OP & oip->flags)
2032 					continue;
2033 				offset += bump;
2034 				arr[offset] = oip->opcode;
2035 				put_unaligned_be16(oip->sa, arr + offset + 2);
2036 				if (rctd)
2037 					arr[offset + 5] |= 0x2;
2038 				if (FF_SA & oip->flags)
2039 					arr[offset + 5] |= 0x1;
2040 				put_unaligned_be16(oip->len_mask[0],
2041 						   arr + offset + 6);
2042 				if (rctd)
2043 					put_unaligned_be16(0xa,
2044 							   arr + offset + 8);
2045 			}
2046 			oip = r_oip;
2047 			offset += bump;
2048 		}
2049 		break;
2050 	case 1:	/* one command: opcode only */
2051 	case 2:	/* one command: opcode plus service action */
2052 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2053 		sdeb_i = opcode_ind_arr[req_opcode];
2054 		oip = &opcode_info_arr[sdeb_i];
2055 		if (F_INV_OP & oip->flags) {
2056 			supp = 1;
2057 			offset = 4;
2058 		} else {
2059 			if (1 == reporting_opts) {
2060 				if (FF_SA & oip->flags) {
2061 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 							     2, 2);
2063 					kfree(arr);
2064 					return check_condition_result;
2065 				}
2066 				req_sa = 0;
2067 			} else if (2 == reporting_opts &&
2068 				   0 == (FF_SA & oip->flags)) {
				/* field pointer indicates the requested sa */
2069 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 				kfree(arr);
2071 				return check_condition_result;
2072 			}
2073 			if (0 == (FF_SA & oip->flags) &&
2074 			    req_opcode == oip->opcode)
2075 				supp = 3;
2076 			else if (0 == (FF_SA & oip->flags)) {
2077 				na = oip->num_attached;
2078 				for (k = 0, oip = oip->arrp; k < na;
2079 				     ++k, ++oip) {
2080 					if (req_opcode == oip->opcode)
2081 						break;
2082 				}
2083 				supp = (k >= na) ? 1 : 3;
2084 			} else if (req_sa != oip->sa) {
2085 				na = oip->num_attached;
2086 				for (k = 0, oip = oip->arrp; k < na;
2087 				     ++k, ++oip) {
2088 					if (req_sa == oip->sa)
2089 						break;
2090 				}
2091 				supp = (k >= na) ? 1 : 3;
2092 			} else
2093 				supp = 3;
2094 			if (3 == supp) {
2095 				u = oip->len_mask[0];
2096 				put_unaligned_be16(u, arr + 2);
2097 				arr[4] = oip->opcode;
2098 				for (k = 1; k < u; ++k)
2099 					arr[4 + k] = (k < 16) ?
2100 						 oip->len_mask[k] : 0xff;
2101 				offset = 4 + u;
2102 			} else
2103 				offset = 4;
2104 		}
2105 		arr[1] = (rctd ? 0x80 : 0) | supp;
2106 		if (rctd) {
2107 			put_unaligned_be16(0xa, arr + offset);
2108 			offset += 12;
2109 		}
2110 		break;
2111 	default:
2112 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2113 		kfree(arr);
2114 		return check_condition_result;
2115 	}
2116 	offset = (offset < a_len) ? offset : a_len;
2117 	len = (offset < alloc_len) ? offset : alloc_len;
2118 	errsts = fill_from_dev_buffer(scp, arr, len);
2119 	kfree(arr);
2120 	return errsts;
2121 }
2122 
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 			  struct sdebug_dev_info *devip)
2125 {
2126 	bool repd;
2127 	u32 alloc_len, len;
2128 	u8 arr[16];
2129 	u8 *cmd = scp->cmnd;
2130 
2131 	memset(arr, 0, sizeof(arr));
2132 	repd = !!(cmd[2] & 0x80);
2133 	alloc_len = get_unaligned_be32(cmd + 6);
2134 	if (alloc_len < 4) {
2135 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 		return check_condition_result;
2137 	}
2138 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2139 	arr[1] = 0x1;		/* ITNRS */
2140 	if (repd) {
2141 		arr[3] = 0xc;
2142 		len = 16;
2143 	} else
2144 		len = 4;
2145 
2146 	len = (len < alloc_len) ? len : alloc_len;
2147 	return fill_from_dev_buffer(scp, arr, len);
2148 }
2149 
2150 /* <<Following mode page info copied from ST318451LW>> */
2151 
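/*
 * For the mode page builders below, pcontrol is the MODE SENSE page
 * control field: 0 -> current values, 1 -> changeable mask, 2 -> default
 * values (3, saved values, is rejected earlier with SAVING_PARAMS_UNSUP).
 */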
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 {	/* Read-Write Error Recovery page for mode_sense */
2154 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 					5, 0, 0xff, 0xff};
2156 
2157 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2158 	if (1 == pcontrol)
2159 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 	return sizeof(err_recov_pg);
2161 }
2162 
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { 	/* Disconnect-Reconnect page for mode_sense */
2165 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 					 0, 0, 0, 0, 0, 0, 0, 0};
2167 
2168 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2169 	if (1 == pcontrol)
2170 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 	return sizeof(disconnect_pg);
2172 }
2173 
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 {       /* Format device page for mode_sense */
2176 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 				     0, 0, 0, 0, 0, 0, 0, 0,
2178 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2179 
2180 	memcpy(p, format_pg, sizeof(format_pg));
2181 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 	put_unaligned_be16(sdebug_sector_size, p + 12);
2183 	if (sdebug_removable)
2184 		p[20] |= 0x20; /* should agree with INQUIRY */
2185 	if (1 == pcontrol)
2186 		memset(p + 2, 0, sizeof(format_pg) - 2);
2187 	return sizeof(format_pg);
2188 }
2189 
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 				     0, 0, 0, 0};
2193 
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { 	/* Caching page for mode_sense */
2196 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2200 
2201 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2203 	memcpy(p, caching_pg, sizeof(caching_pg));
2204 	if (1 == pcontrol)
2205 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 	else if (2 == pcontrol)
2207 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 	return sizeof(caching_pg);
2209 }
2210 
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 				    0, 0, 0x2, 0x4b};
2213 
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { 	/* Control mode page for mode_sense */
2216 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2217 					0, 0, 0, 0};
2218 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2219 				     0, 0, 0x2, 0x4b};
2220 
2221 	if (sdebug_dsense)
2222 		ctrl_m_pg[2] |= 0x4;
2223 	else
2224 		ctrl_m_pg[2] &= ~0x4;
2225 
2226 	if (sdebug_ato)
2227 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2228 
2229 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2230 	if (1 == pcontrol)
2231 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 	else if (2 == pcontrol)
2233 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 	return sizeof(ctrl_m_pg);
2235 }
2236 
2237 
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 {	/* Informational Exceptions control mode page for mode_sense */
2240 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2241 				       0, 0, 0x0, 0x0};
2242 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 				      0, 0, 0x0, 0x0};
2244 
2245 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2246 	if (1 == pcontrol)
2247 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 	else if (2 == pcontrol)
2249 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 	return sizeof(iec_m_pg);
2251 }
2252 
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 {	/* SAS SSP mode page - short format for mode_sense */
2255 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2257 
2258 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2259 	if (1 == pcontrol)
2260 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 	return sizeof(sas_sf_m_pg);
2262 }
2263 
2264 
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2266 			      int target_dev_id)
2267 {	/* SAS phy control and discover mode page for mode_sense */
2268 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2271 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2272 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2273 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 		    0, 0, 0, 0, 0, 0, 0, 0,
2275 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2277 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2278 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2279 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 		    0, 0, 0, 0, 0, 0, 0, 0,
2281 		};
2282 	int port_a, port_b;
2283 
2284 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 	port_a = target_dev_id + 1;
2289 	port_b = port_a + 1;
2290 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 	put_unaligned_be32(port_a, p + 20);
2292 	put_unaligned_be32(port_b, p + 48 + 20);
2293 	if (1 == pcontrol)
2294 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 	return sizeof(sas_pcd_m_pg);
2296 }
2297 
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 {	/* SAS SSP shared protocol specific port mode subpage */
2300 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 		    0, 0, 0, 0, 0, 0, 0, 0,
2302 		};
2303 
2304 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2305 	if (1 == pcontrol)
2306 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 	return sizeof(sas_sha_m_pg);
2308 }
2309 
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2311 
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 			   struct sdebug_dev_info *devip)
2314 {
2315 	int pcontrol, pcode, subpcode, bd_len;
2316 	unsigned char dev_spec;
2317 	u32 alloc_len, offset, len;
2318 	int target_dev_id;
2319 	int target = scp->device->id;
2320 	unsigned char *ap;
2321 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2322 	unsigned char *cmd = scp->cmnd;
2323 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2324 
2325 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2326 	pcontrol = (cmd[2] & 0xc0) >> 6;
2327 	pcode = cmd[2] & 0x3f;
2328 	subpcode = cmd[3];
2329 	msense_6 = (MODE_SENSE == cmd[0]);
2330 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2331 	is_disk = (sdebug_ptype == TYPE_DISK);
2332 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2333 	if ((is_disk || is_zbc) && !dbd)
2334 		bd_len = llbaa ? 16 : 8;
2335 	else
2336 		bd_len = 0;
2337 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2338 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2339 	if (0x3 == pcontrol) {  /* Saving values not supported */
2340 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2341 		return check_condition_result;
2342 	}
2343 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2344 			(devip->target * 1000) - 3;
2345 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2346 	if (is_disk || is_zbc) {
2347 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2348 		if (sdebug_wp)
2349 			dev_spec |= 0x80;
2350 	} else
2351 		dev_spec = 0x0;
2352 	if (msense_6) {
2353 		arr[2] = dev_spec;
2354 		arr[3] = bd_len;
2355 		offset = 4;
2356 	} else {
2357 		arr[3] = dev_spec;
2358 		if (16 == bd_len)
2359 			arr[4] = 0x1;	/* set LONGLBA bit */
2360 		arr[7] = bd_len;	/* assume 255 or less */
2361 		offset = 8;
2362 	}
2363 	ap = arr + offset;
2364 	if ((bd_len > 0) && (!sdebug_capacity))
2365 		sdebug_capacity = get_sdebug_capacity();
2366 
2367 	if (8 == bd_len) {
2368 		if (sdebug_capacity > 0xfffffffe)
2369 			put_unaligned_be32(0xffffffff, ap + 0);
2370 		else
2371 			put_unaligned_be32(sdebug_capacity, ap + 0);
2372 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2373 		offset += bd_len;
2374 		ap = arr + offset;
2375 	} else if (16 == bd_len) {
2376 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2377 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2378 		offset += bd_len;
2379 		ap = arr + offset;
2380 	}
2381 
2382 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2383 		/* TODO: Control Extension page */
2384 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2385 		return check_condition_result;
2386 	}
2387 	bad_pcode = false;
2388 
2389 	switch (pcode) {
2390 	case 0x1:	/* Read-Write error recovery page, direct access */
2391 		len = resp_err_recov_pg(ap, pcontrol, target);
2392 		offset += len;
2393 		break;
2394 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2395 		len = resp_disconnect_pg(ap, pcontrol, target);
2396 		offset += len;
2397 		break;
2398 	case 0x3:       /* Format device page, direct access */
2399 		if (is_disk) {
2400 			len = resp_format_pg(ap, pcontrol, target);
2401 			offset += len;
2402 		} else
2403 			bad_pcode = true;
2404 		break;
2405 	case 0x8:	/* Caching page, direct access */
2406 		if (is_disk || is_zbc) {
2407 			len = resp_caching_pg(ap, pcontrol, target);
2408 			offset += len;
2409 		} else
2410 			bad_pcode = true;
2411 		break;
2412 	case 0xa:	/* Control Mode page, all devices */
2413 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2414 		offset += len;
2415 		break;
2416 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2417 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2418 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2419 			return check_condition_result;
2420 		}
2421 		len = 0;
2422 		if ((0x0 == subpcode) || (0xff == subpcode))
2423 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2424 		if ((0x1 == subpcode) || (0xff == subpcode))
2425 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2426 						  target_dev_id);
2427 		if ((0x2 == subpcode) || (0xff == subpcode))
2428 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2429 		offset += len;
2430 		break;
2431 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2432 		len = resp_iec_m_pg(ap, pcontrol, target);
2433 		offset += len;
2434 		break;
2435 	case 0x3f:	/* Read all Mode pages */
2436 		if ((0 == subpcode) || (0xff == subpcode)) {
2437 			len = resp_err_recov_pg(ap, pcontrol, target);
2438 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2439 			if (is_disk) {
2440 				len += resp_format_pg(ap + len, pcontrol,
2441 						      target);
2442 				len += resp_caching_pg(ap + len, pcontrol,
2443 						       target);
2444 			} else if (is_zbc) {
2445 				len += resp_caching_pg(ap + len, pcontrol,
2446 						       target);
2447 			}
2448 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2449 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2450 			if (0xff == subpcode) {
2451 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2452 						  target, target_dev_id);
2453 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 			}
2455 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2456 			offset += len;
2457 		} else {
2458 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2459 			return check_condition_result;
2460 		}
2461 		break;
2462 	default:
2463 		bad_pcode = true;
2464 		break;
2465 	}
2466 	if (bad_pcode) {
2467 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2468 		return check_condition_result;
2469 	}
2470 	if (msense_6)
2471 		arr[0] = offset - 1;
2472 	else
2473 		put_unaligned_be16((offset - 2), arr + 0);
2474 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2475 }
2476 
2477 #define SDEBUG_MAX_MSELECT_SZ 512
2478 
2479 static int resp_mode_select(struct scsi_cmnd *scp,
2480 			    struct sdebug_dev_info *devip)
2481 {
2482 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2483 	int param_len, res, mpage;
2484 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2485 	unsigned char *cmd = scp->cmnd;
2486 	int mselect6 = (MODE_SELECT == cmd[0]);
2487 
2488 	memset(arr, 0, sizeof(arr));
2489 	pf = cmd[1] & 0x10;
2490 	sp = cmd[1] & 0x1;
2491 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2492 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2493 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2494 		return check_condition_result;
2495 	}
2496 	res = fetch_to_dev_buffer(scp, arr, param_len);
2497 	if (-1 == res)
2498 		return DID_ERROR << 16;
2499 	else if (sdebug_verbose && (res < param_len))
2500 		sdev_printk(KERN_INFO, scp->device,
2501 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2502 			    __func__, param_len, res);
2503 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2504 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2505 	off = bd_len + (mselect6 ? 4 : 8);
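	/* off: start of the first mode page, past the 4 byte (MODE SELECT 6)
	 * or 8 byte (MODE SELECT 10) header and any block descriptors */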
2506 	if (md_len > 2 || off >= res) {
2507 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2508 		return check_condition_result;
2509 	}
2510 	mpage = arr[off] & 0x3f;
2511 	ps = !!(arr[off] & 0x80);
2512 	if (ps) {
2513 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2514 		return check_condition_result;
2515 	}
2516 	spf = !!(arr[off] & 0x40);
2517 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2518 		       (arr[off + 1] + 2);
2519 	if ((pg_len + off) > param_len) {
2520 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2521 				PARAMETER_LIST_LENGTH_ERR, 0);
2522 		return check_condition_result;
2523 	}
2524 	switch (mpage) {
2525 	case 0x8:      /* Caching Mode page */
2526 		if (caching_pg[1] == arr[off + 1]) {
2527 			memcpy(caching_pg + 2, arr + off + 2,
2528 			       sizeof(caching_pg) - 2);
2529 			goto set_mode_changed_ua;
2530 		}
2531 		break;
2532 	case 0xa:      /* Control Mode page */
2533 		if (ctrl_m_pg[1] == arr[off + 1]) {
2534 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2535 			       sizeof(ctrl_m_pg) - 2);
2536 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2540 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2541 			goto set_mode_changed_ua;
2542 		}
2543 		break;
2544 	case 0x1c:      /* Informational Exceptions Mode page */
2545 		if (iec_m_pg[1] == arr[off + 1]) {
2546 			memcpy(iec_m_pg + 2, arr + off + 2,
2547 			       sizeof(iec_m_pg) - 2);
2548 			goto set_mode_changed_ua;
2549 		}
2550 		break;
2551 	default:
2552 		break;
2553 	}
2554 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2555 	return check_condition_result;
2556 set_mode_changed_ua:
2557 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2558 	return 0;
2559 }
2560 
2561 static int resp_temp_l_pg(unsigned char *arr)
2562 {
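	/* log parameter 0x0000: current temperature (38 C); log parameter
	 * 0x0001: reference temperature (65 C) */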
2563 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2564 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2565 		};
2566 
2567 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2568 	return sizeof(temp_l_pg);
2569 }
2570 
2571 static int resp_ie_l_pg(unsigned char *arr)
2572 {
2573 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2574 		};
2575 
2576 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2577 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2578 		arr[4] = THRESHOLD_EXCEEDED;
2579 		arr[5] = 0xff;
2580 	}
2581 	return sizeof(ie_l_pg);
2582 }
2583 
2584 #define SDEBUG_MAX_LSENSE_SZ 512
2585 
2586 static int resp_log_sense(struct scsi_cmnd *scp,
2587 			  struct sdebug_dev_info *devip)
2588 {
2589 	int ppc, sp, pcode, subpcode;
2590 	u32 alloc_len, len, n;
2591 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2592 	unsigned char *cmd = scp->cmnd;
2593 
2594 	memset(arr, 0, sizeof(arr));
2595 	ppc = cmd[1] & 0x2;
2596 	sp = cmd[1] & 0x1;
2597 	if (ppc || sp) {
2598 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2599 		return check_condition_result;
2600 	}
2601 	pcode = cmd[2] & 0x3f;
2602 	subpcode = cmd[3] & 0xff;
2603 	alloc_len = get_unaligned_be16(cmd + 7);
2604 	arr[0] = pcode;
2605 	if (0 == subpcode) {
2606 		switch (pcode) {
2607 		case 0x0:	/* Supported log pages log page */
2608 			n = 4;
2609 			arr[n++] = 0x0;		/* this page */
2610 			arr[n++] = 0xd;		/* Temperature */
2611 			arr[n++] = 0x2f;	/* Informational exceptions */
2612 			arr[3] = n - 4;
2613 			break;
2614 		case 0xd:	/* Temperature log page */
2615 			arr[3] = resp_temp_l_pg(arr + 4);
2616 			break;
2617 		case 0x2f:	/* Informational exceptions log page */
2618 			arr[3] = resp_ie_l_pg(arr + 4);
2619 			break;
2620 		default:
2621 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2622 			return check_condition_result;
2623 		}
2624 	} else if (0xff == subpcode) {
2625 		arr[0] |= 0x40;
2626 		arr[1] = subpcode;
2627 		switch (pcode) {
2628 		case 0x0:	/* Supported log pages and subpages log page */
2629 			n = 4;
2630 			arr[n++] = 0x0;
2631 			arr[n++] = 0x0;		/* 0,0 page */
2632 			arr[n++] = 0x0;
2633 			arr[n++] = 0xff;	/* this page */
2634 			arr[n++] = 0xd;
2635 			arr[n++] = 0x0;		/* Temperature */
2636 			arr[n++] = 0x2f;
2637 			arr[n++] = 0x0;	/* Informational exceptions */
2638 			arr[3] = n - 4;
2639 			break;
2640 		case 0xd:	/* Temperature subpages */
2641 			n = 4;
2642 			arr[n++] = 0xd;
2643 			arr[n++] = 0x0;		/* Temperature */
2644 			arr[3] = n - 4;
2645 			break;
2646 		case 0x2f:	/* Informational exceptions subpages */
2647 			n = 4;
2648 			arr[n++] = 0x2f;
2649 			arr[n++] = 0x0;		/* Informational exceptions */
2650 			arr[3] = n - 4;
2651 			break;
2652 		default:
2653 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 			return check_condition_result;
2655 		}
2656 	} else {
2657 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2658 		return check_condition_result;
2659 	}
2660 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2661 	return fill_from_dev_buffer(scp, arr,
2662 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2663 }
2664 
2665 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2666 {
2667 	return devip->nr_zones != 0;
2668 }
2669 
2670 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2671 					unsigned long long lba)
2672 {
2673 	return &devip->zstate[lba >> devip->zsize_shift];
2674 }
2675 
2676 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2677 {
2678 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2679 }
2680 
2681 static void zbc_close_zone(struct sdebug_dev_info *devip,
2682 			   struct sdeb_zone_state *zsp)
2683 {
2684 	enum sdebug_z_cond zc;
2685 
2686 	if (zbc_zone_is_conv(zsp))
2687 		return;
2688 
2689 	zc = zsp->z_cond;
2690 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2691 		return;
2692 
2693 	if (zc == ZC2_IMPLICIT_OPEN)
2694 		devip->nr_imp_open--;
2695 	else
2696 		devip->nr_exp_open--;
2697 
2698 	if (zsp->z_wp == zsp->z_start) {
2699 		zsp->z_cond = ZC1_EMPTY;
2700 	} else {
2701 		zsp->z_cond = ZC4_CLOSED;
2702 		devip->nr_closed++;
2703 	}
2704 }
2705 
2706 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2707 {
2708 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2709 	unsigned int i;
2710 
2711 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2712 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2713 			zbc_close_zone(devip, zsp);
2714 			return;
2715 		}
2716 	}
2717 }
2718 
2719 static void zbc_open_zone(struct sdebug_dev_info *devip,
2720 			  struct sdeb_zone_state *zsp, bool explicit)
2721 {
2722 	enum sdebug_z_cond zc;
2723 
2724 	if (zbc_zone_is_conv(zsp))
2725 		return;
2726 
2727 	zc = zsp->z_cond;
2728 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2729 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2730 		return;
2731 
2732 	/* Close an implicit open zone if necessary */
2733 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2734 		zbc_close_zone(devip, zsp);
2735 	else if (devip->max_open &&
2736 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2737 		zbc_close_imp_open_zone(devip);
2738 
2739 	if (zsp->z_cond == ZC4_CLOSED)
2740 		devip->nr_closed--;
2741 	if (explicit) {
2742 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2743 		devip->nr_exp_open++;
2744 	} else {
2745 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2746 		devip->nr_imp_open++;
2747 	}
2748 }
2749 
2750 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2751 				     struct sdeb_zone_state *zsp)
2752 {
2753 	switch (zsp->z_cond) {
2754 	case ZC2_IMPLICIT_OPEN:
2755 		devip->nr_imp_open--;
2756 		break;
2757 	case ZC3_EXPLICIT_OPEN:
2758 		devip->nr_exp_open--;
2759 		break;
2760 	default:
2761 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2762 			  zsp->z_start, zsp->z_cond);
2763 		break;
2764 	}
2765 	zsp->z_cond = ZC5_FULL;
2766 }
2767 
2768 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2769 		       unsigned long long lba, unsigned int num)
2770 {
2771 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2772 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2773 
2774 	if (zbc_zone_is_conv(zsp))
2775 		return;
2776 
2777 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2778 		zsp->z_wp += num;
2779 		if (zsp->z_wp >= zend)
2780 			zbc_set_zone_full(devip, zsp);
2781 		return;
2782 	}
2783 
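	/*
	 * Sequential write preferred zone (host-aware): the write need not
	 * start at the write pointer and may spill into following zones, so
	 * walk zone by zone, advancing each WP and flagging writes that do
	 * not land on the WP as non-sequential resources.
	 */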
2784 	while (num) {
2785 		if (lba != zsp->z_wp)
2786 			zsp->z_non_seq_resource = true;
2787 
2788 		end = lba + num;
2789 		if (end >= zend) {
2790 			n = zend - lba;
2791 			zsp->z_wp = zend;
2792 		} else if (end > zsp->z_wp) {
2793 			n = num;
2794 			zsp->z_wp = end;
2795 		} else {
2796 			n = num;
2797 		}
2798 		if (zsp->z_wp >= zend)
2799 			zbc_set_zone_full(devip, zsp);
2800 
2801 		num -= n;
2802 		lba += n;
2803 		if (num) {
2804 			zsp++;
2805 			zend = zsp->z_start + zsp->z_size;
2806 		}
2807 	}
2808 }
2809 
2810 static int check_zbc_access_params(struct scsi_cmnd *scp,
2811 			unsigned long long lba, unsigned int num, bool write)
2812 {
2813 	struct scsi_device *sdp = scp->device;
2814 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2815 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2816 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2817 
2818 	if (!write) {
2819 		if (devip->zmodel == BLK_ZONED_HA)
2820 			return 0;
2821 		/* For host-managed, reads cannot cross zone type boundaries */
2822 		if (zsp_end != zsp &&
2823 		    zbc_zone_is_conv(zsp) &&
2824 		    !zbc_zone_is_conv(zsp_end)) {
2825 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2826 					LBA_OUT_OF_RANGE,
2827 					READ_INVDATA_ASCQ);
2828 			return check_condition_result;
2829 		}
2830 		return 0;
2831 	}
2832 
2833 	/* No restrictions for writes within conventional zones */
2834 	if (zbc_zone_is_conv(zsp)) {
2835 		if (!zbc_zone_is_conv(zsp_end)) {
2836 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 					LBA_OUT_OF_RANGE,
2838 					WRITE_BOUNDARY_ASCQ);
2839 			return check_condition_result;
2840 		}
2841 		return 0;
2842 	}
2843 
2844 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2845 		/* Writes cannot cross sequential zone boundaries */
2846 		if (zsp_end != zsp) {
2847 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2848 					LBA_OUT_OF_RANGE,
2849 					WRITE_BOUNDARY_ASCQ);
2850 			return check_condition_result;
2851 		}
2852 		/* Cannot write full zones */
2853 		if (zsp->z_cond == ZC5_FULL) {
2854 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2855 					INVALID_FIELD_IN_CDB, 0);
2856 			return check_condition_result;
2857 		}
2858 		/* Writes must be aligned to the zone WP */
2859 		if (lba != zsp->z_wp) {
2860 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2861 					LBA_OUT_OF_RANGE,
2862 					UNALIGNED_WRITE_ASCQ);
2863 			return check_condition_result;
2864 		}
2865 	}
2866 
2867 	/* Handle implicit open of closed and empty zones */
2868 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2869 		if (devip->max_open &&
2870 		    devip->nr_exp_open >= devip->max_open) {
2871 			mk_sense_buffer(scp, DATA_PROTECT,
2872 					INSUFF_RES_ASC,
2873 					INSUFF_ZONE_ASCQ);
2874 			return check_condition_result;
2875 		}
2876 		zbc_open_zone(devip, zsp, false);
2877 	}
2878 
2879 	return 0;
2880 }
2881 
2882 static inline int check_device_access_params
2883 			(struct scsi_cmnd *scp, unsigned long long lba,
2884 			 unsigned int num, bool write)
2885 {
2886 	struct scsi_device *sdp = scp->device;
2887 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2888 
2889 	if (lba + num > sdebug_capacity) {
2890 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2891 		return check_condition_result;
2892 	}
2893 	/* transfer length excessive (tie in to block limits VPD page) */
2894 	if (num > sdebug_store_sectors) {
2895 		/* needs work to find which cdb byte 'num' comes from */
2896 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2897 		return check_condition_result;
2898 	}
2899 	if (write && unlikely(sdebug_wp)) {
2900 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2901 		return check_condition_result;
2902 	}
2903 	if (sdebug_dev_is_zoned(devip))
2904 		return check_zbc_access_params(scp, lba, num, write);
2905 
2906 	return 0;
2907 }
2908 
2909 /*
2910  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2911  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2912  * that access any of the "stores" in struct sdeb_store_info should call this
2913  * function with bug_if_fake_rw set to true.
2914  */
2915 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2916 						bool bug_if_fake_rw)
2917 {
2918 	if (sdebug_fake_rw) {
2919 		BUG_ON(bug_if_fake_rw);	/* See note above */
2920 		return NULL;
2921 	}
2922 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2923 }
2924 
2925 /* Returns number of bytes copied or -1 if error. */
2926 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2927 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2928 {
2929 	int ret;
2930 	u64 block, rest = 0;
2931 	enum dma_data_direction dir;
2932 	struct scsi_data_buffer *sdb = &scp->sdb;
2933 	u8 *fsp;
2934 
2935 	if (do_write) {
2936 		dir = DMA_TO_DEVICE;
2937 		write_since_sync = true;
2938 	} else {
2939 		dir = DMA_FROM_DEVICE;
2940 	}
2941 
2942 	if (!sdb->length || !sip)
2943 		return 0;
2944 	if (scp->sc_data_direction != dir)
2945 		return -1;
2946 	fsp = sip->storep;
2947 
2948 	block = do_div(lba, sdebug_store_sectors);
2949 	if (block + num > sdebug_store_sectors)
2950 		rest = block + num - sdebug_store_sectors;
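	/* 'rest' is the tail of an access that runs past the end of the
	 * store and wraps around to its start */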
2951 
2952 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2953 		   fsp + (block * sdebug_sector_size),
2954 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2955 	if (ret != (num - rest) * sdebug_sector_size)
2956 		return ret;
2957 
2958 	if (rest) {
2959 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2960 			    fsp, rest * sdebug_sector_size,
2961 			    sg_skip + ((num - rest) * sdebug_sector_size),
2962 			    do_write);
2963 	}
2964 
2965 	return ret;
2966 }
2967 
2968 /* Returns number of bytes copied or -1 if error. */
2969 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2970 {
2971 	struct scsi_data_buffer *sdb = &scp->sdb;
2972 
2973 	if (!sdb->length)
2974 		return 0;
2975 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2976 		return -1;
2977 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2978 			      num * sdebug_sector_size, 0, true);
2979 }
2980 
2981 /* If the first num blocks at sip->storep+lba compare equal to the first
2982  * half of arr, copy the second half of arr into sip->storep+lba and
2983  * return true. If the comparison fails then return false. */
2984 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2985 			      const u8 *arr, bool compare_only)
2986 {
2987 	bool res;
2988 	u64 block, rest = 0;
2989 	u32 store_blks = sdebug_store_sectors;
2990 	u32 lb_size = sdebug_sector_size;
2991 	u8 *fsp = sip->storep;
2992 
2993 	block = do_div(lba, store_blks);
2994 	if (block + num > store_blks)
2995 		rest = block + num - store_blks;
2996 
2997 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2998 	if (!res)
2999 		return res;
3000 	if (rest)
3001 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3002 			     rest * lb_size);
3003 	if (!res)
3004 		return res;
3005 	if (compare_only)
3006 		return true;
3007 	arr += num * lb_size;
3008 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3009 	if (rest)
3010 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3011 	return res;
3012 }
3013 
3014 static __be16 dif_compute_csum(const void *buf, int len)
3015 {
3016 	__be16 csum;
3017 
3018 	if (sdebug_guard)
3019 		csum = (__force __be16)ip_compute_csum(buf, len);
3020 	else
3021 		csum = cpu_to_be16(crc_t10dif(buf, len));
3022 
3023 	return csum;
3024 }
3025 
3026 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3027 		      sector_t sector, u32 ei_lba)
3028 {
3029 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3030 
3031 	if (sdt->guard_tag != csum) {
3032 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3033 			(unsigned long)sector,
3034 			be16_to_cpu(sdt->guard_tag),
3035 			be16_to_cpu(csum));
3036 		return 0x01;
3037 	}
3038 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3039 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3040 		pr_err("REF check failed on sector %lu\n",
3041 			(unsigned long)sector);
3042 		return 0x03;
3043 	}
3044 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3045 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3046 		pr_err("REF check failed on sector %lu\n",
3047 			(unsigned long)sector);
3048 		return 0x03;
3049 	}
3050 	return 0;
3051 }
3052 
3053 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3054 			  unsigned int sectors, bool read)
3055 {
3056 	size_t resid;
3057 	void *paddr;
3058 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3059 						scp->device->hostdata, true);
3060 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3061 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3062 	struct sg_mapping_iter miter;
3063 
3064 	/* Bytes of protection data to copy into sgl */
3065 	resid = sectors * sizeof(*dif_storep);
3066 
3067 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3068 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3069 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3070 
3071 	while (sg_miter_next(&miter) && resid > 0) {
3072 		size_t len = min_t(size_t, miter.length, resid);
3073 		void *start = dif_store(sip, sector);
3074 		size_t rest = 0;
3075 
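		/* the PI store is circular: any portion past its end wraps
		 * back to the start of dif_storep */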
3076 		if (dif_store_end < start + len)
3077 			rest = start + len - dif_store_end;
3078 
3079 		paddr = miter.addr;
3080 
3081 		if (read)
3082 			memcpy(paddr, start, len - rest);
3083 		else
3084 			memcpy(start, paddr, len - rest);
3085 
3086 		if (rest) {
3087 			if (read)
3088 				memcpy(paddr + len - rest, dif_storep, rest);
3089 			else
3090 				memcpy(dif_storep, paddr + len - rest, rest);
3091 		}
3092 
3093 		sector += len / sizeof(*dif_storep);
3094 		resid -= len;
3095 	}
3096 	sg_miter_stop(&miter);
3097 }
3098 
3099 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3100 			    unsigned int sectors, u32 ei_lba)
3101 {
3102 	int ret = 0;
3103 	unsigned int i;
3104 	sector_t sector;
3105 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3106 						scp->device->hostdata, true);
3107 	struct t10_pi_tuple *sdt;
3108 
3109 	for (i = 0; i < sectors; i++, ei_lba++) {
3110 		sector = start_sec + i;
3111 		sdt = dif_store(sip, sector);
3112 
3113 		if (sdt->app_tag == cpu_to_be16(0xffff))
3114 			continue;
3115 
3116 		/*
3117 		 * Because scsi_debug acts as both initiator and
3118 		 * target we proceed to verify the PI even if
3119 		 * RDPROTECT=3. This is done so the "initiator" knows
3120 		 * which type of error to return. Otherwise we would
3121 		 * have to iterate over the PI twice.
3122 		 */
3123 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3124 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3125 					 sector, ei_lba);
3126 			if (ret) {
3127 				dif_errors++;
3128 				break;
3129 			}
3130 		}
3131 	}
3132 
3133 	dif_copy_prot(scp, start_sec, sectors, true);
3134 	dix_reads++;
3135 
3136 	return ret;
3137 }
3138 
3139 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3140 {
3141 	bool check_prot;
3142 	u32 num;
3143 	u32 ei_lba;
3144 	int ret;
3145 	u64 lba;
3146 	struct sdeb_store_info *sip = devip2sip(devip, true);
3147 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3148 	u8 *cmd = scp->cmnd;
3149 
3150 	switch (cmd[0]) {
3151 	case READ_16:
3152 		ei_lba = 0;
3153 		lba = get_unaligned_be64(cmd + 2);
3154 		num = get_unaligned_be32(cmd + 10);
3155 		check_prot = true;
3156 		break;
3157 	case READ_10:
3158 		ei_lba = 0;
3159 		lba = get_unaligned_be32(cmd + 2);
3160 		num = get_unaligned_be16(cmd + 7);
3161 		check_prot = true;
3162 		break;
3163 	case READ_6:
3164 		ei_lba = 0;
3165 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3166 		      (u32)(cmd[1] & 0x1f) << 16;
3167 		num = (0 == cmd[4]) ? 256 : cmd[4];
3168 		check_prot = true;
3169 		break;
3170 	case READ_12:
3171 		ei_lba = 0;
3172 		lba = get_unaligned_be32(cmd + 2);
3173 		num = get_unaligned_be32(cmd + 6);
3174 		check_prot = true;
3175 		break;
3176 	case XDWRITEREAD_10:
3177 		ei_lba = 0;
3178 		lba = get_unaligned_be32(cmd + 2);
3179 		num = get_unaligned_be16(cmd + 7);
3180 		check_prot = false;
3181 		break;
3182 	default:	/* assume READ(32) */
3183 		lba = get_unaligned_be64(cmd + 12);
3184 		ei_lba = get_unaligned_be32(cmd + 20);
3185 		num = get_unaligned_be32(cmd + 28);
3186 		check_prot = false;
3187 		break;
3188 	}
3189 	if (unlikely(have_dif_prot && check_prot)) {
3190 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3191 		    (cmd[1] & 0xe0)) {
3192 			mk_sense_invalid_opcode(scp);
3193 			return check_condition_result;
3194 		}
3195 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3196 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3197 		    (cmd[1] & 0xe0) == 0)
3198 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3199 				    "to DIF device\n");
3200 	}
3201 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3202 		     atomic_read(&sdeb_inject_pending))) {
3203 		num /= 2;
3204 		atomic_set(&sdeb_inject_pending, 0);
3205 	}
3206 
3207 	ret = check_device_access_params(scp, lba, num, false);
3208 	if (ret)
3209 		return ret;
3210 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3211 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3212 		     ((lba + num) > sdebug_medium_error_start))) {
3213 		/* claim unrecoverable read error */
3214 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3215 		/* set info field and valid bit for fixed descriptor */
3216 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3217 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3218 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3219 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3220 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3221 		}
3222 		scsi_set_resid(scp, scsi_bufflen(scp));
3223 		return check_condition_result;
3224 	}
3225 
3226 	read_lock(macc_lckp);
3227 
3228 	/* DIX + T10 DIF */
3229 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3230 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3231 		case 1: /* Guard tag error */
3232 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3233 				read_unlock(macc_lckp);
3234 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3235 				return check_condition_result;
3236 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3237 				read_unlock(macc_lckp);
3238 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3239 				return illegal_condition_result;
3240 			}
3241 			break;
3242 		case 3: /* Reference tag error */
3243 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3244 				read_unlock(macc_lckp);
3245 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3246 				return check_condition_result;
3247 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3248 				read_unlock(macc_lckp);
3249 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3250 				return illegal_condition_result;
3251 			}
3252 			break;
3253 		}
3254 	}
3255 
3256 	ret = do_device_access(sip, scp, 0, lba, num, false);
3257 	read_unlock(macc_lckp);
3258 	if (unlikely(ret == -1))
3259 		return DID_ERROR << 16;
3260 
3261 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3262 
3263 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3264 		     atomic_read(&sdeb_inject_pending))) {
3265 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3266 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3267 			atomic_set(&sdeb_inject_pending, 0);
3268 			return check_condition_result;
3269 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3270 			/* Logical block guard check failed */
3271 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3272 			atomic_set(&sdeb_inject_pending, 0);
3273 			return illegal_condition_result;
3274 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3275 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3276 			atomic_set(&sdeb_inject_pending, 0);
3277 			return illegal_condition_result;
3278 		}
3279 	}
3280 	return 0;
3281 }
3282 
3283 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3284 			     unsigned int sectors, u32 ei_lba)
3285 {
3286 	int ret;
3287 	struct t10_pi_tuple *sdt;
3288 	void *daddr;
3289 	sector_t sector = start_sec;
3290 	int ppage_offset;
3291 	int dpage_offset;
3292 	struct sg_mapping_iter diter;
3293 	struct sg_mapping_iter piter;
3294 
3295 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3296 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3297 
3298 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3299 			scsi_prot_sg_count(SCpnt),
3300 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3301 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3302 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3303 
3304 	/* For each protection page */
3305 	while (sg_miter_next(&piter)) {
3306 		dpage_offset = 0;
3307 		if (WARN_ON(!sg_miter_next(&diter))) {
3308 			ret = 0x01;
3309 			goto out;
3310 		}
3311 
3312 		for (ppage_offset = 0; ppage_offset < piter.length;
3313 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3314 			/* If we're at the end of the current
3315 			 * data page advance to the next one
3316 			 */
3317 			if (dpage_offset >= diter.length) {
3318 				if (WARN_ON(!sg_miter_next(&diter))) {
3319 					ret = 0x01;
3320 					goto out;
3321 				}
3322 				dpage_offset = 0;
3323 			}
3324 
3325 			sdt = piter.addr + ppage_offset;
3326 			daddr = diter.addr + dpage_offset;
3327 
3328 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3329 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3330 				if (ret)
3331 					goto out;
3332 			}
3333 
3334 			sector++;
3335 			ei_lba++;
3336 			dpage_offset += sdebug_sector_size;
3337 		}
3338 		diter.consumed = dpage_offset;
3339 		sg_miter_stop(&diter);
3340 	}
3341 	sg_miter_stop(&piter);
3342 
3343 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3344 	dix_writes++;
3345 
3346 	return 0;
3347 
3348 out:
3349 	dif_errors++;
3350 	sg_miter_stop(&diter);
3351 	sg_miter_stop(&piter);
3352 	return ret;
3353 }
3354 
3355 static unsigned long lba_to_map_index(sector_t lba)
3356 {
3357 	if (sdebug_unmap_alignment)
3358 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3359 	sector_div(lba, sdebug_unmap_granularity);
3360 	return lba;
3361 }
3362 
3363 static sector_t map_index_to_lba(unsigned long index)
3364 {
3365 	sector_t lba = index * sdebug_unmap_granularity;
3366 
3367 	if (sdebug_unmap_alignment)
3368 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3369 	return lba;
3370 }
3371 
3372 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
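/*
 * Worked example (sketch): with sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=2, provisioning blocks start at LBA 2, 10,
 * 18, ... and the two helpers above behave like:
 *
 *	lba_to_map_index(2)  == 1	// (2 + 6) / 8
 *	lba_to_map_index(9)  == 1	// (9 + 6) / 8
 *	lba_to_map_index(10) == 2	// (10 + 6) / 8
 *	map_index_to_lba(1)  == 2	// 1 * 8 - 6
 *
 * Index 0 covers only the short run of LBAs below the alignment.
 */
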
3373 			      unsigned int *num)
3374 {
3375 	sector_t end;
3376 	unsigned int mapped;
3377 	unsigned long index;
3378 	unsigned long next;
3379 
3380 	index = lba_to_map_index(lba);
3381 	mapped = test_bit(index, sip->map_storep);
3382 
3383 	if (mapped)
3384 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3385 	else
3386 		next = find_next_bit(sip->map_storep, map_size, index);
3387 
3388 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3389 	*num = end - lba;
3390 	return mapped;
3391 }
3392 
3393 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3394 		       unsigned int len)
3395 {
3396 	sector_t end = lba + len;
3397 
3398 	while (lba < end) {
3399 		unsigned long index = lba_to_map_index(lba);
3400 
3401 		if (index < map_size)
3402 			set_bit(index, sip->map_storep);
3403 
3404 		lba = map_index_to_lba(index + 1);
3405 	}
3406 }
3407 
3408 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3409 			 unsigned int len)
3410 {
3411 	sector_t end = lba + len;
3412 	u8 *fsp = sip->storep;
3413 
3414 	while (lba < end) {
3415 		unsigned long index = lba_to_map_index(lba);
3416 
3417 		if (lba == map_index_to_lba(index) &&
3418 		    lba + sdebug_unmap_granularity <= end &&
3419 		    index < map_size) {
3420 			clear_bit(index, sip->map_storep);
3421 			if (sdebug_lbprz) {  /* LBPRZ=1: zero fill, LBPRZ=2: 0xff fill */
3422 				memset(fsp + lba * sdebug_sector_size,
3423 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3424 				       sdebug_sector_size *
3425 				       sdebug_unmap_granularity);
3426 			}
3427 			if (sip->dif_storep) {
3428 				memset(sip->dif_storep + lba, 0xff,
3429 				       sizeof(*sip->dif_storep) *
3430 				       sdebug_unmap_granularity);
3431 			}
3432 		}
3433 		lba = map_index_to_lba(index + 1);
3434 	}
3435 }
3436 
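/*
 * Example (sketch): with granularity 8 and alignment 0, an UNMAP of
 * LBAs 4..19 only clears the one fully covered provisioning block
 * (LBAs 8..15); the partial blocks at 4..7 and 16..19 stay mapped.
 * This matches SBC, which permits a device to unmap none or only
 * part of a requested range.
 */
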
3437 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3438 {
3439 	bool check_prot;
3440 	u32 num;
3441 	u32 ei_lba;
3442 	int ret;
3443 	u64 lba;
3444 	struct sdeb_store_info *sip = devip2sip(devip, true);
3445 	rwlock_t *macc_lckp = &sip->macc_lck;
3446 	u8 *cmd = scp->cmnd;
3447 
3448 	switch (cmd[0]) {
3449 	case WRITE_16:
3450 		ei_lba = 0;
3451 		lba = get_unaligned_be64(cmd + 2);
3452 		num = get_unaligned_be32(cmd + 10);
3453 		check_prot = true;
3454 		break;
3455 	case WRITE_10:
3456 		ei_lba = 0;
3457 		lba = get_unaligned_be32(cmd + 2);
3458 		num = get_unaligned_be16(cmd + 7);
3459 		check_prot = true;
3460 		break;
3461 	case WRITE_6:
3462 		ei_lba = 0;
3463 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3464 		      (u32)(cmd[1] & 0x1f) << 16;
3465 		num = (0 == cmd[4]) ? 256 : cmd[4];
3466 		check_prot = true;
3467 		break;
3468 	case WRITE_12:
3469 		ei_lba = 0;
3470 		lba = get_unaligned_be32(cmd + 2);
3471 		num = get_unaligned_be32(cmd + 6);
3472 		check_prot = true;
3473 		break;
3474 	case 0x53:	/* XDWRITEREAD(10) */
3475 		ei_lba = 0;
3476 		lba = get_unaligned_be32(cmd + 2);
3477 		num = get_unaligned_be16(cmd + 7);
3478 		check_prot = false;
3479 		break;
3480 	default:	/* assume WRITE(32) */
3481 		lba = get_unaligned_be64(cmd + 12);
3482 		ei_lba = get_unaligned_be32(cmd + 20);
3483 		num = get_unaligned_be32(cmd + 28);
3484 		check_prot = false;
3485 		break;
3486 	}
3487 	if (unlikely(have_dif_prot && check_prot)) {
3488 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3489 		    (cmd[1] & 0xe0)) {
3490 			mk_sense_invalid_opcode(scp);
3491 			return check_condition_result;
3492 		}
3493 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3494 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3495 		    (cmd[1] & 0xe0) == 0)
3496 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3497 				    "to DIF device\n");
3498 	}
3499 
3500 	write_lock(macc_lckp);
3501 	ret = check_device_access_params(scp, lba, num, true);
3502 	if (ret) {
3503 		write_unlock(macc_lckp);
3504 		return ret;
3505 	}
3506 
3507 	/* DIX + T10 DIF */
3508 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3509 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3510 		case 1: /* Guard tag error */
3511 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3512 				write_unlock(macc_lckp);
3513 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3514 				return illegal_condition_result;
3515 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3516 				write_unlock(macc_lckp);
3517 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3518 				return check_condition_result;
3519 			}
3520 			break;
3521 		case 3: /* Reference tag error */
3522 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3523 				write_unlock(macc_lckp);
3524 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3525 				return illegal_condition_result;
3526 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3527 				write_unlock(macc_lckp);
3528 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3529 				return check_condition_result;
3530 			}
3531 			break;
3532 		}
3533 	}
3534 
3535 	ret = do_device_access(sip, scp, 0, lba, num, true);
3536 	if (unlikely(scsi_debug_lbp()))
3537 		map_region(sip, lba, num);
3538 	/* If ZBC zone then bump its write pointer */
3539 	if (sdebug_dev_is_zoned(devip))
3540 		zbc_inc_wp(devip, lba, num);
3541 	write_unlock(macc_lckp);
3542 	if (unlikely(-1 == ret))
3543 		return DID_ERROR << 16;
3544 	else if (unlikely(sdebug_verbose &&
3545 			  (ret < (num * sdebug_sector_size))))
3546 		sdev_printk(KERN_INFO, scp->device,
3547 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3548 			    my_name, num * sdebug_sector_size, ret);
3549 
3550 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3551 		     atomic_read(&sdeb_inject_pending))) {
3552 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3553 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3554 			atomic_set(&sdeb_inject_pending, 0);
3555 			return check_condition_result;
3556 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3557 			/* Logical block guard check failed */
3558 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3559 			atomic_set(&sdeb_inject_pending, 0);
3560 			return illegal_condition_result;
3561 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3562 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3563 			atomic_set(&sdeb_inject_pending, 0);
3564 			return illegal_condition_result;
3565 		}
3566 	}
3567 	return 0;
3568 }
3569 
3570 /*
3571  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3572  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3573  */
3574 static int resp_write_scat(struct scsi_cmnd *scp,
3575 			   struct sdebug_dev_info *devip)
3576 {
3577 	u8 *cmd = scp->cmnd;
3578 	u8 *lrdp = NULL;
3579 	u8 *up;
3580 	struct sdeb_store_info *sip = devip2sip(devip, true);
3581 	rwlock_t *macc_lckp = &sip->macc_lck;
3582 	u8 wrprotect;
3583 	u16 lbdof, num_lrd, k;
3584 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3585 	u32 lb_size = sdebug_sector_size;
3586 	u32 ei_lba;
3587 	u64 lba;
3588 	int ret, res;
3589 	bool is_16;
3590 	static const u32 lrd_size = 32; /* descriptor and header are both 32 bytes */
3591 
3592 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3593 		is_16 = false;
3594 		wrprotect = (cmd[10] >> 5) & 0x7;
3595 		lbdof = get_unaligned_be16(cmd + 12);
3596 		num_lrd = get_unaligned_be16(cmd + 16);
3597 		bt_len = get_unaligned_be32(cmd + 28);
3598 	} else {        /* that leaves WRITE SCATTERED(16) */
3599 		is_16 = true;
3600 		wrprotect = (cmd[2] >> 5) & 0x7;
3601 		lbdof = get_unaligned_be16(cmd + 4);
3602 		num_lrd = get_unaligned_be16(cmd + 8);
3603 		bt_len = get_unaligned_be32(cmd + 10);
3604 		if (unlikely(have_dif_prot)) {
3605 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3606 			    wrprotect) {
3607 				mk_sense_invalid_opcode(scp);
3608 				return illegal_condition_result;
3609 			}
3610 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3611 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3612 			     wrprotect == 0)
3613 				sdev_printk(KERN_ERR, scp->device,
3614 					    "Unprotected WR to DIF device\n");
3615 		}
3616 	}
3617 	if ((num_lrd == 0) || (bt_len == 0))
3618 		return 0;       /* T10 says these do-nothings are not errors */
3619 	if (lbdof == 0) {
3620 		if (sdebug_verbose)
3621 			sdev_printk(KERN_INFO, scp->device,
3622 				"%s: %s: LB Data Offset field bad\n",
3623 				my_name, __func__);
3624 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3625 		return illegal_condition_result;
3626 	}
3627 	lbdof_blen = lbdof * lb_size;
3628 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3629 		if (sdebug_verbose)
3630 			sdev_printk(KERN_INFO, scp->device,
3631 				"%s: %s: LBA range descriptors don't fit\n",
3632 				my_name, __func__);
3633 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3634 		return illegal_condition_result;
3635 	}
3636 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3637 	if (lrdp == NULL)
3638 		return SCSI_MLQUEUE_HOST_BUSY;
3639 	if (sdebug_verbose)
3640 		sdev_printk(KERN_INFO, scp->device,
3641 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3642 			my_name, __func__, lbdof_blen);
3643 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3644 	if (res == -1) {
3645 		ret = DID_ERROR << 16;
3646 		goto err_out;
3647 	}
3648 
3649 	write_lock(macc_lckp);
3650 	sg_off = lbdof_blen;
3651 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3652 	cum_lb = 0;
3653 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3654 		lba = get_unaligned_be64(up + 0);
3655 		num = get_unaligned_be32(up + 8);
3656 		if (sdebug_verbose)
3657 			sdev_printk(KERN_INFO, scp->device,
3658 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3659 				my_name, __func__, k, lba, num, sg_off);
3660 		if (num == 0)
3661 			continue;
3662 		ret = check_device_access_params(scp, lba, num, true);
3663 		if (ret)
3664 			goto err_out_unlock;
3665 		num_by = num * lb_size;
3666 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3667 
3668 		if ((cum_lb + num) > bt_len) {
3669 			if (sdebug_verbose)
3670 				sdev_printk(KERN_INFO, scp->device,
3671 				    "%s: %s: sum of blocks > data provided\n",
3672 				    my_name, __func__);
3673 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3674 					0);
3675 			ret = illegal_condition_result;
3676 			goto err_out_unlock;
3677 		}
3678 
3679 		/* DIX + T10 DIF */
3680 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3681 			int prot_ret = prot_verify_write(scp, lba, num,
3682 							 ei_lba);
3683 
3684 			if (prot_ret) {
3685 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3686 						prot_ret);
3687 				ret = illegal_condition_result;
3688 				goto err_out_unlock;
3689 			}
3690 		}
3691 
3692 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3693 		/* If ZBC zone then bump its write pointer */
3694 		if (sdebug_dev_is_zoned(devip))
3695 			zbc_inc_wp(devip, lba, num);
3696 		if (unlikely(scsi_debug_lbp()))
3697 			map_region(sip, lba, num);
3698 		if (unlikely(-1 == ret)) {
3699 			ret = DID_ERROR << 16;
3700 			goto err_out_unlock;
3701 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3702 			sdev_printk(KERN_INFO, scp->device,
3703 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3704 			    my_name, num_by, ret);
3705 
3706 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3707 			     atomic_read(&sdeb_inject_pending))) {
3708 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3709 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3710 				atomic_set(&sdeb_inject_pending, 0);
3711 				ret = check_condition_result;
3712 				goto err_out_unlock;
3713 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3714 				/* Logical block guard check failed */
3715 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3716 				atomic_set(&sdeb_inject_pending, 0);
3717 				ret = illegal_condition_result;
3718 				goto err_out_unlock;
3719 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3720 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3721 				atomic_set(&sdeb_inject_pending, 0);
3722 				ret = illegal_condition_result;
3723 				goto err_out_unlock;
3724 			}
3725 		}
3726 		sg_off += num_by;
3727 		cum_lb += num;
3728 	}
3729 	ret = 0;
3730 err_out_unlock:
3731 	write_unlock(macc_lckp);
3732 err_out:
3733 	kfree(lrdp);
3734 	return ret;
3735 }
3736 
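/*
 * Data-out layout sketch for WRITE SCATTERED as parsed above: the
 * first lbdof logical blocks hold a 32 byte header followed by
 * num_lrd LBA range descriptors, each 32 bytes with the LBA at byte
 * 0 (be64) and NUM_LOGICAL_BLOCKS at byte 8 (be32); the write data
 * for each descriptor then follows back to back. With lbdof=1 and
 * 512 byte blocks the first descriptor sits at byte 32 and the data
 * area starts at byte 512 (sg_off above).
 */
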
3737 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3738 			   u32 ei_lba, bool unmap, bool ndob)
3739 {
3740 	struct scsi_device *sdp = scp->device;
3741 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3742 	unsigned long long i;
3743 	u64 block, lbaa;
3744 	u32 lb_size = sdebug_sector_size;
3745 	int ret;
3746 	struct sdeb_store_info *sip = devip2sip(devip, true);
3748 	rwlock_t *macc_lckp = &sip->macc_lck;
3749 	u8 *fs1p;
3750 	u8 *fsp;
3751 
3752 	write_lock(macc_lckp);
3753 
3754 	ret = check_device_access_params(scp, lba, num, true);
3755 	if (ret) {
3756 		write_unlock(macc_lckp);
3757 		return ret;
3758 	}
3759 
3760 	if (unmap && scsi_debug_lbp()) {
3761 		unmap_region(sip, lba, num);
3762 		goto out;
3763 	}
3764 	lbaa = lba;
3765 	block = do_div(lbaa, sdebug_store_sectors);
3766 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3767 	fsp = sip->storep;
3768 	fs1p = fsp + (block * lb_size);
3769 	if (ndob) {
3770 		memset(fs1p, 0, lb_size);
3771 		ret = 0;
3772 	} else
3773 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3774 
3775 	if (-1 == ret) {
3776 		write_unlock(macc_lckp);
3777 		return DID_ERROR << 16;
3778 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3779 		sdev_printk(KERN_INFO, scp->device,
3780 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3781 			    my_name, "write same", lb_size, ret);
3782 
3783 	/* Copy first sector to remaining blocks */
3784 	for (i = 1 ; i < num ; i++) {
3785 		lbaa = lba + i;
3786 		block = do_div(lbaa, sdebug_store_sectors);
3787 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3788 	}
3789 	if (scsi_debug_lbp())
3790 		map_region(sip, lba, num);
3791 	/* If ZBC zone then bump its write pointer */
3792 	if (sdebug_dev_is_zoned(devip))
3793 		zbc_inc_wp(devip, lba, num);
3794 out:
3795 	write_unlock(macc_lckp);
3796 
3797 	return 0;
3798 }
3799 
3800 static int resp_write_same_10(struct scsi_cmnd *scp,
3801 			      struct sdebug_dev_info *devip)
3802 {
3803 	u8 *cmd = scp->cmnd;
3804 	u32 lba;
3805 	u16 num;
3806 	u32 ei_lba = 0;
3807 	bool unmap = false;
3808 
3809 	if (cmd[1] & 0x8) {
3810 		if (sdebug_lbpws10 == 0) {
3811 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3812 			return check_condition_result;
3813 		} else
3814 			unmap = true;
3815 	}
3816 	lba = get_unaligned_be32(cmd + 2);
3817 	num = get_unaligned_be16(cmd + 7);
3818 	if (num > sdebug_write_same_length) {
3819 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3820 		return check_condition_result;
3821 	}
3822 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3823 }
3824 
3825 static int resp_write_same_16(struct scsi_cmnd *scp,
3826 			      struct sdebug_dev_info *devip)
3827 {
3828 	u8 *cmd = scp->cmnd;
3829 	u64 lba;
3830 	u32 num;
3831 	u32 ei_lba = 0;
3832 	bool unmap = false;
3833 	bool ndob = false;
3834 
3835 	if (cmd[1] & 0x8) {	/* UNMAP */
3836 		if (sdebug_lbpws == 0) {
3837 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3838 			return check_condition_result;
3839 		} else
3840 			unmap = true;
3841 	}
3842 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3843 		ndob = true;
3844 	lba = get_unaligned_be64(cmd + 2);
3845 	num = get_unaligned_be32(cmd + 10);
3846 	if (num > sdebug_write_same_length) {
3847 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3848 		return check_condition_result;
3849 	}
3850 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3851 }
3852 
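/*
 * Usage sketch (assumes the sg_write_same utility from sg3_utils;
 * check the local manual page for exact options):
 *
 *	sg_write_same --16 --unmap --lba=0x1000 --num=64 /dev/sg1
 *
 * should exercise the UNMAP=1 path above; adding --ndob omits the
 * data-out buffer so the blocks are written as zeroes.
 */
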
3853 /* Note the mode field is in the same position as the (lower) service action
3854  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3855  * each mode of this command should be reported separately; left for future. */
3856 static int resp_write_buffer(struct scsi_cmnd *scp,
3857 			     struct sdebug_dev_info *devip)
3858 {
3859 	u8 *cmd = scp->cmnd;
3860 	struct scsi_device *sdp = scp->device;
3861 	struct sdebug_dev_info *dp;
3862 	u8 mode;
3863 
3864 	mode = cmd[1] & 0x1f;
3865 	switch (mode) {
3866 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3867 		/* set UAs on this device only */
3868 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3869 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3870 		break;
3871 	case 0x5:	/* download MC, save and ACT */
3872 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3873 		break;
3874 	case 0x6:	/* download MC with offsets and ACT */
3875 		/* set UAs on most devices (LUs) in this target */
3876 		list_for_each_entry(dp,
3877 				    &devip->sdbg_host->dev_info_list,
3878 				    dev_list)
3879 			if (dp->target == sdp->id) {
3880 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3881 				if (devip != dp)
3882 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3883 						dp->uas_bm);
3884 			}
3885 		break;
3886 	case 0x7:	/* download MC with offsets, save, and ACT */
3887 		/* set UA on all devices (LUs) in this target */
3888 		list_for_each_entry(dp,
3889 				    &devip->sdbg_host->dev_info_list,
3890 				    dev_list)
3891 			if (dp->target == sdp->id)
3892 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3893 					dp->uas_bm);
3894 		break;
3895 	default:
3896 		/* do nothing for this command for other mode values */
3897 		break;
3898 	}
3899 	return 0;
3900 }
3901 
3902 static int resp_comp_write(struct scsi_cmnd *scp,
3903 			   struct sdebug_dev_info *devip)
3904 {
3905 	u8 *cmd = scp->cmnd;
3906 	u8 *arr;
3907 	struct sdeb_store_info *sip = devip2sip(devip, true);
3908 	rwlock_t *macc_lckp = &sip->macc_lck;
3909 	u64 lba;
3910 	u32 dnum;
3911 	u32 lb_size = sdebug_sector_size;
3912 	u8 num;
3913 	int ret;
3914 	int retval = 0;
3915 
3916 	lba = get_unaligned_be64(cmd + 2);
3917 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3918 	if (0 == num)
3919 		return 0;	/* degenerate case, not an error */
3920 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3921 	    (cmd[1] & 0xe0)) {
3922 		mk_sense_invalid_opcode(scp);
3923 		return check_condition_result;
3924 	}
3925 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3926 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3927 	    (cmd[1] & 0xe0) == 0)
3928 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3929 			    "to DIF device\n");
3930 	ret = check_device_access_params(scp, lba, num, false);
3931 	if (ret)
3932 		return ret;
3933 	dnum = 2 * num;
3934 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3935 	if (NULL == arr) {
3936 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3937 				INSUFF_RES_ASCQ);
3938 		return check_condition_result;
3939 	}
3940 
3941 	write_lock(macc_lckp);
3942 
3943 	ret = do_dout_fetch(scp, dnum, arr);
3944 	if (ret == -1) {
3945 		retval = DID_ERROR << 16;
3946 		goto cleanup;
3947 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3948 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3949 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3950 			    dnum * lb_size, ret);
3951 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3952 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3953 		retval = check_condition_result;
3954 		goto cleanup;
3955 	}
3956 	if (scsi_debug_lbp())
3957 		map_region(sip, lba, num);
3958 cleanup:
3959 	write_unlock(macc_lckp);
3960 	kfree(arr);
3961 	return retval;
3962 }
3963 
3964 struct unmap_block_desc {
3965 	__be64	lba;
3966 	__be32	blocks;
3967 	__be32	__reserved;
3968 };
3969 
3970 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3971 {
3972 	unsigned char *buf;
3973 	struct unmap_block_desc *desc;
3974 	struct sdeb_store_info *sip = devip2sip(devip, true);
3975 	rwlock_t *macc_lckp = &sip->macc_lck;
3976 	unsigned int i, payload_len, descriptors;
3977 	int ret;
3978 
3979 	if (!scsi_debug_lbp())
3980 		return 0;	/* fib and say it's done */
3981 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3982 	BUG_ON(scsi_bufflen(scp) != payload_len);
3983 
3984 	descriptors = (payload_len - 8) / 16;
3985 	if (descriptors > sdebug_unmap_max_desc) {
3986 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3987 		return check_condition_result;
3988 	}
3989 
3990 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3991 	if (!buf) {
3992 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3993 				INSUFF_RES_ASCQ);
3994 		return check_condition_result;
3995 	}
3996 
3997 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3998 
3999 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4000 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4001 
4002 	desc = (void *)&buf[8];
4003 
4004 	write_lock(macc_lckp);
4005 
4006 	for (i = 0 ; i < descriptors ; i++) {
4007 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4008 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4009 
4010 		ret = check_device_access_params(scp, lba, num, true);
4011 		if (ret)
4012 			goto out;
4013 
4014 		unmap_region(sip, lba, num);
4015 	}
4016 
4017 	ret = 0;
4018 
4019 out:
4020 	write_unlock(macc_lckp);
4021 	kfree(buf);
4022 
4023 	return ret;
4024 }
4025 
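/*
 * Parameter list sketch for the UNMAP handler above: an 8 byte
 * header (big endian 16 bit data length at byte 0 and block
 * descriptor data length at byte 2) followed by 16 byte descriptors
 * matching struct unmap_block_desc. Building a one descriptor list,
 * for illustration:
 *
 *	u8 buf[8 + 16] = {};
 *
 *	put_unaligned_be16(sizeof(buf) - 2, &buf[0]);
 *	put_unaligned_be16(16, &buf[2]);
 *	put_unaligned_be64(lba, &buf[8]);	// unmap_block_desc.lba
 *	put_unaligned_be32(nblks, &buf[16]);	// .blocks
 */
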
4026 #define SDEBUG_GET_LBA_STATUS_LEN 32
4027 
4028 static int resp_get_lba_status(struct scsi_cmnd *scp,
4029 			       struct sdebug_dev_info *devip)
4030 {
4031 	u8 *cmd = scp->cmnd;
4032 	u64 lba;
4033 	u32 alloc_len, mapped, num;
4034 	int ret;
4035 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4036 
4037 	lba = get_unaligned_be64(cmd + 2);
4038 	alloc_len = get_unaligned_be32(cmd + 10);
4039 
4040 	if (alloc_len < 24)
4041 		return 0;
4042 
4043 	ret = check_device_access_params(scp, lba, 1, false);
4044 	if (ret)
4045 		return ret;
4046 
4047 	if (scsi_debug_lbp()) {
4048 		struct sdeb_store_info *sip = devip2sip(devip, true);
4049 
4050 		mapped = map_state(sip, lba, &num);
4051 	} else {
4052 		mapped = 1;
4053 		/* following just in case virtual_gb changed */
4054 		sdebug_capacity = get_sdebug_capacity();
4055 		if (sdebug_capacity - lba <= 0xffffffff)
4056 			num = sdebug_capacity - lba;
4057 		else
4058 			num = 0xffffffff;
4059 	}
4060 
4061 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4062 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4063 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4064 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4065 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4066 
4067 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4068 }
4069 
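/*
 * Response layout recap (sketch): resp_get_lba_status() returns a 32
 * byte buffer with the parameter data length (20) at byte 0 and one
 * descriptor: starting LBA at byte 8, number of blocks sharing the
 * same status at byte 16, and the provisioning status at byte 20
 * (0 = mapped, 1 = deallocated).
 */
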
4070 static int resp_sync_cache(struct scsi_cmnd *scp,
4071 			   struct sdebug_dev_info *devip)
4072 {
4073 	int res = 0;
4074 	u64 lba;
4075 	u32 num_blocks;
4076 	u8 *cmd = scp->cmnd;
4077 
4078 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4079 		lba = get_unaligned_be32(cmd + 2);
4080 		num_blocks = get_unaligned_be16(cmd + 7);
4081 	} else {				/* SYNCHRONIZE_CACHE(16) */
4082 		lba = get_unaligned_be64(cmd + 2);
4083 		num_blocks = get_unaligned_be32(cmd + 10);
4084 	}
4085 	if (lba + num_blocks > sdebug_capacity) {
4086 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4087 		return check_condition_result;
4088 	}
4089 	if (!write_since_sync || (cmd[1] & 0x2))
4090 		res = SDEG_RES_IMMED_MASK;
4091 	else		/* delay if write_since_sync and IMMED clear */
4092 		write_since_sync = false;
4093 	return res;
4094 }
4095 
4096 /*
4097  * Assuming LBA+num_blocks is not out-of-range, this function returns
4098  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4099  * cache, and GOOD status otherwise. We model a disk with a big cache, so
4100  * CONDITION MET is always yielded. As a side effect, the routine tries to
4101  * bring the range of main memory into the cache associated with the CPU(s).
4102  */
4103 static int resp_pre_fetch(struct scsi_cmnd *scp,
4104 			  struct sdebug_dev_info *devip)
4105 {
4106 	int res = 0;
4107 	u64 lba;
4108 	u64 block, rest = 0;
4109 	u32 nblks;
4110 	u8 *cmd = scp->cmnd;
4111 	struct sdeb_store_info *sip = devip2sip(devip, true);
4112 	rwlock_t *macc_lckp = &sip->macc_lck;
4113 	u8 *fsp = sip->storep;
4114 
4115 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4116 		lba = get_unaligned_be32(cmd + 2);
4117 		nblks = get_unaligned_be16(cmd + 7);
4118 	} else {			/* PRE-FETCH(16) */
4119 		lba = get_unaligned_be64(cmd + 2);
4120 		nblks = get_unaligned_be32(cmd + 10);
4121 	}
4122 	if (lba + nblks > sdebug_capacity) {
4123 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4124 		return check_condition_result;
4125 	}
4126 	if (!fsp)
4127 		goto fini;
4128 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4129 	block = do_div(lba, sdebug_store_sectors);
4130 	if (block + nblks > sdebug_store_sectors)
4131 		rest = block + nblks - sdebug_store_sectors;
4132 
4133 	/* Try to bring the PRE-FETCH range into CPU's cache */
4134 	read_lock(macc_lckp);
4135 	prefetch_range(fsp + (sdebug_sector_size * block),
4136 		       (nblks - rest) * sdebug_sector_size);
4137 	if (rest)
4138 		prefetch_range(fsp, rest * sdebug_sector_size);
4139 	read_unlock(macc_lckp);
4140 fini:
4141 	if (cmd[1] & 0x2)
4142 		res = SDEG_RES_IMMED_MASK;
4143 	return res | condition_met_result;
4144 }
4145 
4146 #define RL_BUCKET_ELEMS 8
4147 
4148 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4149  * (W-LUN), the normal Linux scanning logic does not associate it with a
4150  * device (e.g. /dev/sg7). The following magic will make that association:
4151  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4152  * where <n> is a host number. If there are multiple targets in a host then
4153  * the above will associate a W-LUN to each target. To only get a W-LUN
4154  * for target 2, then use "echo '- 2 49409' > scan" .
4155  */
4156 static int resp_report_luns(struct scsi_cmnd *scp,
4157 			    struct sdebug_dev_info *devip)
4158 {
4159 	unsigned char *cmd = scp->cmnd;
4160 	unsigned int alloc_len;
4161 	unsigned char select_report;
4162 	u64 lun;
4163 	struct scsi_lun *lun_p;
4164 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4165 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4166 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4167 	unsigned int tlun_cnt;	/* total LUN count */
4168 	unsigned int rlen;	/* response length (in bytes) */
4169 	int k, j, n, res;
4170 	unsigned int off_rsp = 0;
4171 	const int sz_lun = sizeof(struct scsi_lun);
4172 
4173 	clear_luns_changed_on_target(devip);
4174 
4175 	select_report = cmd[2];
4176 	alloc_len = get_unaligned_be32(cmd + 6);
4177 
4178 	if (alloc_len < 4) {
4179 		pr_err("alloc len too small %d\n", alloc_len);
4180 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4181 		return check_condition_result;
4182 	}
4183 
4184 	switch (select_report) {
4185 	case 0:		/* all LUNs apart from W-LUNs */
4186 		lun_cnt = sdebug_max_luns;
4187 		wlun_cnt = 0;
4188 		break;
4189 	case 1:		/* only W-LUNs */
4190 		lun_cnt = 0;
4191 		wlun_cnt = 1;
4192 		break;
4193 	case 2:		/* all LUNs */
4194 		lun_cnt = sdebug_max_luns;
4195 		wlun_cnt = 1;
4196 		break;
4197 	case 0x10:	/* only administrative LUs */
4198 	case 0x11:	/* see SPC-5 */
4199 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4200 	default:
4201 		pr_debug("select report invalid %d\n", select_report);
4202 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4203 		return check_condition_result;
4204 	}
4205 
4206 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4207 		--lun_cnt;
4208 
4209 	tlun_cnt = lun_cnt + wlun_cnt;
4210 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4211 	scsi_set_resid(scp, scsi_bufflen(scp));
4212 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4213 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4214 
4215 	/* loops rely on response header and struct scsi_lun both being 8 bytes */
4216 	lun = sdebug_no_lun_0 ? 1 : 0;
4217 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4218 		memset(arr, 0, sizeof(arr));
4219 		lun_p = (struct scsi_lun *)&arr[0];
4220 		if (k == 0) {
4221 			put_unaligned_be32(rlen, &arr[0]);
4222 			++lun_p;
4223 			j = 1;
4224 		}
4225 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4226 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4227 				break;
4228 			int_to_scsilun(lun++, lun_p);
4229 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4230 				lun_p->scsi_lun[0] |= 0x40;
4231 		}
4232 		if (j < RL_BUCKET_ELEMS)
4233 			break;
4234 		n = j * sz_lun;
4235 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4236 		if (res)
4237 			return res;
4238 		off_rsp += n;
4239 	}
4240 	if (wlun_cnt) {
4241 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4242 		++j;
4243 	}
4244 	if (j > 0)
4245 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4246 	return res;
4247 }
4248 
4249 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4250 {
4251 	bool is_bytchk3 = false;
4252 	u8 bytchk;
4253 	int ret, j;
4254 	u32 vnum, a_num, off;
4255 	const u32 lb_size = sdebug_sector_size;
4256 	u64 lba;
4257 	u8 *arr;
4258 	u8 *cmd = scp->cmnd;
4259 	struct sdeb_store_info *sip = devip2sip(devip, true);
4260 	rwlock_t *macc_lckp = &sip->macc_lck;
4261 
4262 	bytchk = (cmd[1] >> 1) & 0x3;
4263 	if (bytchk == 0) {
4264 		return 0;	/* always claim internal verify okay */
4265 	} else if (bytchk == 2) {
4266 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4267 		return check_condition_result;
4268 	} else if (bytchk == 3) {
4269 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4270 	}
4271 	switch (cmd[0]) {
4272 	case VERIFY_16:
4273 		lba = get_unaligned_be64(cmd + 2);
4274 		vnum = get_unaligned_be32(cmd + 10);
4275 		break;
4276 	case VERIFY:		/* is VERIFY(10) */
4277 		lba = get_unaligned_be32(cmd + 2);
4278 		vnum = get_unaligned_be16(cmd + 7);
4279 		break;
4280 	default:
4281 		mk_sense_invalid_opcode(scp);
4282 		return check_condition_result;
4283 	}
4284 	if (vnum == 0)
4285 		return 0;	/* not an error */
4286 	a_num = is_bytchk3 ? 1 : vnum;
4287 	/* Treat following check like one for read (i.e. no write) access */
4288 	ret = check_device_access_params(scp, lba, a_num, false);
4289 	if (ret)
4290 		return ret;
4291 
4292 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4293 	if (!arr) {
4294 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4295 				INSUFF_RES_ASCQ);
4296 		return check_condition_result;
4297 	}
4298 	/* Not changing store, so only need read access */
4299 	read_lock(macc_lckp);
4300 
4301 	ret = do_dout_fetch(scp, a_num, arr);
4302 	if (ret == -1) {
4303 		ret = DID_ERROR << 16;
4304 		goto cleanup;
4305 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4306 		sdev_printk(KERN_INFO, scp->device,
4307 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4308 			    my_name, __func__, a_num * lb_size, ret);
4309 	}
4310 	if (is_bytchk3) {
4311 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4312 			memcpy(arr + off, arr, lb_size);
4313 	}
4314 	ret = 0;
4315 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4316 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4317 		ret = check_condition_result;
4318 		goto cleanup;
4319 	}
4320 cleanup:
4321 	read_unlock(macc_lckp);
4322 	kfree(arr);
4323 	return ret;
4324 }
4325 
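/*
 * Note on the BYTCHK=3 path above (sketch): only one block of verify
 * data is fetched and then replicated across the arr buffer, so a
 * VERIFY with BYTCHK=3 over vnum blocks compares every block in the
 * range against that single block image; compare_only=true keeps
 * comp_write_worker() from modifying the store.
 */
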
4326 #define RZONES_DESC_HD 64
4327 
4328 /* Report zones depending on start LBA and reporting options */
4329 static int resp_report_zones(struct scsi_cmnd *scp,
4330 			     struct sdebug_dev_info *devip)
4331 {
4332 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4333 	int ret = 0;
4334 	u32 alloc_len, rep_opts, rep_len;
4335 	bool partial;
4336 	u64 lba, zs_lba;
4337 	u8 *arr = NULL, *desc;
4338 	u8 *cmd = scp->cmnd;
4339 	struct sdeb_zone_state *zsp;
4340 	struct sdeb_store_info *sip = devip2sip(devip, false);
4341 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4342 
4343 	if (!sdebug_dev_is_zoned(devip)) {
4344 		mk_sense_invalid_opcode(scp);
4345 		return check_condition_result;
4346 	}
4347 	zs_lba = get_unaligned_be64(cmd + 2);
4348 	alloc_len = get_unaligned_be32(cmd + 10);
4349 	if (alloc_len == 0)
4350 		return 0;	/* not an error */
4351 	rep_opts = cmd[14] & 0x3f;
4352 	partial = cmd[14] & 0x80;
4353 
4354 	if (zs_lba >= sdebug_capacity) {
4355 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4356 		return check_condition_result;
4357 	}
4358 
4359 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4360 	rep_max_zones = alloc_len < 64 ? 0 :
4361 		min((alloc_len - 64) >> ilog2(RZONES_DESC_HD), max_zones);
4362 
4363 	arr = kzalloc(max_t(u32, alloc_len, 64), GFP_ATOMIC);
4364 	if (!arr) {
4365 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4366 				INSUFF_RES_ASCQ);
4367 		return check_condition_result;
4368 	}
4369 
4370 	read_lock(macc_lckp);
4371 
4372 	desc = arr + 64;
4373 	for (i = 0; i < max_zones; i++) {
4374 		lba = zs_lba + devip->zsize * i;
4375 		if (lba > sdebug_capacity)
4376 			break;
4377 		zsp = zbc_zone(devip, lba);
4378 		switch (rep_opts) {
4379 		case 0x00:
4380 			/* All zones */
4381 			break;
4382 		case 0x01:
4383 			/* Empty zones */
4384 			if (zsp->z_cond != ZC1_EMPTY)
4385 				continue;
4386 			break;
4387 		case 0x02:
4388 			/* Implicit open zones */
4389 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4390 				continue;
4391 			break;
4392 		case 0x03:
4393 			/* Explicit open zones */
4394 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4395 				continue;
4396 			break;
4397 		case 0x04:
4398 			/* Closed zones */
4399 			if (zsp->z_cond != ZC4_CLOSED)
4400 				continue;
4401 			break;
4402 		case 0x05:
4403 			/* Full zones */
4404 			if (zsp->z_cond != ZC5_FULL)
4405 				continue;
4406 			break;
4407 		case 0x06:
4408 		case 0x07:
4409 		case 0x10:
4410 			/*
4411 			 * Read-only, offline, reset WP recommended are
4412 			 * not emulated: no zones to report;
4413 			 */
4414 			continue;
4415 		case 0x11:
4416 			/* non-seq-resource set */
4417 			if (!zsp->z_non_seq_resource)
4418 				continue;
4419 			break;
4420 		case 0x3f:
4421 			/* Not write pointer (conventional) zones */
4422 			if (!zbc_zone_is_conv(zsp))
4423 				continue;
4424 			break;
4425 		default:
4426 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4427 					INVALID_FIELD_IN_CDB, 0);
4428 			ret = check_condition_result;
4429 			goto fini;
4430 		}
4431 
4432 		if (nrz < rep_max_zones) {
4433 			/* Fill zone descriptor */
4434 			desc[0] = zsp->z_type;
4435 			desc[1] = zsp->z_cond << 4;
4436 			if (zsp->z_non_seq_resource)
4437 				desc[1] |= 1 << 1;
4438 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4439 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4440 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4441 			desc += 64;
4442 		}
4443 
4444 		if (partial && nrz >= rep_max_zones)
4445 			break;
4446 
4447 		nrz++;
4448 	}
4449 
4450 	/* Report header */
4451 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4452 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4453 
4454 	rep_len = (unsigned long)desc - (unsigned long)arr;
4455 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4456 
4457 fini:
4458 	read_unlock(macc_lckp);
4459 	kfree(arr);
4460 	return ret;
4461 }
4462 
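/*
 * Sizing sketch for the response built above: a 64 byte header plus
 * one 64 byte descriptor per reported zone, so an allocation length
 * of 4096 leaves room for (4096 - 64) / 64 = 63 descriptors.
 * rep_max_zones caps how many descriptors are filled, while nrz
 * keeps counting matching zones for the header (unless PARTIAL stops
 * the scan early).
 */
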
4463 /* Logic transplanted from tcmu-runner, file_zbc.c */
4464 static void zbc_open_all(struct sdebug_dev_info *devip)
4465 {
4466 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4467 	unsigned int i;
4468 
4469 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4470 		if (zsp->z_cond == ZC4_CLOSED)
4471 			zbc_open_zone(devip, &devip->zstate[i], true);
4472 	}
4473 }
4474 
4475 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4476 {
4477 	int res = 0;
4478 	u64 z_id;
4479 	enum sdebug_z_cond zc;
4480 	u8 *cmd = scp->cmnd;
4481 	struct sdeb_zone_state *zsp;
4482 	bool all = cmd[14] & 0x01;
4483 	struct sdeb_store_info *sip = devip2sip(devip, false);
4484 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4485 
4486 	if (!sdebug_dev_is_zoned(devip)) {
4487 		mk_sense_invalid_opcode(scp);
4488 		return check_condition_result;
4489 	}
4490 
4491 	write_lock(macc_lckp);
4492 
4493 	if (all) {
4494 		/* Check if all closed zones can be opened */
4495 		if (devip->max_open &&
4496 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4497 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4498 					INSUFF_ZONE_ASCQ);
4499 			res = check_condition_result;
4500 			goto fini;
4501 		}
4502 		/* Open all closed zones */
4503 		zbc_open_all(devip);
4504 		goto fini;
4505 	}
4506 
4507 	/* Open the specified zone */
4508 	z_id = get_unaligned_be64(cmd + 2);
4509 	if (z_id >= sdebug_capacity) {
4510 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4511 		res = check_condition_result;
4512 		goto fini;
4513 	}
4514 
4515 	zsp = zbc_zone(devip, z_id);
4516 	if (z_id != zsp->z_start) {
4517 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4518 		res = check_condition_result;
4519 		goto fini;
4520 	}
4521 	if (zbc_zone_is_conv(zsp)) {
4522 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4523 		res = check_condition_result;
4524 		goto fini;
4525 	}
4526 
4527 	zc = zsp->z_cond;
4528 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4529 		goto fini;
4530 
4531 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4532 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4533 				INSUFF_ZONE_ASCQ);
4534 		res = check_condition_result;
4535 		goto fini;
4536 	}
4537 
4538 	zbc_open_zone(devip, zsp, true);
4539 fini:
4540 	write_unlock(macc_lckp);
4541 	return res;
4542 }
4543 
4544 static void zbc_close_all(struct sdebug_dev_info *devip)
4545 {
4546 	unsigned int i;
4547 
4548 	for (i = 0; i < devip->nr_zones; i++)
4549 		zbc_close_zone(devip, &devip->zstate[i]);
4550 }
4551 
4552 static int resp_close_zone(struct scsi_cmnd *scp,
4553 			   struct sdebug_dev_info *devip)
4554 {
4555 	int res = 0;
4556 	u64 z_id;
4557 	u8 *cmd = scp->cmnd;
4558 	struct sdeb_zone_state *zsp;
4559 	bool all = cmd[14] & 0x01;
4560 	struct sdeb_store_info *sip = devip2sip(devip, false);
4561 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4562 
4563 	if (!sdebug_dev_is_zoned(devip)) {
4564 		mk_sense_invalid_opcode(scp);
4565 		return check_condition_result;
4566 	}
4567 
4568 	write_lock(macc_lckp);
4569 
4570 	if (all) {
4571 		zbc_close_all(devip);
4572 		goto fini;
4573 	}
4574 
4575 	/* Close specified zone */
4576 	z_id = get_unaligned_be64(cmd + 2);
4577 	if (z_id >= sdebug_capacity) {
4578 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4579 		res = check_condition_result;
4580 		goto fini;
4581 	}
4582 
4583 	zsp = zbc_zone(devip, z_id);
4584 	if (z_id != zsp->z_start) {
4585 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4586 		res = check_condition_result;
4587 		goto fini;
4588 	}
4589 	if (zbc_zone_is_conv(zsp)) {
4590 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4591 		res = check_condition_result;
4592 		goto fini;
4593 	}
4594 
4595 	zbc_close_zone(devip, zsp);
4596 fini:
4597 	write_unlock(macc_lckp);
4598 	return res;
4599 }
4600 
4601 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4602 			    struct sdeb_zone_state *zsp, bool empty)
4603 {
4604 	enum sdebug_z_cond zc = zsp->z_cond;
4605 
4606 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4607 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4608 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4609 			zbc_close_zone(devip, zsp);
4610 		if (zsp->z_cond == ZC4_CLOSED)
4611 			devip->nr_closed--;
4612 		zsp->z_wp = zsp->z_start + zsp->z_size;
4613 		zsp->z_cond = ZC5_FULL;
4614 	}
4615 }
4616 
4617 static void zbc_finish_all(struct sdebug_dev_info *devip)
4618 {
4619 	unsigned int i;
4620 
4621 	for (i = 0; i < devip->nr_zones; i++)
4622 		zbc_finish_zone(devip, &devip->zstate[i], false);
4623 }
4624 
4625 static int resp_finish_zone(struct scsi_cmnd *scp,
4626 			    struct sdebug_dev_info *devip)
4627 {
4628 	struct sdeb_zone_state *zsp;
4629 	int res = 0;
4630 	u64 z_id;
4631 	u8 *cmd = scp->cmnd;
4632 	bool all = cmd[14] & 0x01;
4633 	struct sdeb_store_info *sip = devip2sip(devip, false);
4634 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4635 
4636 	if (!sdebug_dev_is_zoned(devip)) {
4637 		mk_sense_invalid_opcode(scp);
4638 		return check_condition_result;
4639 	}
4640 
4641 	write_lock(macc_lckp);
4642 
4643 	if (all) {
4644 		zbc_finish_all(devip);
4645 		goto fini;
4646 	}
4647 
4648 	/* Finish the specified zone */
4649 	z_id = get_unaligned_be64(cmd + 2);
4650 	if (z_id >= sdebug_capacity) {
4651 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4652 		res = check_condition_result;
4653 		goto fini;
4654 	}
4655 
4656 	zsp = zbc_zone(devip, z_id);
4657 	if (z_id != zsp->z_start) {
4658 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4659 		res = check_condition_result;
4660 		goto fini;
4661 	}
4662 	if (zbc_zone_is_conv(zsp)) {
4663 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 		res = check_condition_result;
4665 		goto fini;
4666 	}
4667 
4668 	zbc_finish_zone(devip, zsp, true);
4669 fini:
4670 	write_unlock(macc_lckp);
4671 	return res;
4672 }
4673 
4674 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4675 			 struct sdeb_zone_state *zsp)
4676 {
4677 	enum sdebug_z_cond zc;
4678 	struct sdeb_store_info *sip = devip2sip(devip, false);
4679 
4680 	if (zbc_zone_is_conv(zsp))
4681 		return;
4682 
4683 	zc = zsp->z_cond;
4684 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4685 		zbc_close_zone(devip, zsp);
4686 
4687 	if (zsp->z_cond == ZC4_CLOSED)
4688 		devip->nr_closed--;
4689 
4690 	if (zsp->z_wp > zsp->z_start)
4691 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4692 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4693 
4694 	zsp->z_non_seq_resource = false;
4695 	zsp->z_wp = zsp->z_start;
4696 	zsp->z_cond = ZC1_EMPTY;
4697 }
4698 
4699 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4700 {
4701 	unsigned int i;
4702 
4703 	for (i = 0; i < devip->nr_zones; i++)
4704 		zbc_rwp_zone(devip, &devip->zstate[i]);
4705 }
4706 
4707 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4708 {
4709 	struct sdeb_zone_state *zsp;
4710 	int res = 0;
4711 	u64 z_id;
4712 	u8 *cmd = scp->cmnd;
4713 	bool all = cmd[14] & 0x01;
4714 	struct sdeb_store_info *sip = devip2sip(devip, false);
4715 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4716 
4717 	if (!sdebug_dev_is_zoned(devip)) {
4718 		mk_sense_invalid_opcode(scp);
4719 		return check_condition_result;
4720 	}
4721 
4722 	write_lock(macc_lckp);
4723 
4724 	if (all) {
4725 		zbc_rwp_all(devip);
4726 		goto fini;
4727 	}
4728 
4729 	z_id = get_unaligned_be64(cmd + 2);
4730 	if (z_id >= sdebug_capacity) {
4731 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4732 		res = check_condition_result;
4733 		goto fini;
4734 	}
4735 
4736 	zsp = zbc_zone(devip, z_id);
4737 	if (z_id != zsp->z_start) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 	if (zbc_zone_is_conv(zsp)) {
4743 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4744 		res = check_condition_result;
4745 		goto fini;
4746 	}
4747 
4748 	zbc_rwp_zone(devip, zsp);
4749 fini:
4750 	write_unlock(macc_lckp);
4751 	return res;
4752 }
4753 
4754 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4755 {
4756 	u16 hwq;
4757 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4758 
4759 	hwq = blk_mq_unique_tag_to_hwq(tag);
4760 
4761 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4762 	if (WARN_ON_ONCE(hwq >= submit_queues))
4763 		hwq = 0;
4764 
4765 	return sdebug_q_arr + hwq;
4766 }
4767 
4768 static u32 get_tag(struct scsi_cmnd *cmnd)
4769 {
4770 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4771 }
4772 
4773 /* Queued (deferred) command completions converge here. */
4774 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4775 {
4776 	bool aborted = sd_dp->aborted;
4777 	int qc_idx;
4778 	int retiring = 0;
4779 	unsigned long iflags;
4780 	struct sdebug_queue *sqp;
4781 	struct sdebug_queued_cmd *sqcp;
4782 	struct scsi_cmnd *scp;
4783 	struct sdebug_dev_info *devip;
4784 
4785 	if (unlikely(aborted))
4786 		sd_dp->aborted = false;
4787 	qc_idx = sd_dp->qc_idx;
4788 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4789 	if (sdebug_statistics) {
4790 		atomic_inc(&sdebug_completions);
4791 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4792 			atomic_inc(&sdebug_miss_cpus);
4793 	}
4794 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4795 		pr_err("wild qc_idx=%d\n", qc_idx);
4796 		return;
4797 	}
4798 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4799 	sd_dp->defer_t = SDEB_DEFER_NONE;
4800 	sqcp = &sqp->qc_arr[qc_idx];
4801 	scp = sqcp->a_cmnd;
4802 	if (unlikely(scp == NULL)) {
4803 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4804 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4805 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4806 		return;
4807 	}
4808 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4809 	if (likely(devip))
4810 		atomic_dec(&devip->num_in_q);
4811 	else
4812 		pr_err("devip=NULL\n");
4813 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4814 		retiring = 1;
4815 
4816 	sqcp->a_cmnd = NULL;
4817 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4818 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4819 		pr_err("Unexpected completion\n");
4820 		return;
4821 	}
4822 
4823 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4824 		int k, retval;
4825 
4826 		retval = atomic_read(&retired_max_queue);
4827 		if (qc_idx >= retval) {
4828 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4829 			pr_err("index %d too large\n", retval);
4830 			return;
4831 		}
4832 		k = find_last_bit(sqp->in_use_bm, retval);
4833 		if ((k < sdebug_max_queue) || (k == retval))
4834 			atomic_set(&retired_max_queue, 0);
4835 		else
4836 			atomic_set(&retired_max_queue, k + 1);
4837 	}
4838 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4839 	if (unlikely(aborted)) {
4840 		if (sdebug_verbose)
4841 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4842 		return;
4843 	}
4844 	scp->scsi_done(scp); /* callback to mid level */
4845 }
4846 
4847 /* When high resolution timer goes off this function is called. */
4848 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4849 {
4850 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4851 						  hrt);
4852 	sdebug_q_cmd_complete(sd_dp);
4853 	return HRTIMER_NORESTART;
4854 }
4855 
4856 /* When work queue schedules work, it calls this function. */
4857 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4858 {
4859 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4860 						  ew.work);
4861 	sdebug_q_cmd_complete(sd_dp);
4862 }
4863 
4864 static bool got_shared_uuid;
4865 static uuid_t shared_uuid;
4866 
4867 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4868 {
4869 	struct sdeb_zone_state *zsp;
4870 	sector_t capacity = get_sdebug_capacity();
4871 	sector_t zstart = 0;
4872 	unsigned int i;
4873 
4874 	/*
4875 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4876 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4877 	 * use the specified zone size checking that at least 2 zones can be
4878 	 * created for the device.
4879 	 */
4880 	if (!sdeb_zbc_zone_size_mb) {
4881 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4882 			>> ilog2(sdebug_sector_size);
4883 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4884 			devip->zsize >>= 1;
4885 		if (devip->zsize < 2) {
4886 			pr_err("Device capacity too small\n");
4887 			return -EINVAL;
4888 		}
4889 	} else {
4890 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4891 			pr_err("Zone size is not a power of 2\n");
4892 			return -EINVAL;
4893 		}
4894 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4895 			>> ilog2(sdebug_sector_size);
4896 		if (devip->zsize >= capacity) {
4897 			pr_err("Zone size too large for device capacity\n");
4898 			return -EINVAL;
4899 		}
4900 	}
4901 
4902 	devip->zsize_shift = ilog2(devip->zsize);
4903 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4904 
4905 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4906 		pr_err("Number of conventional zones too large\n");
4907 		return -EINVAL;
4908 	}
4909 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4910 
4911 	if (devip->zmodel == BLK_ZONED_HM) {
4912 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4913 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4914 			devip->max_open = (devip->nr_zones - 1) / 2;
4915 		else
4916 			devip->max_open = sdeb_zbc_max_open;
4917 	}
4918 
4919 	devip->zstate = kcalloc(devip->nr_zones,
4920 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4921 	if (!devip->zstate)
4922 		return -ENOMEM;
4923 
4924 	for (i = 0; i < devip->nr_zones; i++) {
4925 		zsp = &devip->zstate[i];
4926 
4927 		zsp->z_start = zstart;
4928 
4929 		if (i < devip->nr_conv_zones) {
4930 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4931 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4932 			zsp->z_wp = (sector_t)-1;
4933 		} else {
4934 			if (devip->zmodel == BLK_ZONED_HM)
4935 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4936 			else
4937 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4938 			zsp->z_cond = ZC1_EMPTY;
4939 			zsp->z_wp = zsp->z_start;
4940 		}
4941 
4942 		if (zsp->z_start + devip->zsize < capacity)
4943 			zsp->z_size = devip->zsize;
4944 		else
4945 			zsp->z_size = capacity - zsp->z_start;
4946 
4947 		zstart += zsp->z_size;
4948 	}
4949 
4950 	return 0;
4951 }
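
/*
 * Worked example (hypothetical values): with a 512 byte logical block
 * size and zone_size_mb=4, zsize = (4 * SZ_1M) >> 9 = 8192 blocks and
 * zsize_shift = 13. A store of 65536 blocks (32 MiB) then yields
 * nr_zones = (65536 + 8191) >> 13 = 8; with zone_nr_conv=1 the first
 * zone is conventional and the remaining 7 are sequential (SWR for the
 * host-managed model, SWP otherwise).
 */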
4952 
4953 static struct sdebug_dev_info *sdebug_device_create(
4954 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4955 {
4956 	struct sdebug_dev_info *devip;
4957 
4958 	devip = kzalloc(sizeof(*devip), flags);
4959 	if (devip) {
4960 		if (sdebug_uuid_ctl == 1)
4961 			uuid_gen(&devip->lu_name);
4962 		else if (sdebug_uuid_ctl == 2) {
4963 			if (got_shared_uuid)
4964 				devip->lu_name = shared_uuid;
4965 			else {
4966 				uuid_gen(&shared_uuid);
4967 				got_shared_uuid = true;
4968 				devip->lu_name = shared_uuid;
4969 			}
4970 		}
4971 		devip->sdbg_host = sdbg_host;
4972 		if (sdeb_zbc_in_use) {
4973 			devip->zmodel = sdeb_zbc_model;
4974 			if (sdebug_device_create_zones(devip)) {
4975 				kfree(devip);
4976 				return NULL;
4977 			}
4978 		} else {
4979 			devip->zmodel = BLK_ZONED_NONE;
4980 		}
4982 		devip->create_ts = ktime_get_boottime();
4983 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4984 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4985 	}
4986 	return devip;
4987 }
4988 
4989 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4990 {
4991 	struct sdebug_host_info *sdbg_host;
4992 	struct sdebug_dev_info *open_devip = NULL;
4993 	struct sdebug_dev_info *devip;
4994 
4995 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4996 	if (!sdbg_host) {
4997 		pr_err("Host info NULL\n");
4998 		return NULL;
4999 	}
5000 
5001 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5002 		if ((devip->used) && (devip->channel == sdev->channel) &&
5003 		    (devip->target == sdev->id) &&
5004 		    (devip->lun == sdev->lun))
5005 			return devip;
5006 		else {
5007 			if ((!devip->used) && (!open_devip))
5008 				open_devip = devip;
5009 		}
5010 	}
5011 	if (!open_devip) { /* try and make a new one */
5012 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5013 		if (!open_devip) {
5014 			pr_err("out of memory at line %d\n", __LINE__);
5015 			return NULL;
5016 		}
5017 	}
5018 
5019 	open_devip->channel = sdev->channel;
5020 	open_devip->target = sdev->id;
5021 	open_devip->lun = sdev->lun;
5022 	open_devip->sdbg_host = sdbg_host;
5023 	atomic_set(&open_devip->num_in_q, 0);
5024 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5025 	open_devip->used = true;
5026 	return open_devip;
5027 }
5028 
5029 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5030 {
5031 	if (sdebug_verbose)
5032 		pr_info("slave_alloc <%u %u %u %llu>\n",
5033 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5034 	return 0;
5035 }
5036 
5037 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5038 {
5039 	struct sdebug_dev_info *devip =
5040 			(struct sdebug_dev_info *)sdp->hostdata;
5041 
5042 	if (sdebug_verbose)
5043 		pr_info("slave_configure <%u %u %u %llu>\n",
5044 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5045 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5046 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5047 	if (devip == NULL) {
5048 		devip = find_build_dev_info(sdp);
5049 		if (devip == NULL)
5050 			return 1;  /* no resources, will be marked offline */
5051 	}
5052 	sdp->hostdata = devip;
5053 	if (sdebug_no_uld)
5054 		sdp->no_uld_attach = 1;
5055 	config_cdb_len(sdp);
5056 	return 0;
5057 }
5058 
5059 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5060 {
5061 	struct sdebug_dev_info *devip =
5062 		(struct sdebug_dev_info *)sdp->hostdata;
5063 
5064 	if (sdebug_verbose)
5065 		pr_info("slave_destroy <%u %u %u %llu>\n",
5066 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5067 	if (devip) {
5068 		/* make this slot available for re-use */
5069 		devip->used = false;
5070 		sdp->hostdata = NULL;
5071 	}
5072 }
5073 
5074 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5075 			   enum sdeb_defer_type defer_t)
5076 {
5077 	if (!sd_dp)
5078 		return;
5079 	if (defer_t == SDEB_DEFER_HRT)
5080 		hrtimer_cancel(&sd_dp->hrt);
5081 	else if (defer_t == SDEB_DEFER_WQ)
5082 		cancel_work_sync(&sd_dp->ew.work);
5083 }
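
/*
 * Note: stop_qc_helper() can block: cancel_work_sync() may sleep and
 * hrtimer_cancel() waits for a running timer callback to finish. The
 * callers below therefore drop sqp->qc_lock before invoking it.
 */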
5084 
5085 /* If @cmnd is found, deletes its timer or work queue and returns true;
5086    else returns false. */
5087 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5088 {
5089 	unsigned long iflags;
5090 	int j, k, qmax, r_qmax;
5091 	enum sdeb_defer_type l_defer_t;
5092 	struct sdebug_queue *sqp;
5093 	struct sdebug_queued_cmd *sqcp;
5094 	struct sdebug_dev_info *devip;
5095 	struct sdebug_defer *sd_dp;
5096 
5097 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5098 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5099 		qmax = sdebug_max_queue;
5100 		r_qmax = atomic_read(&retired_max_queue);
5101 		if (r_qmax > qmax)
5102 			qmax = r_qmax;
5103 		for (k = 0; k < qmax; ++k) {
5104 			if (test_bit(k, sqp->in_use_bm)) {
5105 				sqcp = &sqp->qc_arr[k];
5106 				if (cmnd != sqcp->a_cmnd)
5107 					continue;
5108 				/* found */
5109 				devip = (struct sdebug_dev_info *)
5110 						cmnd->device->hostdata;
5111 				if (devip)
5112 					atomic_dec(&devip->num_in_q);
5113 				sqcp->a_cmnd = NULL;
5114 				sd_dp = sqcp->sd_dp;
5115 				if (sd_dp) {
5116 					l_defer_t = sd_dp->defer_t;
5117 					sd_dp->defer_t = SDEB_DEFER_NONE;
5118 				} else
5119 					l_defer_t = SDEB_DEFER_NONE;
5120 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5121 				stop_qc_helper(sd_dp, l_defer_t);
5122 				clear_bit(k, sqp->in_use_bm);
5123 				return true;
5124 			}
5125 		}
5126 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5127 	}
5128 	return false;
5129 }
5130 
5131 /* Deletes (stops) timers or work queues of all queued commands */
5132 static void stop_all_queued(void)
5133 {
5134 	unsigned long iflags;
5135 	int j, k;
5136 	enum sdeb_defer_type l_defer_t;
5137 	struct sdebug_queue *sqp;
5138 	struct sdebug_queued_cmd *sqcp;
5139 	struct sdebug_dev_info *devip;
5140 	struct sdebug_defer *sd_dp;
5141 
5142 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5143 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5144 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5145 			if (test_bit(k, sqp->in_use_bm)) {
5146 				sqcp = &sqp->qc_arr[k];
5147 				if (sqcp->a_cmnd == NULL)
5148 					continue;
5149 				devip = (struct sdebug_dev_info *)
5150 					sqcp->a_cmnd->device->hostdata;
5151 				if (devip)
5152 					atomic_dec(&devip->num_in_q);
5153 				sqcp->a_cmnd = NULL;
5154 				sd_dp = sqcp->sd_dp;
5155 				if (sd_dp) {
5156 					l_defer_t = sd_dp->defer_t;
5157 					sd_dp->defer_t = SDEB_DEFER_NONE;
5158 				} else
5159 					l_defer_t = SDEB_DEFER_NONE;
5160 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5161 				stop_qc_helper(sd_dp, l_defer_t);
5162 				clear_bit(k, sqp->in_use_bm);
5163 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5164 			}
5165 		}
5166 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5167 	}
5168 }
5169 
5170 /* Free queued command memory on heap */
5171 static void free_all_queued(void)
5172 {
5173 	int j, k;
5174 	struct sdebug_queue *sqp;
5175 	struct sdebug_queued_cmd *sqcp;
5176 
5177 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5178 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5179 			sqcp = &sqp->qc_arr[k];
5180 			kfree(sqcp->sd_dp);
5181 			sqcp->sd_dp = NULL;
5182 		}
5183 	}
5184 }
5185 
5186 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5187 {
5188 	bool ok;
5189 
5190 	++num_aborts;
5191 	if (SCpnt) {
5192 		ok = stop_queued_cmnd(SCpnt);
5193 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5194 			sdev_printk(KERN_INFO, SCpnt->device,
5195 				    "%s: command%s found\n", __func__,
5196 				    ok ? "" : " not");
5197 	}
5198 	return SUCCESS;
5199 }
5200 
5201 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5202 {
5203 	++num_dev_resets;
5204 	if (SCpnt && SCpnt->device) {
5205 		struct scsi_device *sdp = SCpnt->device;
5206 		struct sdebug_dev_info *devip =
5207 				(struct sdebug_dev_info *)sdp->hostdata;
5208 
5209 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5210 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5211 		if (devip)
5212 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5213 	}
5214 	return SUCCESS;
5215 }
5216 
5217 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5218 {
5219 	struct sdebug_host_info *sdbg_host;
5220 	struct sdebug_dev_info *devip;
5221 	struct scsi_device *sdp;
5222 	struct Scsi_Host *hp;
5223 	int k = 0;
5224 
5225 	++num_target_resets;
5226 	if (!SCpnt)
5227 		goto lie;
5228 	sdp = SCpnt->device;
5229 	if (!sdp)
5230 		goto lie;
5231 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5232 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5233 	hp = sdp->host;
5234 	if (!hp)
5235 		goto lie;
5236 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5237 	if (sdbg_host) {
5238 		list_for_each_entry(devip,
5239 				    &sdbg_host->dev_info_list,
5240 				    dev_list)
5241 			if (devip->target == sdp->id) {
5242 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5243 				++k;
5244 			}
5245 	}
5246 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5247 		sdev_printk(KERN_INFO, sdp,
5248 			    "%s: %d device(s) found in target\n", __func__, k);
5249 lie:
5250 	return SUCCESS;
5251 }
5252 
5253 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5254 {
5255 	struct sdebug_host_info *sdbg_host;
5256 	struct sdebug_dev_info *devip;
5257 	struct scsi_device *sdp;
5258 	struct Scsi_Host *hp;
5259 	int k = 0;
5260 
5261 	++num_bus_resets;
5262 	if (!(SCpnt && SCpnt->device))
5263 		goto lie;
5264 	sdp = SCpnt->device;
5265 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5266 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5267 	hp = sdp->host;
5268 	if (hp) {
5269 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5270 		if (sdbg_host) {
5271 			list_for_each_entry(devip,
5272 					    &sdbg_host->dev_info_list,
5273 					    dev_list) {
5274 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5275 				++k;
5276 			}
5277 		}
5278 	}
5279 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5280 		sdev_printk(KERN_INFO, sdp,
5281 			    "%s: %d device(s) found in host\n", __func__, k);
5282 lie:
5283 	return SUCCESS;
5284 }
5285 
5286 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5287 {
5288 	struct sdebug_host_info *sdbg_host;
5289 	struct sdebug_dev_info *devip;
5290 	int k = 0;
5291 
5292 	++num_host_resets;
5293 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5294 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5295 	spin_lock(&sdebug_host_list_lock);
5296 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5297 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5298 				    dev_list) {
5299 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5300 			++k;
5301 		}
5302 	}
5303 	spin_unlock(&sdebug_host_list_lock);
5304 	stop_all_queued();
5305 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5306 		sdev_printk(KERN_INFO, SCpnt->device,
5307 			    "%s: %d device(s) found\n", __func__, k);
5308 	return SUCCESS;
5309 }
5310 
5311 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5312 {
5313 	struct msdos_partition *pp;
5314 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5315 	int sectors_per_part, num_sectors, k;
5316 	int heads_by_sects, start_sec, end_sec;
5317 
5318 	/* assume partition table already zeroed */
5319 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5320 		return;
5321 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5322 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5323 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5324 	}
5325 	num_sectors = (int)get_sdebug_capacity();
5326 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5327 			   / sdebug_num_parts;
5328 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5329 	starts[0] = sdebug_sectors_per;
5330 	max_part_secs = sectors_per_part;
5331 	for (k = 1; k < sdebug_num_parts; ++k) {
5332 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5333 			    * heads_by_sects;
5334 		if (starts[k] - starts[k - 1] < max_part_secs)
5335 			max_part_secs = starts[k] - starts[k - 1];
5336 	}
5337 	starts[sdebug_num_parts] = num_sectors;
5338 	starts[sdebug_num_parts + 1] = 0;
5339 
5340 	ramp[510] = 0x55;	/* magic partition markings */
5341 	ramp[511] = 0xAA;
5342 	pp = (struct msdos_partition *)(ramp + 0x1be);
5343 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5344 		start_sec = starts[k];
5345 		end_sec = starts[k] + max_part_secs - 1;
5346 		pp->boot_ind = 0;
5347 
5348 		pp->cyl = start_sec / heads_by_sects;
5349 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5350 			   / sdebug_sectors_per;
5351 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5352 
5353 		pp->end_cyl = end_sec / heads_by_sects;
5354 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5355 			       / sdebug_sectors_per;
5356 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5357 
5358 		pp->start_sect = cpu_to_le32(start_sec);
5359 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5360 		pp->sys_ind = 0x83;	/* plain Linux partition */
5361 	}
5362 }
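
/*
 * Worked CHS example (hypothetical geometry): with sdebug_sectors_per=32
 * and sdebug_heads=8, heads_by_sects = 256. A partition starting at
 * sector 256 is then encoded as cyl = 256 / 256 = 1, head =
 * (256 - 1 * 256) / 32 = 0 and sector = (256 % 32) + 1 = 1 in its
 * msdos_partition entry.
 */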
5363 
5364 static void block_unblock_all_queues(bool block)
5365 {
5366 	int j;
5367 	struct sdebug_queue *sqp;
5368 
5369 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5370 		atomic_set(&sqp->blocked, (int)block);
5371 }
5372 
5373 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5374  * commands will be processed normally before triggers occur.
5375  */
5376 static void tweak_cmnd_count(void)
5377 {
5378 	int count, modulo;
5379 
5380 	modulo = abs(sdebug_every_nth);
5381 	if (modulo < 2)
5382 		return;
5383 	block_unblock_all_queues(true);
5384 	count = atomic_read(&sdebug_cmnd_count);
5385 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5386 	block_unblock_all_queues(false);
5387 }
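
/*
 * Example: with every_nth=100 and sdebug_cmnd_count at 12345, the count
 * is rounded down to 12300, so 99 more commands are processed normally
 * before the next trigger at command 12400.
 */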
5388 
5389 static void clear_queue_stats(void)
5390 {
5391 	atomic_set(&sdebug_cmnd_count, 0);
5392 	atomic_set(&sdebug_completions, 0);
5393 	atomic_set(&sdebug_miss_cpus, 0);
5394 	atomic_set(&sdebug_a_tsf, 0);
5395 }
5396 
5397 static bool inject_on_this_cmd(void)
5398 {
5399 	if (sdebug_every_nth == 0)
5400 		return false;
5401 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5402 }
5403 
5404 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5405 
5406 /* Complete the processing of the thread that queued a SCSI command to this
5407  * driver. It either completes the command by calling scsi_done() or
5408  * schedules a hr timer or work queue then returns 0. Returns
5409  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5410  */
5411 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5412 			 int scsi_result,
5413 			 int (*pfp)(struct scsi_cmnd *,
5414 				    struct sdebug_dev_info *),
5415 			 int delta_jiff, int ndelay)
5416 {
5417 	bool new_sd_dp;
5418 	bool inject = false;
5419 	bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
5420 	int k, num_in_q, qdepth;
5421 	unsigned long iflags;
5422 	u64 ns_from_boot = 0;
5423 	struct sdebug_queue *sqp;
5424 	struct sdebug_queued_cmd *sqcp;
5425 	struct scsi_device *sdp;
5426 	struct sdebug_defer *sd_dp;
5427 
5428 	if (unlikely(devip == NULL)) {
5429 		if (scsi_result == 0)
5430 			scsi_result = DID_NO_CONNECT << 16;
5431 		goto respond_in_thread;
5432 	}
5433 	sdp = cmnd->device;
5434 
5435 	if (delta_jiff == 0)
5436 		goto respond_in_thread;
5437 
5438 	sqp = get_queue(cmnd);
5439 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5440 	if (unlikely(atomic_read(&sqp->blocked))) {
5441 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5442 		return SCSI_MLQUEUE_HOST_BUSY;
5443 	}
5444 	num_in_q = atomic_read(&devip->num_in_q);
5445 	qdepth = cmnd->device->queue_depth;
5446 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5447 		if (scsi_result) {
5448 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5449 			goto respond_in_thread;
5450 		} else
5451 			scsi_result = device_qfull_result;
5452 	} else if (unlikely(sdebug_every_nth &&
5453 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5454 			    (scsi_result == 0))) {
5455 		if ((num_in_q == (qdepth - 1)) &&
5456 		    (atomic_inc_return(&sdebug_a_tsf) >=
5457 		     abs(sdebug_every_nth))) {
5458 			atomic_set(&sdebug_a_tsf, 0);
5459 			inject = true;
5460 			scsi_result = device_qfull_result;
5461 		}
5462 	}
5463 
5464 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5465 	if (unlikely(k >= sdebug_max_queue)) {
5466 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5467 		if (scsi_result)
5468 			goto respond_in_thread;
5469 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5470 			scsi_result = device_qfull_result;
5471 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5472 			sdev_printk(KERN_INFO, sdp,
5473 				    "%s: max_queue=%d exceeded, %s\n",
5474 				    __func__, sdebug_max_queue,
5475 				    (scsi_result ?  "status: TASK SET FULL" :
5476 						    "report: host busy"));
5477 		if (scsi_result)
5478 			goto respond_in_thread;
5479 		else
5480 			return SCSI_MLQUEUE_HOST_BUSY;
5481 	}
5482 	set_bit(k, sqp->in_use_bm);
5483 	atomic_inc(&devip->num_in_q);
5484 	sqcp = &sqp->qc_arr[k];
5485 	sqcp->a_cmnd = cmnd;
5486 	cmnd->host_scribble = (unsigned char *)sqcp;
5487 	sd_dp = sqcp->sd_dp;
5488 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5489 
5490 	if (!sd_dp) {
5491 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5492 		if (!sd_dp) {
5493 			atomic_dec(&devip->num_in_q);
5494 			clear_bit(k, sqp->in_use_bm);
5495 			return SCSI_MLQUEUE_HOST_BUSY;
5496 		}
5497 		new_sd_dp = true;
5498 	} else {
5499 		new_sd_dp = false;
5500 	}
5501 
5502 	/* Set the hostwide tag */
5503 	if (sdebug_host_max_queue)
5504 		sd_dp->hc_idx = get_tag(cmnd);
5505 
5506 	if (hipri)
5507 		ns_from_boot = ktime_get_boottime_ns();
5508 
5509 	/* one of the resp_*() response functions is called here */
5510 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5511 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5512 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5513 		delta_jiff = ndelay = 0;
5514 	}
5515 	if (cmnd->result == 0 && scsi_result != 0)
5516 		cmnd->result = scsi_result;
5517 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5518 		if (atomic_read(&sdeb_inject_pending)) {
5519 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5520 			atomic_set(&sdeb_inject_pending, 0);
5521 			cmnd->result = check_condition_result;
5522 		}
5523 	}
5524 
5525 	if (unlikely(sdebug_verbose && cmnd->result))
5526 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5527 			    __func__, cmnd->result);
5528 
5529 	if (delta_jiff > 0 || ndelay > 0) {
5530 		ktime_t kt;
5531 
5532 		if (delta_jiff > 0) {
5533 			u64 ns = jiffies_to_nsecs(delta_jiff);
5534 
5535 			if (sdebug_random && ns < U32_MAX) {
5536 				ns = prandom_u32_max((u32)ns);
5537 			} else if (sdebug_random) {
5538 				ns >>= 12;	/* scale to 4 usec precision */
5539 				if (ns < U32_MAX)	/* over 4 hours max */
5540 					ns = prandom_u32_max((u32)ns);
5541 				ns <<= 12;
5542 			}
5543 			kt = ns_to_ktime(ns);
5544 		} else {	/* ndelay has a 4.2 second max */
5545 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5546 					     (u32)ndelay;
5547 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5548 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5549 
5550 				if (kt <= d) {	/* elapsed duration >= kt */
5551 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5552 					sqcp->a_cmnd = NULL;
5553 					atomic_dec(&devip->num_in_q);
5554 					clear_bit(k, sqp->in_use_bm);
5555 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5556 					if (new_sd_dp)
5557 						kfree(sd_dp);
5558 					/* call scsi_done() from this thread */
5559 					cmnd->scsi_done(cmnd);
5560 					return 0;
5561 				}
5562 				/* otherwise reduce kt by elapsed time */
5563 				kt -= d;
5564 			}
5565 		}
5566 		if (hipri) {
5567 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5568 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5569 			if (!sd_dp->init_poll) {
5570 				sd_dp->init_poll = true;
5571 				sqcp->sd_dp = sd_dp;
5572 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5573 				sd_dp->qc_idx = k;
5574 			}
5575 			sd_dp->defer_t = SDEB_DEFER_POLL;
5576 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5577 		} else {
5578 			if (!sd_dp->init_hrt) {
5579 				sd_dp->init_hrt = true;
5580 				sqcp->sd_dp = sd_dp;
5581 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5582 					     HRTIMER_MODE_REL_PINNED);
5583 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5584 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5585 				sd_dp->qc_idx = k;
5586 			}
5587 			sd_dp->defer_t = SDEB_DEFER_HRT;
5588 			/* schedule the invocation of scsi_done() for a later time */
5589 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5590 		}
5591 		if (sdebug_statistics)
5592 			sd_dp->issuing_cpu = raw_smp_processor_id();
5593 	} else {	/* jdelay < 0, use work queue */
5594 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5595 			     atomic_read(&sdeb_inject_pending)))
5596 			sd_dp->aborted = true;
5597 		if (hipri) {
5598 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5599 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5600 			if (!sd_dp->init_poll) {
5601 				sd_dp->init_poll = true;
5602 				sqcp->sd_dp = sd_dp;
5603 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5604 				sd_dp->qc_idx = k;
5605 			}
5606 			sd_dp->defer_t = SDEB_DEFER_POLL;
5607 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5608 		} else {
5609 			if (!sd_dp->init_wq) {
5610 				sd_dp->init_wq = true;
5611 				sqcp->sd_dp = sd_dp;
5612 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5613 				sd_dp->qc_idx = k;
5614 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5615 			}
5616 			sd_dp->defer_t = SDEB_DEFER_WQ;
5617 			schedule_work(&sd_dp->ew.work);
5618 		}
5619 		if (sdebug_statistics)
5620 			sd_dp->issuing_cpu = raw_smp_processor_id();
5621 		if (unlikely(sd_dp->aborted)) {
5622 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5623 				    scsi_cmd_to_rq(cmnd)->tag);
5624 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5625 			atomic_set(&sdeb_inject_pending, 0);
5626 			sd_dp->aborted = false;
5627 		}
5628 	}
5629 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5630 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5631 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5632 	return 0;
5633 
5634 respond_in_thread:	/* call back to mid-layer using invocation thread */
5635 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5636 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5637 	if (cmnd->result == 0 && scsi_result != 0)
5638 		cmnd->result = scsi_result;
5639 	cmnd->scsi_done(cmnd);
5640 	return 0;
5641 }
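
/*
 * To summarize, schedule_resp() completes a command in one of four ways:
 * in the submitting thread when the delay is 0, via SDEB_DEFER_POLL
 * when REQ_HIPRI is set (harvested later by the driver's blk-mq poll
 * handler), via an hrtimer (SDEB_DEFER_HRT) for positive delays, and
 * via a work item (SDEB_DEFER_WQ) when jdelay is negative.
 */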
5642 
5643 /* Note: The following macros create attribute files in the
5644    /sys/module/scsi_debug/parameters directory. Unfortunately this
5645    driver is not notified when one of those files is changed, so it
5646    cannot trigger auxiliary actions as it can when the corresponding
5647    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5648  */
5649 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5650 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5651 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5652 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5653 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5654 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5655 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5656 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5657 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5658 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5659 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5660 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5661 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5662 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5663 module_param_string(inq_product, sdebug_inq_product_id,
5664 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5665 module_param_string(inq_rev, sdebug_inq_product_rev,
5666 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5667 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5668 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5669 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5670 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5671 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5672 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5673 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5674 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5675 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5676 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5677 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5678 		   S_IRUGO | S_IWUSR);
5679 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5680 		   S_IRUGO | S_IWUSR);
5681 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5682 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5683 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5684 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5685 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5686 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5687 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5688 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5689 module_param_named(per_host_store, sdebug_per_host_store, bool,
5690 		   S_IRUGO | S_IWUSR);
5691 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5692 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5693 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5694 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5695 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5696 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5697 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5698 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5699 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5700 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5701 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5702 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5703 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5704 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5705 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5706 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5707 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5708 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5709 		   S_IRUGO | S_IWUSR);
5710 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5711 module_param_named(write_same_length, sdebug_write_same_length, int,
5712 		   S_IRUGO | S_IWUSR);
5713 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5714 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5715 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5716 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5717 
5718 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5719 MODULE_DESCRIPTION("SCSI debug adapter driver");
5720 MODULE_LICENSE("GPL");
5721 MODULE_VERSION(SDEBUG_VERSION);
5722 
5723 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5724 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5725 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5726 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5727 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5728 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5729 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5730 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5731 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5732 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5733 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5734 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5735 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5736 MODULE_PARM_DESC(host_max_queue,
5737 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5738 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5739 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5740 		 SDEBUG_VERSION "\")");
5741 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5742 MODULE_PARM_DESC(lbprz,
5743 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5744 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5745 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5746 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5747 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5748 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5749 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5750 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5751 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow-on MEDIUM error");
5752 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5753 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5754 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5755 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5756 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5757 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5758 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5759 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5760 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5761 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5762 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5763 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5764 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5765 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5766 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5767 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5768 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5769 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5770 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5771 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5772 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5773 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5774 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5775 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5776 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5777 MODULE_PARM_DESC(uuid_ctl,
5778 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5779 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5780 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5781 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5782 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5783 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5784 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5785 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5786 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5787 
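/*
 * A hypothetical invocation combining several of the parameters above:
 * a 256 MiB ram store split into two partitions, with four targets per
 * host:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=4 num_parts=2
 *
 * The same values are visible (and some writable) afterwards under
 * /sys/module/scsi_debug/parameters/.
 */
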
5788 #define SDEBUG_INFO_LEN 256
5789 static char sdebug_info[SDEBUG_INFO_LEN];
5790 
5791 static const char *scsi_debug_info(struct Scsi_Host *shp)
5792 {
5793 	int k;
5794 
5795 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5796 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5797 	if (k >= (SDEBUG_INFO_LEN - 1))
5798 		return sdebug_info;
5799 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5800 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5801 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5802 		  "statistics", (int)sdebug_statistics);
5803 	return sdebug_info;
5804 }
5805 
5806 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5807 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5808 				 int length)
5809 {
5810 	char arr[16];
5811 	int opts;
5812 	int minLen = length > 15 ? 15 : length;
5813 
5814 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5815 		return -EACCES;
5816 	memcpy(arr, buffer, minLen);
5817 	arr[minLen] = '\0';
5818 	if (1 != sscanf(arr, "%d", &opts))
5819 		return -EINVAL;
5820 	sdebug_opts = opts;
5821 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5822 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5823 	if (sdebug_every_nth != 0)
5824 		tweak_cmnd_count();
5825 	return length;
5826 }
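
/*
 * Example (hypothetical host id 0): the value is parsed with a plain
 * "%d", so 'echo 1 > /proc/scsi/scsi_debug/0' sets SDEBUG_OPT_NOISE;
 * unlike the sysfs 'opts' attribute below, a 0x-prefixed value is not
 * parsed as hex here.
 */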
5827 
5828 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5829  * same for each scsi_debug host (if more than one). Some of the counters
5830  * output are not atomics so they may be inaccurate on a busy system. */
5831 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5832 {
5833 	int f, j, l;
5834 	struct sdebug_queue *sqp;
5835 	struct sdebug_host_info *sdhp;
5836 
5837 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5838 		   SDEBUG_VERSION, sdebug_version_date);
5839 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5840 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5841 		   sdebug_opts, sdebug_every_nth);
5842 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5843 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5844 		   sdebug_sector_size, "bytes");
5845 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5846 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5847 		   num_aborts);
5848 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5849 		   num_dev_resets, num_target_resets, num_bus_resets,
5850 		   num_host_resets);
5851 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5852 		   dix_reads, dix_writes, dif_errors);
5853 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5854 		   sdebug_statistics);
5855 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5856 		   atomic_read(&sdebug_cmnd_count),
5857 		   atomic_read(&sdebug_completions),
5858 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5859 		   atomic_read(&sdebug_a_tsf),
5860 		   atomic_read(&sdeb_mq_poll_count));
5861 
5862 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5863 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5864 		seq_printf(m, "  queue %d:\n", j);
5865 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5866 		if (f != sdebug_max_queue) {
5867 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5868 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5869 				   "first,last bits", f, l);
5870 		}
5871 	}
5872 
5873 	seq_printf(m, "this host_no=%d\n", host->host_no);
5874 	if (!xa_empty(per_store_ap)) {
5875 		bool niu;
5876 		int idx;
5877 		unsigned long l_idx;
5878 		struct sdeb_store_info *sip;
5879 
5880 		seq_puts(m, "\nhost list:\n");
5881 		j = 0;
5882 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5883 			idx = sdhp->si_idx;
5884 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5885 				   sdhp->shost->host_no, idx);
5886 			++j;
5887 		}
5888 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5889 			   sdeb_most_recent_idx);
5890 		j = 0;
5891 		xa_for_each(per_store_ap, l_idx, sip) {
5892 			niu = xa_get_mark(per_store_ap, l_idx,
5893 					  SDEB_XA_NOT_IN_USE);
5894 			idx = (int)l_idx;
5895 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5896 				   (niu ? "  not_in_use" : ""));
5897 			++j;
5898 		}
5899 	}
5900 	return 0;
5901 }
5902 
5903 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5904 {
5905 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5906 }
5907 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5908  * of delay is jiffies.
5909  */
5910 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5911 			   size_t count)
5912 {
5913 	int jdelay, res;
5914 
5915 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5916 		res = count;
5917 		if (sdebug_jdelay != jdelay) {
5918 			int j, k;
5919 			struct sdebug_queue *sqp;
5920 
5921 			block_unblock_all_queues(true);
5922 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5923 			     ++j, ++sqp) {
5924 				k = find_first_bit(sqp->in_use_bm,
5925 						   sdebug_max_queue);
5926 				if (k != sdebug_max_queue) {
5927 					res = -EBUSY;   /* queued commands */
5928 					break;
5929 				}
5930 			}
5931 			if (res > 0) {
5932 				sdebug_jdelay = jdelay;
5933 				sdebug_ndelay = 0;
5934 			}
5935 			block_unblock_all_queues(false);
5936 		}
5937 		return res;
5938 	}
5939 	return -EINVAL;
5940 }
5941 static DRIVER_ATTR_RW(delay);
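
/*
 * Example: 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' makes
 * responses complete in the submitting thread; the write fails with
 * -EBUSY if any commands are still queued at the time.
 */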
5942 
5943 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5944 {
5945 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5946 }
5947 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5948 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5949 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5950 			    size_t count)
5951 {
5952 	int ndelay, res;
5953 
5954 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5955 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5956 		res = count;
5957 		if (sdebug_ndelay != ndelay) {
5958 			int j, k;
5959 			struct sdebug_queue *sqp;
5960 
5961 			block_unblock_all_queues(true);
5962 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5963 			     ++j, ++sqp) {
5964 				k = find_first_bit(sqp->in_use_bm,
5965 						   sdebug_max_queue);
5966 				if (k != sdebug_max_queue) {
5967 					res = -EBUSY;   /* queued commands */
5968 					break;
5969 				}
5970 			}
5971 			if (res > 0) {
5972 				sdebug_ndelay = ndelay;
5973 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5974 							: DEF_JDELAY;
5975 			}
5976 			block_unblock_all_queues(false);
5977 		}
5978 		return res;
5979 	}
5980 	return -EINVAL;
5981 }
5982 static DRIVER_ATTR_RW(ndelay);
5983 
5984 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5985 {
5986 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5987 }
5988 
5989 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5990 			  size_t count)
5991 {
5992 	int opts;
5993 	char work[20];
5994 
5995 	if (sscanf(buf, "%10s", work) == 1) {
5996 		if (strncasecmp(work, "0x", 2) == 0) {
5997 			if (kstrtoint(work + 2, 16, &opts) == 0)
5998 				goto opts_done;
5999 		} else {
6000 			if (kstrtoint(work, 10, &opts) == 0)
6001 				goto opts_done;
6002 		}
6003 	}
6004 	return -EINVAL;
6005 opts_done:
6006 	sdebug_opts = opts;
6007 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6008 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6009 	tweak_cmnd_count();
6010 	return count;
6011 }
6012 static DRIVER_ATTR_RW(opts);
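
/*
 * Example: opts_store() accepts decimal or 0x-prefixed hex, so both
 * 'echo 0x4' and 'echo 4' to /sys/bus/pseudo/drivers/scsi_debug/opts
 * enable timeout injection (4->timeout).
 */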
6013 
6014 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6015 {
6016 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6017 }
6018 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6019 			   size_t count)
6020 {
6021 	int n;
6022 
6023 	/* Cannot change from or to TYPE_ZBC with sysfs */
6024 	if (sdebug_ptype == TYPE_ZBC)
6025 		return -EINVAL;
6026 
6027 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6028 		if (n == TYPE_ZBC)
6029 			return -EINVAL;
6030 		sdebug_ptype = n;
6031 		return count;
6032 	}
6033 	return -EINVAL;
6034 }
6035 static DRIVER_ATTR_RW(ptype);
6036 
6037 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6038 {
6039 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6040 }
6041 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6042 			    size_t count)
6043 {
6044 	int n;
6045 
6046 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6047 		sdebug_dsense = n;
6048 		return count;
6049 	}
6050 	return -EINVAL;
6051 }
6052 static DRIVER_ATTR_RW(dsense);
6053 
6054 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6055 {
6056 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6057 }
6058 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6059 			     size_t count)
6060 {
6061 	int n, idx;
6062 
6063 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6064 		bool want_store = (n == 0);
6065 		struct sdebug_host_info *sdhp;
6066 
6067 		n = (n > 0);
6068 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6069 		if (sdebug_fake_rw == n)
6070 			return count;	/* not transitioning so do nothing */
6071 
6072 		if (want_store) {	/* 1 --> 0 transition, set up store */
6073 			if (sdeb_first_idx < 0) {
6074 				idx = sdebug_add_store();
6075 				if (idx < 0)
6076 					return idx;
6077 			} else {
6078 				idx = sdeb_first_idx;
6079 				xa_clear_mark(per_store_ap, idx,
6080 					      SDEB_XA_NOT_IN_USE);
6081 			}
6082 			/* make all hosts use same store */
6083 			list_for_each_entry(sdhp, &sdebug_host_list,
6084 					    host_list) {
6085 				if (sdhp->si_idx != idx) {
6086 					xa_set_mark(per_store_ap, sdhp->si_idx,
6087 						    SDEB_XA_NOT_IN_USE);
6088 					sdhp->si_idx = idx;
6089 				}
6090 			}
6091 			sdeb_most_recent_idx = idx;
6092 		} else {	/* 0 --> 1 transition is trigger for shrink */
6093 			sdebug_erase_all_stores(true /* apart from first */);
6094 		}
6095 		sdebug_fake_rw = n;
6096 		return count;
6097 	}
6098 	return -EINVAL;
6099 }
6100 static DRIVER_ATTR_RW(fake_rw);
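
/*
 * Note on fake_rw transitions: writing 0 when fake_rw was 1 (re)attaches
 * a shared store to every host (creating one if none exists), while
 * writing 1 when it was 0 erases all stores apart from the first.
 * Writing the current value is a no-op.
 */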
6101 
6102 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6103 {
6104 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6105 }
6106 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6107 			      size_t count)
6108 {
6109 	int n;
6110 
6111 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6112 		sdebug_no_lun_0 = n;
6113 		return count;
6114 	}
6115 	return -EINVAL;
6116 }
6117 static DRIVER_ATTR_RW(no_lun_0);
6118 
6119 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6120 {
6121 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6122 }
6123 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6124 			      size_t count)
6125 {
6126 	int n;
6127 
6128 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6129 		sdebug_num_tgts = n;
6130 		sdebug_max_tgts_luns();
6131 		return count;
6132 	}
6133 	return -EINVAL;
6134 }
6135 static DRIVER_ATTR_RW(num_tgts);
6136 
6137 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6138 {
6139 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6140 }
6141 static DRIVER_ATTR_RO(dev_size_mb);
6142 
6143 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6144 {
6145 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6146 }
6147 
6148 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6149 				    size_t count)
6150 {
6151 	bool v;
6152 
6153 	if (kstrtobool(buf, &v))
6154 		return -EINVAL;
6155 
6156 	sdebug_per_host_store = v;
6157 	return count;
6158 }
6159 static DRIVER_ATTR_RW(per_host_store);
6160 
6161 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6162 {
6163 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6164 }
6165 static DRIVER_ATTR_RO(num_parts);
6166 
6167 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6168 {
6169 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6170 }
6171 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6172 			       size_t count)
6173 {
6174 	int nth;
6175 	char work[20];
6176 
6177 	if (sscanf(buf, "%10s", work) == 1) {
6178 		if (strncasecmp(work, "0x", 2) == 0) {
6179 			if (kstrtoint(work + 2, 16, &nth) == 0)
6180 				goto every_nth_done;
6181 		} else {
6182 			if (kstrtoint(work, 10, &nth) == 0)
6183 				goto every_nth_done;
6184 		}
6185 	}
6186 	return -EINVAL;
6187 
6188 every_nth_done:
6189 	sdebug_every_nth = nth;
6190 	if (nth && !sdebug_statistics) {
6191 		pr_info("every_nth needs statistics=1, set it\n");
6192 		sdebug_statistics = true;
6193 	}
6194 	tweak_cmnd_count();
6195 	return count;
6196 }
6197 static DRIVER_ATTR_RW(every_nth);
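
/*
 * Example: 'echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts' followed
 * by 'echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth' makes
 * every 100th command time out; note that statistics collection is
 * enabled automatically here if it was off.
 */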
6198 
6199 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6200 {
6201 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6202 }
6203 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6204 				size_t count)
6205 {
6206 	int n;
6207 	bool changed;
6208 
6209 	if (kstrtoint(buf, 0, &n))
6210 		return -EINVAL;
6211 	if (n >= 0) {
6212 		if (n > (int)SAM_LUN_AM_FLAT) {
6213 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6214 			return -EINVAL;
6215 		}
6216 		changed = ((int)sdebug_lun_am != n);
6217 		sdebug_lun_am = n;
6218 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6219 			struct sdebug_host_info *sdhp;
6220 			struct sdebug_dev_info *dp;
6221 
6222 			spin_lock(&sdebug_host_list_lock);
6223 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6224 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6225 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6226 				}
6227 			}
6228 			spin_unlock(&sdebug_host_list_lock);
6229 		}
6230 		return count;
6231 	}
6232 	return -EINVAL;
6233 }
6234 static DRIVER_ATTR_RW(lun_format);
6235 
6236 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6237 {
6238 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6239 }
6240 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6241 			      size_t count)
6242 {
6243 	int n;
6244 	bool changed;
6245 
6246 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6247 		if (n > 256) {
6248 			pr_warn("max_luns can be no more than 256\n");
6249 			return -EINVAL;
6250 		}
6251 		changed = (sdebug_max_luns != n);
6252 		sdebug_max_luns = n;
6253 		sdebug_max_tgts_luns();
6254 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6255 			struct sdebug_host_info *sdhp;
6256 			struct sdebug_dev_info *dp;
6257 
6258 			spin_lock(&sdebug_host_list_lock);
6259 			list_for_each_entry(sdhp, &sdebug_host_list,
6260 					    host_list) {
6261 				list_for_each_entry(dp, &sdhp->dev_info_list,
6262 						    dev_list) {
6263 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6264 						dp->uas_bm);
6265 				}
6266 			}
6267 			spin_unlock(&sdebug_host_list_lock);
6268 		}
6269 		return count;
6270 	}
6271 	return -EINVAL;
6272 }
6273 static DRIVER_ATTR_RW(max_luns);
6274 
6275 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6276 {
6277 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6278 }
6279 /* N.B. max_queue can be changed while there are queued commands. In-flight
6280  * commands beyond the new max_queue will be completed. */
6281 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6282 			       size_t count)
6283 {
6284 	int j, n, k, a;
6285 	struct sdebug_queue *sqp;
6286 
6287 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6288 	    (n <= SDEBUG_CANQUEUE) &&
6289 	    (sdebug_host_max_queue == 0)) {
6290 		block_unblock_all_queues(true);
6291 		k = 0;
6292 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6293 		     ++j, ++sqp) {
6294 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6295 			if (a > k)
6296 				k = a;
6297 		}
6298 		sdebug_max_queue = n;
6299 		if (k == SDEBUG_CANQUEUE)
6300 			atomic_set(&retired_max_queue, 0);
6301 		else if (k >= n)
6302 			atomic_set(&retired_max_queue, k + 1);
6303 		else
6304 			atomic_set(&retired_max_queue, 0);
6305 		block_unblock_all_queues(false);
6306 		return count;
6307 	}
6308 	return -EINVAL;
6309 }
6310 static DRIVER_ATTR_RW(max_queue);
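
/*
 * Worked example (hypothetical values): shrinking max_queue from 64 to
 * 16 while a command still occupies slot 40 sets retired_max_queue to
 * 41; as in-flight commands beyond the new limit complete,
 * sdebug_q_cmd_complete() steps retired_max_queue down and finally
 * clears it to 0.
 */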
6311 
6312 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6313 {
6314 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6315 }
6316 
6317 /*
6318  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6319  * in range [0, sdebug_host_max_queue), we can't change it.
6320  */
6321 static DRIVER_ATTR_RO(host_max_queue);
6322 
6323 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6324 {
6325 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6326 }
6327 static DRIVER_ATTR_RO(no_uld);
6328 
6329 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6330 {
6331 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6332 }
6333 static DRIVER_ATTR_RO(scsi_level);
6334 
6335 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6336 {
6337 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6338 }
6339 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6340 				size_t count)
6341 {
6342 	int n;
6343 	bool changed;
6344 
6345 	/* Ignore capacity change for ZBC drives for now */
6346 	if (sdeb_zbc_in_use)
6347 		return -ENOTSUPP;
6348 
6349 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6350 		changed = (sdebug_virtual_gb != n);
6351 		sdebug_virtual_gb = n;
6352 		sdebug_capacity = get_sdebug_capacity();
6353 		if (changed) {
6354 			struct sdebug_host_info *sdhp;
6355 			struct sdebug_dev_info *dp;
6356 
6357 			spin_lock(&sdebug_host_list_lock);
6358 			list_for_each_entry(sdhp, &sdebug_host_list,
6359 					    host_list) {
6360 				list_for_each_entry(dp, &sdhp->dev_info_list,
6361 						    dev_list) {
6362 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6363 						dp->uas_bm);
6364 				}
6365 			}
6366 			spin_unlock(&sdebug_host_list_lock);
6367 		}
6368 		return count;
6369 	}
6370 	return -EINVAL;
6371 }
6372 static DRIVER_ATTR_RW(virtual_gb);
6373 
6374 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6375 {
6376 	/* absolute number of hosts currently active is what is shown */
6377 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6378 }
6379 
6380 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6381 			      size_t count)
6382 {
6383 	bool found;
6384 	unsigned long idx;
6385 	struct sdeb_store_info *sip;
6386 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6387 	int delta_hosts;
6388 
6389 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6390 		return -EINVAL;
6391 	if (delta_hosts > 0) {
6392 		do {
6393 			found = false;
6394 			if (want_phs) {
6395 				xa_for_each_marked(per_store_ap, idx, sip,
6396 						   SDEB_XA_NOT_IN_USE) {
6397 					sdeb_most_recent_idx = (int)idx;
6398 					found = true;
6399 					break;
6400 				}
6401 				if (found)	/* re-use case */
6402 					sdebug_add_host_helper((int)idx);
6403 				else
6404 					sdebug_do_add_host(true);
6405 			} else {
6406 				sdebug_do_add_host(false);
6407 			}
6408 		} while (--delta_hosts);
6409 	} else if (delta_hosts < 0) {
6410 		do {
6411 			sdebug_do_remove_host(false);
6412 		} while (++delta_hosts);
6413 	}
6414 	return count;
6415 }
6416 static DRIVER_ATTR_RW(add_host);
6417 
6418 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6419 {
6420 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6421 }
6422 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6423 				    size_t count)
6424 {
6425 	int n;
6426 
6427 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6428 		sdebug_vpd_use_hostno = n;
6429 		return count;
6430 	}
6431 	return -EINVAL;
6432 }
6433 static DRIVER_ATTR_RW(vpd_use_hostno);
6434 
6435 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6436 {
6437 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6438 }
6439 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6440 				size_t count)
6441 {
6442 	int n;
6443 
6444 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6445 		if (n > 0)
6446 			sdebug_statistics = true;
6447 		else {
6448 			clear_queue_stats();
6449 			sdebug_statistics = false;
6450 		}
6451 		return count;
6452 	}
6453 	return -EINVAL;
6454 }
6455 static DRIVER_ATTR_RW(statistics);
6456 
6457 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6458 {
6459 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6460 }
6461 static DRIVER_ATTR_RO(sector_size);
6462 
6463 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6464 {
6465 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6466 }
6467 static DRIVER_ATTR_RO(submit_queues);
6468 
6469 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6470 {
6471 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6472 }
6473 static DRIVER_ATTR_RO(dix);
6474 
6475 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6476 {
6477 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6478 }
6479 static DRIVER_ATTR_RO(dif);
6480 
6481 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6482 {
6483 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6484 }
6485 static DRIVER_ATTR_RO(guard);
6486 
6487 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6488 {
6489 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6490 }
6491 static DRIVER_ATTR_RO(ato);
6492 
6493 static ssize_t map_show(struct device_driver *ddp, char *buf)
6494 {
6495 	ssize_t count = 0;
6496 
6497 	if (!scsi_debug_lbp())
6498 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6499 				 sdebug_store_sectors);
6500 
6501 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6502 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6503 
6504 		if (sip)
6505 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6506 					  (int)map_size, sip->map_storep);
6507 	}
6508 	buf[count++] = '\n';
6509 	buf[count] = '\0';
6510 
6511 	return count;
6512 }
6513 static DRIVER_ATTR_RO(map);
6514 
6515 static ssize_t random_show(struct device_driver *ddp, char *buf)
6516 {
6517 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6518 }
6519 
6520 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6521 			    size_t count)
6522 {
6523 	bool v;
6524 
6525 	if (kstrtobool(buf, &v))
6526 		return -EINVAL;
6527 
6528 	sdebug_random = v;
6529 	return count;
6530 }
6531 static DRIVER_ATTR_RW(random);
6532 
6533 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6534 {
6535 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6536 }
6537 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6538 			       size_t count)
6539 {
6540 	int n;
6541 
6542 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6543 		sdebug_removable = (n > 0);
6544 		return count;
6545 	}
6546 	return -EINVAL;
6547 }
6548 static DRIVER_ATTR_RW(removable);
6549 
6550 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6551 {
6552 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6553 }
6554 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6555 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6556 			       size_t count)
6557 {
6558 	int n;
6559 
6560 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6561 		sdebug_host_lock = (n > 0);
6562 		return count;
6563 	}
6564 	return -EINVAL;
6565 }
6566 static DRIVER_ATTR_RW(host_lock);
6567 
6568 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6569 {
6570 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6571 }
6572 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6573 			    size_t count)
6574 {
6575 	int n;
6576 
6577 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6578 		sdebug_strict = (n > 0);
6579 		return count;
6580 	}
6581 	return -EINVAL;
6582 }
6583 static DRIVER_ATTR_RW(strict);
6584 
6585 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6586 {
6587 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6588 }
6589 static DRIVER_ATTR_RO(uuid_ctl);
6590 
6591 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6592 {
6593 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6594 }
6595 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6596 			     size_t count)
6597 {
6598 	int ret, n;
6599 
6600 	ret = kstrtoint(buf, 0, &n);
6601 	if (ret)
6602 		return ret;
6603 	sdebug_cdb_len = n;
6604 	all_config_cdb_len();
6605 	return count;
6606 }
6607 static DRIVER_ATTR_RW(cdb_len);
6608 
6609 static const char * const zbc_model_strs_a[] = {
6610 	[BLK_ZONED_NONE] = "none",
6611 	[BLK_ZONED_HA]   = "host-aware",
6612 	[BLK_ZONED_HM]   = "host-managed",
6613 };
6614 
6615 static const char * const zbc_model_strs_b[] = {
6616 	[BLK_ZONED_NONE] = "no",
6617 	[BLK_ZONED_HA]   = "aware",
6618 	[BLK_ZONED_HM]   = "managed",
6619 };
6620 
6621 static const char * const zbc_model_strs_c[] = {
6622 	[BLK_ZONED_NONE] = "0",
6623 	[BLK_ZONED_HA]   = "1",
6624 	[BLK_ZONED_HM]   = "2",
6625 };
6626 
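/*
 * Map the user supplied zbc= parameter string to a BLK_ZONED_* constant.
 * Any synonym from the three tables above is accepted, e.g. "host-managed",
 * "managed" and "2" all select BLK_ZONED_HM.
 */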
6627 static int sdeb_zbc_model_str(const char *cp)
6628 {
6629 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6630 
6631 	if (res < 0) {
6632 		res = sysfs_match_string(zbc_model_strs_b, cp);
6633 		if (res < 0) {
6634 			res = sysfs_match_string(zbc_model_strs_c, cp);
6635 			if (res < 0)
6636 				return -EINVAL;
6637 		}
6638 	}
6639 	return res;
6640 }
6641 
6642 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6643 {
6644 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6645 			 zbc_model_strs_a[sdeb_zbc_model]);
6646 }
6647 static DRIVER_ATTR_RO(zbc);
6648 
6649 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6650 {
6651 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6652 }
6653 static DRIVER_ATTR_RO(tur_ms_to_ready);
6654 
6655 /* Note: The following array creates attribute files in the
6656  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6657  * files (over those found in the /sys/module/scsi_debug/parameters
6658  * directory) is that auxiliary actions can be triggered when an attribute
6659  * is changed. For example see: add_host_store() above.
6660  */
6661 
6662 static struct attribute *sdebug_drv_attrs[] = {
6663 	&driver_attr_delay.attr,
6664 	&driver_attr_opts.attr,
6665 	&driver_attr_ptype.attr,
6666 	&driver_attr_dsense.attr,
6667 	&driver_attr_fake_rw.attr,
6668 	&driver_attr_host_max_queue.attr,
6669 	&driver_attr_no_lun_0.attr,
6670 	&driver_attr_num_tgts.attr,
6671 	&driver_attr_dev_size_mb.attr,
6672 	&driver_attr_num_parts.attr,
6673 	&driver_attr_every_nth.attr,
6674 	&driver_attr_lun_format.attr,
6675 	&driver_attr_max_luns.attr,
6676 	&driver_attr_max_queue.attr,
6677 	&driver_attr_no_uld.attr,
6678 	&driver_attr_scsi_level.attr,
6679 	&driver_attr_virtual_gb.attr,
6680 	&driver_attr_add_host.attr,
6681 	&driver_attr_per_host_store.attr,
6682 	&driver_attr_vpd_use_hostno.attr,
6683 	&driver_attr_sector_size.attr,
6684 	&driver_attr_statistics.attr,
6685 	&driver_attr_submit_queues.attr,
6686 	&driver_attr_dix.attr,
6687 	&driver_attr_dif.attr,
6688 	&driver_attr_guard.attr,
6689 	&driver_attr_ato.attr,
6690 	&driver_attr_map.attr,
6691 	&driver_attr_random.attr,
6692 	&driver_attr_removable.attr,
6693 	&driver_attr_host_lock.attr,
6694 	&driver_attr_ndelay.attr,
6695 	&driver_attr_strict.attr,
6696 	&driver_attr_uuid_ctl.attr,
6697 	&driver_attr_cdb_len.attr,
6698 	&driver_attr_tur_ms_to_ready.attr,
6699 	&driver_attr_zbc.attr,
6700 	NULL,
6701 };
6702 ATTRIBUTE_GROUPS(sdebug_drv);
6703 
6704 static struct device *pseudo_primary;
6705 
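/*
 * Module initialization: validate the module parameters, size the simulated
 * capacity and geometry, set up the submission queues, optionally create the
 * first backing store, then register the pseudo bus, the driver and the
 * requested number of hosts.
 */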
6706 static int __init scsi_debug_init(void)
6707 {
6708 	bool want_store = (sdebug_fake_rw == 0);
6709 	unsigned long sz;
6710 	int k, ret, hosts_to_add;
6711 	int idx = -1;
6712 
6713 	ramdisk_lck_a[0] = &atomic_rw;
6714 	ramdisk_lck_a[1] = &atomic_rw2;
6715 	atomic_set(&retired_max_queue, 0);
6716 
6717 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6718 		pr_warn("ndelay must be less than 1 second, ignored\n");
6719 		sdebug_ndelay = 0;
6720 	} else if (sdebug_ndelay > 0)
6721 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6722 
6723 	switch (sdebug_sector_size) {
6724 	case  512:
6725 	case 1024:
6726 	case 2048:
6727 	case 4096:
6728 		break;
6729 	default:
6730 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6731 		return -EINVAL;
6732 	}
6733 
6734 	switch (sdebug_dif) {
6735 	case T10_PI_TYPE0_PROTECTION:
6736 		break;
6737 	case T10_PI_TYPE1_PROTECTION:
6738 	case T10_PI_TYPE2_PROTECTION:
6739 	case T10_PI_TYPE3_PROTECTION:
6740 		have_dif_prot = true;
6741 		break;
6742 
6743 	default:
6744 		pr_err("dif must be 0, 1, 2 or 3\n");
6745 		return -EINVAL;
6746 	}
6747 
6748 	if (sdebug_num_tgts < 0) {
6749 		pr_err("num_tgts must be >= 0\n");
6750 		return -EINVAL;
6751 	}
6752 
6753 	if (sdebug_guard > 1) {
6754 		pr_err("guard must be 0 or 1\n");
6755 		return -EINVAL;
6756 	}
6757 
6758 	if (sdebug_ato > 1) {
6759 		pr_err("ato must be 0 or 1\n");
6760 		return -EINVAL;
6761 	}
6762 
6763 	if (sdebug_physblk_exp > 15) {
6764 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6765 		return -EINVAL;
6766 	}
6767 
6768 	sdebug_lun_am = sdebug_lun_am_i;
6769 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6770 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6771 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6772 	}
6773 
6774 	if (sdebug_max_luns > 256) {
6775 		if (sdebug_max_luns > 16384) {
6776 			pr_warn("max_luns can be no more than 16384, using default\n");
6777 			sdebug_max_luns = DEF_MAX_LUNS;
6778 		}
6779 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6780 	}
6781 
6782 	if (sdebug_lowest_aligned > 0x3fff) {
6783 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6784 		return -EINVAL;
6785 	}
6786 
6787 	if (submit_queues < 1) {
6788 		pr_err("submit_queues must be 1 or more\n");
6789 		return -EINVAL;
6790 	}
6791 
6792 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6793 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6794 		return -EINVAL;
6795 	}
6796 
6797 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6798 	    (sdebug_host_max_queue < 0)) {
6799 		pr_err("host_max_queue must be in range [0, %d]\n",
6800 		       SDEBUG_CANQUEUE);
6801 		return -EINVAL;
6802 	}
6803 
6804 	if (sdebug_host_max_queue &&
6805 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6806 		sdebug_max_queue = sdebug_host_max_queue;
6807 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6808 			sdebug_max_queue);
6809 	}
6810 
6811 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6812 			       GFP_KERNEL);
6813 	if (sdebug_q_arr == NULL)
6814 		return -ENOMEM;
6815 	for (k = 0; k < submit_queues; ++k)
6816 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6817 
6818 	/*
6819 	 * check for host managed zoned block device specified with
6820 	 * ptype=0x14 or zbc=XXX.
6821 	 */
6822 	if (sdebug_ptype == TYPE_ZBC) {
6823 		sdeb_zbc_model = BLK_ZONED_HM;
6824 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6825 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6826 		if (k < 0) {
6827 			ret = k;
6828 			goto free_q_arr;
6829 		}
6830 		sdeb_zbc_model = k;
6831 		switch (sdeb_zbc_model) {
6832 		case BLK_ZONED_NONE:
6833 		case BLK_ZONED_HA:
6834 			sdebug_ptype = TYPE_DISK;
6835 			break;
6836 		case BLK_ZONED_HM:
6837 			sdebug_ptype = TYPE_ZBC;
6838 			break;
6839 		default:
6840 			pr_err("Invalid ZBC model\n");
6841 			ret = -EINVAL;
6842 			goto free_q_arr;
6843 		}
6844 	}
6845 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6846 		sdeb_zbc_in_use = true;
6847 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6848 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6849 	}
6850 
6851 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6852 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6853 	if (sdebug_dev_size_mb < 1)
6854 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6855 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6856 	sdebug_store_sectors = sz / sdebug_sector_size;
6857 	sdebug_capacity = get_sdebug_capacity();
6858 
6859 	/* play around with geometry, don't waste too much on track 0 */
6860 	sdebug_heads = 8;
6861 	sdebug_sectors_per = 32;
6862 	if (sdebug_dev_size_mb >= 256)
6863 		sdebug_heads = 64;
6864 	else if (sdebug_dev_size_mb >= 16)
6865 		sdebug_heads = 32;
6866 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6867 			       (sdebug_sectors_per * sdebug_heads);
6868 	if (sdebug_cylinders_per >= 1024) {
6869 		/* other LLDs do this; implies >= 1GB ram disk ... */
6870 		sdebug_heads = 255;
6871 		sdebug_sectors_per = 63;
6872 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6873 			       (sdebug_sectors_per * sdebug_heads);
6874 	}
6875 	if (scsi_debug_lbp()) {
6876 		sdebug_unmap_max_blocks =
6877 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6878 
6879 		sdebug_unmap_max_desc =
6880 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6881 
6882 		sdebug_unmap_granularity =
6883 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6884 
6885 		if (sdebug_unmap_alignment &&
6886 		    sdebug_unmap_granularity <=
6887 		    sdebug_unmap_alignment) {
6888 			pr_err("unmap_granularity <= unmap_alignment\n");
6889 			ret = -EINVAL;
6890 			goto free_q_arr;
6891 		}
6892 	}
6893 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6894 	if (want_store) {
6895 		idx = sdebug_add_store();
6896 		if (idx < 0) {
6897 			ret = idx;
6898 			goto free_q_arr;
6899 		}
6900 	}
6901 
6902 	pseudo_primary = root_device_register("pseudo_0");
6903 	if (IS_ERR(pseudo_primary)) {
6904 		pr_warn("root_device_register() error\n");
6905 		ret = PTR_ERR(pseudo_primary);
6906 		goto free_vm;
6907 	}
6908 	ret = bus_register(&pseudo_lld_bus);
6909 	if (ret < 0) {
6910 		pr_warn("bus_register error: %d\n", ret);
6911 		goto dev_unreg;
6912 	}
6913 	ret = driver_register(&sdebug_driverfs_driver);
6914 	if (ret < 0) {
6915 		pr_warn("driver_register error: %d\n", ret);
6916 		goto bus_unreg;
6917 	}
6918 
6919 	hosts_to_add = sdebug_add_host;
6920 	sdebug_add_host = 0;
6921 
6922 	for (k = 0; k < hosts_to_add; k++) {
6923 		if (want_store && k == 0) {
6924 			ret = sdebug_add_host_helper(idx);
6925 			if (ret < 0) {
6926 				pr_err("add_host_helper k=%d, error=%d\n",
6927 				       k, -ret);
6928 				break;
6929 			}
6930 		} else {
6931 			ret = sdebug_do_add_host(want_store &&
6932 						 sdebug_per_host_store);
6933 			if (ret < 0) {
6934 				pr_err("add_host k=%d error=%d\n", k, -ret);
6935 				break;
6936 			}
6937 		}
6938 	}
6939 	if (sdebug_verbose)
6940 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6941 
6942 	return 0;
6943 
6944 bus_unreg:
6945 	bus_unregister(&pseudo_lld_bus);
6946 dev_unreg:
6947 	root_device_unregister(pseudo_primary);
6948 free_vm:
6949 	sdebug_erase_store(idx, NULL);
6950 free_q_arr:
6951 	kfree(sdebug_q_arr);
6952 	return ret;
6953 }
6954 
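/*
 * Module teardown: remove all hosts, unwind the driver/bus/root-device
 * registrations, then release all backing stores.
 */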
6955 static void __exit scsi_debug_exit(void)
6956 {
6957 	int k = sdebug_num_hosts;
6958 
6959 	stop_all_queued();
6960 	for (; k; k--)
6961 		sdebug_do_remove_host(true);
6962 	free_all_queued();
6963 	driver_unregister(&sdebug_driverfs_driver);
6964 	bus_unregister(&pseudo_lld_bus);
6965 	root_device_unregister(pseudo_primary);
6966 
6967 	sdebug_erase_all_stores(false);
6968 	xa_destroy(per_store_ap);
6969 	kfree(sdebug_q_arr);
6970 }
6971 
6972 device_initcall(scsi_debug_init);
6973 module_exit(scsi_debug_exit);
6974 
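/* device release callback: frees the adapter once its last reference is dropped */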
6975 static void sdebug_release_adapter(struct device *dev)
6976 {
6977 	struct sdebug_host_info *sdbg_host;
6978 
6979 	sdbg_host = to_sdebug_host(dev);
6980 	kfree(sdbg_host);
6981 }
6982 
6983 /* idx must be valid; if sip is NULL then it will be looked up using idx */
6984 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6985 {
6986 	if (idx < 0)
6987 		return;
6988 	if (!sip) {
6989 		if (xa_empty(per_store_ap))
6990 			return;
6991 		sip = xa_load(per_store_ap, idx);
6992 		if (!sip)
6993 			return;
6994 	}
6995 	vfree(sip->map_storep);
6996 	vfree(sip->dif_storep);
6997 	vfree(sip->storep);
6998 	xa_erase(per_store_ap, idx);
6999 	kfree(sip);
7000 }
7001 
7002 /* apart_from_first is assumed to be false only in the shutdown case. */
7003 static void sdebug_erase_all_stores(bool apart_from_first)
7004 {
7005 	unsigned long idx;
7006 	struct sdeb_store_info *sip = NULL;
7007 
7008 	xa_for_each(per_store_ap, idx, sip) {
7009 		if (apart_from_first)
7010 			apart_from_first = false;
7011 		else
7012 			sdebug_erase_store(idx, sip);
7013 	}
7014 	if (apart_from_first)
7015 		sdeb_most_recent_idx = sdeb_first_idx;
7016 }
7017 
7018 /*
7019  * Returns the new store's xarray element index (idx) if >= 0, else a
7020  * negated errno. The number of stores is limited to 65536.
7021  */
7022 static int sdebug_add_store(void)
7023 {
7024 	int res;
7025 	u32 n_idx;
7026 	unsigned long iflags;
7027 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7028 	struct sdeb_store_info *sip = NULL;
7029 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7030 
7031 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7032 	if (!sip)
7033 		return -ENOMEM;
7034 
7035 	xa_lock_irqsave(per_store_ap, iflags);
7036 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7037 	if (unlikely(res < 0)) {
7038 		xa_unlock_irqrestore(per_store_ap, iflags);
7039 		kfree(sip);
7040 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7041 		return res;
7042 	}
7043 	sdeb_most_recent_idx = n_idx;
7044 	if (sdeb_first_idx < 0)
7045 		sdeb_first_idx = n_idx;
7046 	xa_unlock_irqrestore(per_store_ap, iflags);
7047 
7048 	res = -ENOMEM;
7049 	sip->storep = vzalloc(sz);
7050 	if (!sip->storep) {
7051 		pr_err("user data oom\n");
7052 		goto err;
7053 	}
7054 	if (sdebug_num_parts > 0)
7055 		sdebug_build_parts(sip->storep, sz);
7056 
7057 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7058 	if (sdebug_dix) {
7059 		int dif_size;
7060 
7061 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7062 		sip->dif_storep = vmalloc(dif_size);
7063 
7064 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7065 			sip->dif_storep);
7066 
7067 		if (!sip->dif_storep) {
7068 			pr_err("DIX oom\n");
7069 			goto err;
7070 		}
7071 		memset(sip->dif_storep, 0xff, dif_size);
7072 	}
7073 	/* Logical Block Provisioning */
7074 	if (scsi_debug_lbp()) {
7075 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7076 		sip->map_storep = vmalloc(array_size(sizeof(long),
7077 						     BITS_TO_LONGS(map_size)));
7078 
7079 		pr_info("%lu provisioning blocks\n", map_size);
7080 
7081 		if (!sip->map_storep) {
7082 			pr_err("LBP map oom\n");
7083 			goto err;
7084 		}
7085 
7086 		bitmap_zero(sip->map_storep, map_size);
7087 
7088 		/* Map first 1KB for partition table */
7089 		if (sdebug_num_parts)
7090 			map_region(sip, 0, 2);
7091 	}
7092 
7093 	rwlock_init(&sip->macc_lck);
7094 	return (int)n_idx;
7095 err:
7096 	sdebug_erase_store((int)n_idx, sip);
7097 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7098 	return res;
7099 }
7100 
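/*
 * Allocate a new sdebug_host_info, bind it to the store given by
 * per_host_idx (or the first store when negative), create its target
 * devices and register it on the pseudo bus so that sdebug_driver_probe()
 * brings up the corresponding SCSI host.
 */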
7101 static int sdebug_add_host_helper(int per_host_idx)
7102 {
7103 	int k, devs_per_host, idx;
7104 	int error = -ENOMEM;
7105 	struct sdebug_host_info *sdbg_host;
7106 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7107 
7108 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7109 	if (!sdbg_host)
7110 		return -ENOMEM;
7111 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7112 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7113 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7114 	sdbg_host->si_idx = idx;
7115 
7116 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7117 
7118 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7119 	for (k = 0; k < devs_per_host; k++) {
7120 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7121 		if (!sdbg_devinfo)
7122 			goto clean;
7123 	}
7124 
7125 	spin_lock(&sdebug_host_list_lock);
7126 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7127 	spin_unlock(&sdebug_host_list_lock);
7128 
7129 	sdbg_host->dev.bus = &pseudo_lld_bus;
7130 	sdbg_host->dev.parent = pseudo_primary;
7131 	sdbg_host->dev.release = &sdebug_release_adapter;
7132 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7133 
7134 	error = device_register(&sdbg_host->dev);
7135 	if (error) {
7136 		spin_lock(&sdebug_host_list_lock);
7137 		list_del(&sdbg_host->host_list);
7138 		spin_unlock(&sdebug_host_list_lock);
7139 		goto clean;
7140 	}
7141 
7142 	++sdebug_num_hosts;
7143 	return 0;
7144 
7145 clean:
7146 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7147 				 dev_list) {
7148 		list_del(&sdbg_devinfo->dev_list);
7149 		kfree(sdbg_devinfo->zstate);
7150 		kfree(sdbg_devinfo);
7151 	}
7152 	kfree(sdbg_host);
7153 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7154 	return error;
7155 }
7156 
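/*
 * Add one simulated host; when mk_new_store is true a new backing store is
 * created for it first, otherwise the most recently added store is shared.
 */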
7157 static int sdebug_do_add_host(bool mk_new_store)
7158 {
7159 	int ph_idx = sdeb_most_recent_idx;
7160 
7161 	if (mk_new_store) {
7162 		ph_idx = sdebug_add_store();
7163 		if (ph_idx < 0)
7164 			return ph_idx;
7165 	}
7166 	return sdebug_add_host_helper(ph_idx);
7167 }
7168 
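/*
 * Remove the most recently added host. Unless this is the final removal
 * (the_end), mark the host's store as not-in-use when no other host still
 * references it, so that a later add_host can re-use it.
 */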
7169 static void sdebug_do_remove_host(bool the_end)
7170 {
7171 	int idx = -1;
7172 	struct sdebug_host_info *sdbg_host = NULL;
7173 	struct sdebug_host_info *sdbg_host2;
7174 
7175 	spin_lock(&sdebug_host_list_lock);
7176 	if (!list_empty(&sdebug_host_list)) {
7177 		sdbg_host = list_entry(sdebug_host_list.prev,
7178 				       struct sdebug_host_info, host_list);
7179 		idx = sdbg_host->si_idx;
7180 	}
7181 	if (!the_end && idx >= 0) {
7182 		bool unique = true;
7183 
7184 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7185 			if (sdbg_host2 == sdbg_host)
7186 				continue;
7187 			if (idx == sdbg_host2->si_idx) {
7188 				unique = false;
7189 				break;
7190 			}
7191 		}
7192 		if (unique) {
7193 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7194 			if (idx == sdeb_most_recent_idx)
7195 				--sdeb_most_recent_idx;
7196 		}
7197 	}
7198 	if (sdbg_host)
7199 		list_del(&sdbg_host->host_list);
7200 	spin_unlock(&sdebug_host_list_lock);
7201 
7202 	if (!sdbg_host)
7203 		return;
7204 
7205 	device_unregister(&sdbg_host->dev);
7206 	--sdebug_num_hosts;
7207 }
7208 
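/*
 * change_queue_depth host template callback: clamp the requested depth to
 * [1, SDEBUG_CANQUEUE] and apply it via scsi_change_queue_depth().
 */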
7209 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7210 {
7211 	int num_in_q = 0;
7212 	struct sdebug_dev_info *devip;
7213 
7214 	block_unblock_all_queues(true);
7215 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7216 	if (NULL == devip) {
7217 		block_unblock_all_queues(false);
7218 		return -ENODEV;
7219 	}
7220 	num_in_q = atomic_read(&devip->num_in_q);
7221 
7222 	if (qdepth > SDEBUG_CANQUEUE) {
7223 		qdepth = SDEBUG_CANQUEUE;
7224 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7225 			qdepth, SDEBUG_CANQUEUE);
7226 	}
7227 	if (qdepth < 1)
7228 		qdepth = 1;
7229 	if (qdepth != sdev->queue_depth)
7230 		scsi_change_queue_depth(sdev, qdepth);
7231 
7232 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7233 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7234 			    __func__, qdepth, num_in_q);
7235 	}
7236 	block_unblock_all_queues(false);
7237 	return sdev->queue_depth;
7238 }
7239 
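/*
 * With every_nth set and a timeout option active, pretend that every nth
 * command timed out: a true return makes the caller drop the command
 * without responding, so that the mid level's error handling is exercised.
 */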
7240 static bool fake_timeout(struct scsi_cmnd *scp)
7241 {
7242 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7243 		if (sdebug_every_nth < -1)
7244 			sdebug_every_nth = -1;
7245 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7246 			return true; /* ignore command causing timeout */
7247 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7248 			 scsi_medium_access_command(scp))
7249 			return true; /* time out reads and writes */
7250 	}
7251 	return false;
7252 }
7253 
7254 /* Response to TUR or media access command when device stopped */
7255 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7256 {
7257 	int stopped_state;
7258 	u64 diff_ns = 0;
7259 	ktime_t now_ts = ktime_get_boottime();
7260 	struct scsi_device *sdp = scp->device;
7261 
7262 	stopped_state = atomic_read(&devip->stopped);
7263 	if (stopped_state == 2) {
7264 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7265 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7266 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7267 				/* tur_ms_to_ready timer expired */
7268 				atomic_set(&devip->stopped, 0);
7269 				return 0;
7270 			}
7271 		}
7272 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7273 		if (sdebug_verbose)
7274 			sdev_printk(KERN_INFO, sdp,
7275 				    "%s: Not ready: in process of becoming ready\n", my_name);
7276 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7277 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7278 
7279 			if (diff_ns <= tur_nanosecs_to_ready)
7280 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7281 			else
7282 				diff_ns = tur_nanosecs_to_ready;
7283 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7284 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7285 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7286 						   diff_ns);
7287 			return check_condition_result;
7288 		}
7289 	}
7290 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7291 	if (sdebug_verbose)
7292 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7293 			    my_name);
7294 	return check_condition_result;
7295 }
7296 
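/*
 * map_queues host template callback: split the hardware queues between the
 * default map and, when poll_queues > 0, a poll map serviced by
 * sdebug_blk_mq_poll() below.
 */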
7297 static int sdebug_map_queues(struct Scsi_Host *shost)
7298 {
7299 	int i, qoff;
7300 
7301 	if (shost->nr_hw_queues == 1)
7302 		return 0;
7303 
7304 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7305 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7306 
7307 		map->nr_queues  = 0;
7308 
7309 		if (i == HCTX_TYPE_DEFAULT)
7310 			map->nr_queues = submit_queues - poll_queues;
7311 		else if (i == HCTX_TYPE_POLL)
7312 			map->nr_queues = poll_queues;
7313 
7314 		if (!map->nr_queues) {
7315 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7316 			continue;
7317 		}
7318 
7319 		map->queue_offset = qoff;
7320 		blk_mq_map_queues(map);
7321 
7322 		qoff += map->nr_queues;
7323 	}
7324 
7325 	return 0;
7326 }
7328 
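/*
 * mq_poll host template callback: scan this queue's in-use bitmap and
 * complete any SDEB_DEFER_POLL commands whose completion time has passed,
 * returning the number of commands completed.
 */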
7329 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7330 {
7331 	bool first;
7332 	bool retiring = false;
7333 	int num_entries = 0;
7334 	unsigned int qc_idx = 0;
7335 	unsigned long iflags;
7336 	ktime_t kt_from_boot = ktime_get_boottime();
7337 	struct sdebug_queue *sqp;
7338 	struct sdebug_queued_cmd *sqcp;
7339 	struct scsi_cmnd *scp;
7340 	struct sdebug_dev_info *devip;
7341 	struct sdebug_defer *sd_dp;
7342 
7343 	sqp = sdebug_q_arr + queue_num;
7344 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7345 
7346 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7347 		if (first) {
7348 			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7349 			first = false;
7350 		} else {
7351 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7352 		}
7353 		if (unlikely(qc_idx >= sdebug_max_queue))
7354 			break;
7355 
7356 		sqcp = &sqp->qc_arr[qc_idx];
7357 		sd_dp = sqcp->sd_dp;
7358 		if (unlikely(!sd_dp))
7359 			continue;
7360 		scp = sqcp->a_cmnd;
7361 		if (unlikely(scp == NULL)) {
7362 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7363 			       queue_num, qc_idx, __func__);
7364 			break;
7365 		}
7366 		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7367 			if (kt_from_boot < sd_dp->cmpl_ts)
7368 				continue;
7369 
7370 		} else		/* ignoring non-REQ_HIPRI requests */
7371 			continue;
7372 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7373 		if (likely(devip))
7374 			atomic_dec(&devip->num_in_q);
7375 		else
7376 			pr_err("devip=NULL from %s\n", __func__);
7377 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7378 			retiring = true;
7379 
7380 		sqcp->a_cmnd = NULL;
7381 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7382 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7383 				sqp, queue_num, qc_idx, __func__);
7384 			break;
7385 		}
7386 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7387 			int k, retval;
7388 
7389 			retval = atomic_read(&retired_max_queue);
7390 			if (qc_idx >= retval) {
7391 				pr_err("index %u too large\n", qc_idx);
7392 				break;
7393 			}
7394 			k = find_last_bit(sqp->in_use_bm, retval);
7395 			if ((k < sdebug_max_queue) || (k == retval))
7396 				atomic_set(&retired_max_queue, 0);
7397 			else
7398 				atomic_set(&retired_max_queue, k + 1);
7399 		}
7400 		sd_dp->defer_t = SDEB_DEFER_NONE;
7401 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7402 		scp->scsi_done(scp); /* callback to mid level */
7403 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7404 		num_entries++;
7405 	}
7406 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7407 	if (num_entries > 0)
7408 		atomic_add(num_entries, &sdeb_mq_poll_count);
7409 	return num_entries;
7410 }
7411 
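/*
 * queuecommand entry point: decode the opcode (and service action, if any)
 * via opcode_info_arr, optionally apply the strict CDB mask check, report
 * pending unit attentions or not-ready states, then schedule the matching
 * resp_*() handler with the configured delay.
 */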
7412 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7413 				   struct scsi_cmnd *scp)
7414 {
7415 	u8 sdeb_i;
7416 	struct scsi_device *sdp = scp->device;
7417 	const struct opcode_info_t *oip;
7418 	const struct opcode_info_t *r_oip;
7419 	struct sdebug_dev_info *devip;
7420 	u8 *cmd = scp->cmnd;
7421 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7422 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7423 	int k, na;
7424 	int errsts = 0;
7425 	u64 lun_index = sdp->lun & 0x3FFF;
7426 	u32 flags;
7427 	u16 sa;
7428 	u8 opcode = cmd[0];
7429 	bool has_wlun_rl;
7430 	bool inject_now;
7431 
7432 	scsi_set_resid(scp, 0);
7433 	if (sdebug_statistics) {
7434 		atomic_inc(&sdebug_cmnd_count);
7435 		inject_now = inject_on_this_cmd();
7436 	} else {
7437 		inject_now = false;
7438 	}
7439 	if (unlikely(sdebug_verbose &&
7440 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7441 		char b[120];
7442 		int n, len, sb;
7443 
7444 		len = scp->cmd_len;
7445 		sb = (int)sizeof(b);
7446 		if (len > 32)
7447 			strcpy(b, "too long, over 32 bytes");
7448 		else {
7449 			for (k = 0, n = 0; k < len && n < sb; ++k)
7450 				n += scnprintf(b + n, sb - n, "%02x ",
7451 					       (u32)cmd[k]);
7452 		}
7453 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7454 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7455 	}
7456 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7457 		return SCSI_MLQUEUE_HOST_BUSY;
7458 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7459 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7460 		goto err_out;
7461 
7462 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7463 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7464 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7465 	if (unlikely(!devip)) {
7466 		devip = find_build_dev_info(sdp);
7467 		if (NULL == devip)
7468 			goto err_out;
7469 	}
7470 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7471 		atomic_set(&sdeb_inject_pending, 1);
7472 
7473 	na = oip->num_attached;
7474 	r_pfp = oip->pfp;
7475 	if (na) {	/* multiple commands with this opcode */
7476 		r_oip = oip;
7477 		if (FF_SA & r_oip->flags) {
7478 			if (F_SA_LOW & oip->flags)
7479 				sa = 0x1f & cmd[1];
7480 			else
7481 				sa = get_unaligned_be16(cmd + 8);
7482 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7483 				if (opcode == oip->opcode && sa == oip->sa)
7484 					break;
7485 			}
7486 		} else {   /* since no service action only check opcode */
7487 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7488 				if (opcode == oip->opcode)
7489 					break;
7490 			}
7491 		}
7492 		if (k > na) {
7493 			if (F_SA_LOW & r_oip->flags)
7494 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7495 			else if (F_SA_HIGH & r_oip->flags)
7496 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7497 			else
7498 				mk_sense_invalid_opcode(scp);
7499 			goto check_cond;
7500 		}
7501 	}	/* else (when na==0) we assume the oip is a match */
7502 	flags = oip->flags;
7503 	if (unlikely(F_INV_OP & flags)) {
7504 		mk_sense_invalid_opcode(scp);
7505 		goto check_cond;
7506 	}
7507 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7508 		if (sdebug_verbose)
7509 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7510 				    my_name, opcode);
7511 		mk_sense_invalid_opcode(scp);
7512 		goto check_cond;
7513 	}
7514 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7515 		u8 rem;
7516 		int j;
7517 
7518 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7519 			rem = ~oip->len_mask[k] & cmd[k];
7520 			if (rem) {
7521 				for (j = 7; j >= 0; --j, rem <<= 1) {
7522 					if (0x80 & rem)
7523 						break;
7524 				}
7525 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7526 				goto check_cond;
7527 			}
7528 		}
7529 	}
7530 	if (unlikely(!(F_SKIP_UA & flags) &&
7531 		     find_first_bit(devip->uas_bm,
7532 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7533 		errsts = make_ua(scp, devip);
7534 		if (errsts)
7535 			goto check_cond;
7536 	}
7537 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7538 		     atomic_read(&devip->stopped))) {
7539 		errsts = resp_not_ready(scp, devip);
7540 		if (errsts)
7541 			goto fini;
7542 	}
7543 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7544 		goto fini;
7545 	if (unlikely(sdebug_every_nth)) {
7546 		if (fake_timeout(scp))
7547 			return 0;	/* ignore command: make trouble */
7548 	}
7549 	if (likely(oip->pfp))
7550 		pfp = oip->pfp;	/* calls a resp_* function */
7551 	else
7552 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7553 
7554 fini:
7555 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7556 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7557 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7558 					    sdebug_ndelay > 10000)) {
7559 		/*
7560 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7561 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7562 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7563 		 * For Synchronize Cache want 1/20 of SSU's delay.
7564 		 */
7565 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7566 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7567 
7568 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7569 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7570 	} else
7571 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7572 				     sdebug_ndelay);
7573 check_cond:
7574 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7575 err_out:
7576 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7577 }
7578 
7579 static struct scsi_host_template sdebug_driver_template = {
7580 	.show_info =		scsi_debug_show_info,
7581 	.write_info =		scsi_debug_write_info,
7582 	.proc_name =		sdebug_proc_name,
7583 	.name =			"SCSI DEBUG",
7584 	.info =			scsi_debug_info,
7585 	.slave_alloc =		scsi_debug_slave_alloc,
7586 	.slave_configure =	scsi_debug_slave_configure,
7587 	.slave_destroy =	scsi_debug_slave_destroy,
7588 	.ioctl =		scsi_debug_ioctl,
7589 	.queuecommand =		scsi_debug_queuecommand,
7590 	.change_queue_depth =	sdebug_change_qdepth,
7591 	.map_queues =		sdebug_map_queues,
7592 	.mq_poll =		sdebug_blk_mq_poll,
7593 	.eh_abort_handler =	scsi_debug_abort,
7594 	.eh_device_reset_handler = scsi_debug_device_reset,
7595 	.eh_target_reset_handler = scsi_debug_target_reset,
7596 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7597 	.eh_host_reset_handler = scsi_debug_host_reset,
7598 	.can_queue =		SDEBUG_CANQUEUE,
7599 	.this_id =		7,
7600 	.sg_tablesize =		SG_MAX_SEGMENTS,
7601 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7602 	.max_sectors =		-1U,
7603 	.max_segment_size =	-1U,
7604 	.module =		THIS_MODULE,
7605 	.track_queue_depth =	1,
7606 };
7607 
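/*
 * Bus probe for a simulated adapter: allocate and configure the Scsi_Host
 * (queue counts, DIF/DIX protection, guard type), add it to the mid level
 * and scan for its devices.
 */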
7608 static int sdebug_driver_probe(struct device *dev)
7609 {
7610 	int error = 0;
7611 	struct sdebug_host_info *sdbg_host;
7612 	struct Scsi_Host *hpnt;
7613 	int hprot;
7614 
7615 	sdbg_host = to_sdebug_host(dev);
7616 
7617 	sdebug_driver_template.can_queue = sdebug_max_queue;
7618 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7619 	if (!sdebug_clustering)
7620 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7621 
7622 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7623 	if (NULL == hpnt) {
7624 		pr_err("scsi_host_alloc failed\n");
7625 		error = -ENODEV;
7626 		return error;
7627 	}
7628 	if (submit_queues > nr_cpu_ids) {
7629 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7630 			my_name, submit_queues, nr_cpu_ids);
7631 		submit_queues = nr_cpu_ids;
7632 	}
7633 	/*
7634 	 * Decide whether to tell scsi subsystem that we want mq. The
7635 	 * following should give the same answer for each host.
7636 	 */
7637 	hpnt->nr_hw_queues = submit_queues;
7638 	if (sdebug_host_max_queue)
7639 		hpnt->host_tagset = 1;
7640 
7641 	/* poll queues are possible for nr_hw_queues > 1 */
7642 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7643 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7644 			 my_name, poll_queues, hpnt->nr_hw_queues);
7645 		poll_queues = 0;
7646 	}
7647 
7648 	/*
7649 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7650 	 * left over for non-polled I/O.
7651 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7652 	 */
7653 	if (poll_queues >= submit_queues) {
7654 		if (submit_queues < 3)
7655 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7656 		else
7657 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7658 				my_name, submit_queues - 1);
7659 		poll_queues = 1;
7660 	}
7661 	if (poll_queues)
7662 		hpnt->nr_maps = 3;
7663 
7664 	sdbg_host->shost = hpnt;
7665 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7666 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7667 		hpnt->max_id = sdebug_num_tgts + 1;
7668 	else
7669 		hpnt->max_id = sdebug_num_tgts;
7670 	/* = sdebug_max_luns; */
7671 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7672 
7673 	hprot = 0;
7674 
7675 	switch (sdebug_dif) {
7676 
7677 	case T10_PI_TYPE1_PROTECTION:
7678 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7679 		if (sdebug_dix)
7680 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7681 		break;
7682 
7683 	case T10_PI_TYPE2_PROTECTION:
7684 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7685 		if (sdebug_dix)
7686 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7687 		break;
7688 
7689 	case T10_PI_TYPE3_PROTECTION:
7690 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7691 		if (sdebug_dix)
7692 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7693 		break;
7694 
7695 	default:
7696 		if (sdebug_dix)
7697 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7698 		break;
7699 	}
7700 
7701 	scsi_host_set_prot(hpnt, hprot);
7702 
7703 	if (have_dif_prot || sdebug_dix)
7704 		pr_info("host protection%s%s%s%s%s%s%s\n",
7705 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7706 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7707 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7708 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7709 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7710 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7711 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7712 
7713 	if (sdebug_guard == 1)
7714 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7715 	else
7716 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7717 
7718 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7719 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7720 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7721 		sdebug_statistics = true;
7722 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7723 	if (error) {
7724 		pr_err("scsi_add_host failed\n");
7725 		error = -ENODEV;
7726 		scsi_host_put(hpnt);
7727 	} else {
7728 		scsi_scan_host(hpnt);
7729 	}
7730 
7731 	return error;
7732 }
7733 
7734 static void sdebug_driver_remove(struct device *dev)
7735 {
7736 	struct sdebug_host_info *sdbg_host;
7737 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7738 
7739 	sdbg_host = to_sdebug_host(dev);
7740 
7741 	scsi_remove_host(sdbg_host->shost);
7742 
7743 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7744 				 dev_list) {
7745 		list_del(&sdbg_devinfo->dev_list);
7746 		kfree(sdbg_devinfo->zstate);
7747 		kfree(sdbg_devinfo);
7748 	}
7749 
7750 	scsi_host_put(sdbg_host->shost);
7751 }
7752 
7753 static int pseudo_lld_bus_match(struct device *dev,
7754 				struct device_driver *dev_driver)
7755 {
7756 	return 1;
7757 }
7758 
7759 static struct bus_type pseudo_lld_bus = {
7760 	.name = "pseudo",
7761 	.match = pseudo_lld_bus_match,
7762 	.probe = sdebug_driver_probe,
7763 	.remove = sdebug_driver_remove,
7764 	.drv_groups = sdebug_drv_groups,
7765 };
7766