xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 11a163f2)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
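/*
 * Illustrative example (not from the original source): the defaults
 * above can be overridden at load time via the module parameters
 * declared further down in this file, e.g. a hypothetical invocation
 * making 2 targets with 4 logical units each:
 *
 *	modprobe scsi_debug num_tgts=2 max_luns=4
 */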
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
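/*
 * Sketch (this mirrors what make_ua() does further down): since lower
 * UA numbers have higher priority, the highest-priority pending UA is
 * simply the first set bit in the per-device bitmap (uas_bm, declared
 * below):
 *
 *	int k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 *	if (k != SDEBUG_NUM_UAS) {
 *		... build sense data for UA number k ...
 *		clear_bit(k, devip->uas_bm);
 *	}
 */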

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by the
 * max_queue option. Command responses are not queued when jdelay=0 and
 * ndelay=0. The per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the number of bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  255
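/*
 * Worked example: with 64-bit longs SDEBUG_CANQUEUE is 3 * 64 = 192
 * queued commands per submit queue; with 32-bit longs it is 3 * 32 = 96.
 */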

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
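/*
 * Sketch: to_sdebug_host() recovers the owning host object from its
 * embedded struct device, the usual container_of() pattern, e.g. in a
 * hypothetical device release callback:
 *
 *	static void sdebug_release(struct device *dev)
 *	{
 *		struct sdebug_host_info *hip = to_sdebug_host(dev);
 *		...
 *	}
 */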

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
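/*
 * Sketch (an assumption about typical use, in the spirit of how
 * resp_start_stop() handles its IMMED bit): a handler that wants the
 * mid-level to see an early completion can OR the mask into an
 * otherwise good result:
 *
 *	if (immed)
 *		res |= SDEG_RES_IMMED_MASK;
 *	return res;
 */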

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
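/*
 * Sketch of how the tables above are consulted (an outline of the
 * lookup done in the queuecommand path, not a verbatim copy): the first
 * cdb byte indexes opcode_ind_arr[] to get a SDEB_I_* value, which in
 * turn indexes opcode_info_arr[]; entries with attached overflow
 * entries are disambiguated by opcode or service action via arrp[]:
 *
 *	int k = opcode_ind_arr[cmd[0]];
 *	const struct opcode_info_t *oip = &opcode_info_arr[k];
 *
 *	if (oip->flags & F_INV_OP)
 *		... unsupported opcode ...
 *	else if (oip->num_attached && (cmd[0] != oip->opcode ||
 *		 ((oip->flags & FF_SA) && sa != oip->sa)))
 *		... scan oip->arrp[0 .. num_attached - 1] for a match ...
 */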

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff; the kernel may get rid of these but some mode sense
   pages may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
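/*
 * Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 5) flags
 * bit 5 of cdb byte 1. sks[0] becomes 0x80 | 0x40 (C/D) | 0x8 (BPV) |
 * 5 = 0xcd, followed by the big-endian field pointer 0x0001; with
 * fixed format sense, the bytes 0xcd 0x00 0x01 land at offsets 15..17.
 */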

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
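/*
 * For example (a hypothetical invocation), loading the module with
 * cdb_len=16 makes the mid-level prefer READ(16)/WRITE(16) and 10 byte
 * MODE SENSE/SELECT for every simulated device:
 *
 *	modprobe scsi_debug cdb_len=16
 */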

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
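/*
 * Typical call pattern (illustrative sketch; the buffer size and the
 * alloc_len handling are assumptions, not lifted from this file): a
 * resp_*() handler builds its response in a local array and then hands
 * it to the helper above:
 *
 *	unsigned char arr[64];
 *	int n = ...number of valid bytes built in arr...;
 *	return fill_from_dev_buffer(scp, arr, min(n, alloc_len));
 */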

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
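/*
 * Worked example of the padding arithmetic above: na1 is 29 characters,
 * so plen is 30 after the trailing NUL, then rounded up to the next
 * multiple of 4, giving a 32 byte null terminated, padded descriptor.
 */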

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1497 
1498 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 }
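/*
 * Example: with sdebug_physblk_exp = 3 and no opt_xferlen_exp override,
 * gran = 1 << 3 = 8 is placed in bytes 2..3, so the OPTIMAL TRANSFER
 * LENGTH GRANULARITY reported to the application client is eight
 * logical blocks.
 */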
1502 
1503 /* Block device characteristics VPD page (SBC-3) */
1504 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1505 {
1506 	memset(arr, 0, 0x3c);
1507 	arr[0] = 0;
1508 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1509 	arr[2] = 0;
1510 	arr[3] = 5;	/* less than 1.8" */
1511 	if (devip->zmodel == BLK_ZONED_HA)
1512 		arr[4] = 1 << 4;	/* zoned field = 01b */
1513 
1514 	return 0x3c;
1515 }
1516 
1517 /* Logical block provisioning VPD page (SBC-4) */
1518 static int inquiry_vpd_b2(unsigned char *arr)
1519 {
1520 	memset(arr, 0, 0x4);
1521 	arr[0] = 0;			/* threshold exponent */
1522 	if (sdebug_lbpu)
1523 		arr[1] = 1 << 7;
1524 	if (sdebug_lbpws)
1525 		arr[1] |= 1 << 6;
1526 	if (sdebug_lbpws10)
1527 		arr[1] |= 1 << 5;
1528 	if (sdebug_lbprz && scsi_debug_lbp())
1529 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1530 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1531 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1532 	/* threshold_percentage=0 */
1533 	return 0x4;
1534 }
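/*
 * Example: with sdebug_lbpu set and sdebug_lbprz = 1 (and provisioning
 * active so that scsi_debug_lbp() is true), byte 1 becomes 0x84: LBPU
 * plus a three bit LBPRZ field of 001b, i.e. unmapped blocks read back
 * as zeroes.
 */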
1535 
1536 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1537 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1538 {
1539 	memset(arr, 0, 0x3c);
1540 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1541 	/*
1542 	 * Set Optimal number of open sequential write preferred zones and
1543 	 * Optimal number of non-sequentially written sequential write
1544 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1545 	 * fields set to zero, apart from Max. number of open swrz_s field.
1546 	 */
1547 	put_unaligned_be32(0xffffffff, &arr[4]);
1548 	put_unaligned_be32(0xffffffff, &arr[8]);
1549 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1550 		put_unaligned_be32(devip->max_open, &arr[12]);
1551 	else
1552 		put_unaligned_be32(0xffffffff, &arr[12]);
1553 	return 0x3c;
1554 }
1555 
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1558 
1559 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1560 {
1561 	unsigned char pq_pdt;
1562 	unsigned char *arr;
1563 	unsigned char *cmd = scp->cmnd;
1564 	int alloc_len, n, ret;
1565 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1566 
1567 	alloc_len = get_unaligned_be16(cmd + 3);
1568 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1569 	if (!arr)
1570 		return DID_REQUEUE << 16;
1571 	is_disk = (sdebug_ptype == TYPE_DISK);
1572 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1573 	is_disk_zbc = (is_disk || is_zbc);
1574 	have_wlun = scsi_is_wlun(scp->device->lun);
1575 	if (have_wlun)
1576 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1577 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1578 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1579 	else
1580 		pq_pdt = (sdebug_ptype & 0x1f);
1581 	arr[0] = pq_pdt;
1582 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1583 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1584 		kfree(arr);
1585 		return check_condition_result;
1586 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1587 		int lu_id_num, port_group_id, target_dev_id, len;
1588 		char lu_id_str[6];
1589 		int host_no = devip->sdbg_host->shost->host_no;
1590 
1591 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1592 		    (devip->channel & 0x7f);
1593 		if (sdebug_vpd_use_hostno == 0)
1594 			host_no = 0;
1595 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1596 			    (devip->target * 1000) + devip->lun);
1597 		target_dev_id = ((host_no + 1) * 2000) +
1598 				 (devip->target * 1000) - 3;
1599 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1600 		if (0 == cmd[2]) { /* supported vital product data pages */
1601 			arr[1] = cmd[2];	/*sanity */
1602 			n = 4;
1603 			arr[n++] = 0x0;   /* this page */
1604 			arr[n++] = 0x80;  /* unit serial number */
1605 			arr[n++] = 0x83;  /* device identification */
1606 			arr[n++] = 0x84;  /* software interface ident. */
1607 			arr[n++] = 0x85;  /* management network addresses */
1608 			arr[n++] = 0x86;  /* extended inquiry */
1609 			arr[n++] = 0x87;  /* mode page policy */
1610 			arr[n++] = 0x88;  /* SCSI ports */
1611 			if (is_disk_zbc) {	  /* SBC or ZBC */
1612 				arr[n++] = 0x89;  /* ATA information */
1613 				arr[n++] = 0xb0;  /* Block limits */
1614 				arr[n++] = 0xb1;  /* Block characteristics */
1615 				if (is_disk)
1616 					arr[n++] = 0xb2;  /* LB Provisioning */
1617 				if (is_zbc)
1618 					arr[n++] = 0xb6;  /* ZB dev. char. */
1619 			}
1620 			arr[3] = n - 4;	  /* number of supported VPD pages */
1621 		} else if (0x80 == cmd[2]) { /* unit serial number */
1622 			arr[1] = cmd[2];	/*sanity */
1623 			arr[3] = len;
1624 			memcpy(&arr[4], lu_id_str, len);
1625 		} else if (0x83 == cmd[2]) { /* device identification */
1626 			arr[1] = cmd[2];	/*sanity */
1627 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1628 						target_dev_id, lu_id_num,
1629 						lu_id_str, len,
1630 						&devip->lu_name);
1631 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1632 			arr[1] = cmd[2];	/*sanity */
1633 			arr[3] = inquiry_vpd_84(&arr[4]);
1634 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1635 			arr[1] = cmd[2];	/*sanity */
1636 			arr[3] = inquiry_vpd_85(&arr[4]);
1637 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1638 			arr[1] = cmd[2];	/*sanity */
1639 			arr[3] = 0x3c;	/* number of following entries */
1640 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1641 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1642 			else if (have_dif_prot)
1643 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1644 			else
1645 				arr[4] = 0x0;   /* no protection stuff */
1646 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1647 		} else if (0x87 == cmd[2]) { /* mode page policy */
1648 			arr[1] = cmd[2];	/*sanity */
1649 			arr[3] = 0x8;	/* number of following entries */
1650 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1651 			arr[6] = 0x80;	/* mlus, shared */
1652 			arr[8] = 0x18;	 /* protocol specific lu */
1653 			arr[10] = 0x82;	 /* mlus, per initiator port */
1654 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1657 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1658 			arr[1] = cmd[2];        /*sanity */
1659 			n = inquiry_vpd_89(&arr[4]);
1660 			put_unaligned_be16(n, arr + 2);
1661 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			arr[3] = inquiry_vpd_b0(&arr[4]);
1664 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1667 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1668 			arr[1] = cmd[2];        /*sanity */
1669 			arr[3] = inquiry_vpd_b2(&arr[4]);
1670 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1671 			arr[1] = cmd[2];        /*sanity */
1672 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1673 		} else {
1674 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1675 			kfree(arr);
1676 			return check_condition_result;
1677 		}
1678 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1679 		ret = fill_from_dev_buffer(scp, arr,
1680 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1681 		kfree(arr);
1682 		return ret;
1683 	}
1684 	/* drops through here for a standard inquiry */
1685 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1686 	arr[2] = sdebug_scsi_level;
1687 	arr[3] = 2;    /* response_data_format==2 */
1688 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1689 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1690 	if (sdebug_vpd_use_hostno == 0)
1691 		arr[5] |= 0x10; /* claim: implicit TPGS */
1692 	arr[6] = 0x10; /* claim: MultiP */
1693 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1694 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1695 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1696 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1697 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1698 	/* Use Vendor Specific area to place driver date in ASCII hex */
1699 	memcpy(&arr[36], sdebug_version_date, 8);
1700 	/* version descriptors (2 bytes each) follow */
1701 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1702 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1703 	n = 62;
1704 	if (is_disk) {		/* SBC-4 no version claimed */
1705 		put_unaligned_be16(0x600, arr + n);
1706 		n += 2;
1707 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1708 		put_unaligned_be16(0x525, arr + n);
1709 		n += 2;
1710 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1711 		put_unaligned_be16(0x624, arr + n);
1712 		n += 2;
1713 	}
1714 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1715 	ret = fill_from_dev_buffer(scp, arr,
1716 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1717 	kfree(arr);
1718 	return ret;
1719 }
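/*
 * Example (illustrative): to fetch the device identification VPD page
 * handled above, an initiator would issue an INQUIRY CDB such as
 *	{ 0x12, 0x01, 0x83, 0x00, 0xfc, 0x00 }
 * i.e. EVPD=1 in byte 1, page code 0x83 in byte 2 and a 252 byte
 * allocation length in bytes 3..4.
 */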
1720 
1721 /* See resp_iec_m_pg() for how this data is manipulated */
1722 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1723 				   0, 0, 0x0, 0x0};
1724 
1725 static int resp_requests(struct scsi_cmnd *scp,
1726 			 struct sdebug_dev_info *devip)
1727 {
1728 	unsigned char *cmd = scp->cmnd;
1729 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1730 	bool dsense = !!(cmd[1] & 1);
1731 	int alloc_len = cmd[4];
1732 	int len = 18;
1733 	int stopped_state = atomic_read(&devip->stopped);
1734 
1735 	memset(arr, 0, sizeof(arr));
1736 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1737 		if (dsense) {
1738 			arr[0] = 0x72;
1739 			arr[1] = NOT_READY;
1740 			arr[2] = LOGICAL_UNIT_NOT_READY;
1741 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1742 			len = 8;
1743 		} else {
1744 			arr[0] = 0x70;
1745 			arr[2] = NOT_READY;		/* NOT_READY in sense_key field */
1746 			arr[7] = 0xa;			/* 18 byte sense buffer */
1747 			arr[12] = LOGICAL_UNIT_NOT_READY;
1748 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1749 		}
1750 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1751 		/* Informational exceptions control mode page: TEST=1, MRIE=6 */
1752 		if (dsense) {
1753 			arr[0] = 0x72;
1754 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1755 			arr[2] = THRESHOLD_EXCEEDED;
1756 			arr[3] = 0xff;		/* Failure prediction(false) */
1757 			len = 8;
1758 		} else {
1759 			arr[0] = 0x70;
1760 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1761 			arr[7] = 0xa;		/* 18 byte sense buffer */
1762 			arr[12] = THRESHOLD_EXCEEDED;
1763 			arr[13] = 0xff;		/* Failure prediction(false) */
1764 		}
1765 	} else {	/* nothing to report */
1766 		if (dsense) {
1767 			len = 8;
1768 			memset(arr, 0, len);
1769 			arr[0] = 0x72;
1770 		} else {
1771 			memset(arr, 0, len);
1772 			arr[0] = 0x70;
1773 			arr[7] = 0xa;
1774 		}
1775 	}
1776 	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1777 }
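/*
 * Note on the two layouts above: 0x72 starts a descriptor format sense
 * response (sense key in byte 1, ASC in byte 2, ASCQ in byte 3, 8 bytes
 * minimum) while 0x70 starts fixed format sense (sense key in byte 2,
 * additional length 0xa in byte 7, ASC/ASCQ in bytes 12/13, 18 bytes
 * total), which is why the arr[] indices differ per branch.
 */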
1778 
1779 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1780 {
1781 	unsigned char *cmd = scp->cmnd;
1782 	int power_cond, want_stop, stopped_state;
1783 	bool changing;
1784 
1785 	power_cond = (cmd[4] & 0xf0) >> 4;
1786 	if (power_cond) {
1787 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1788 		return check_condition_result;
1789 	}
1790 	want_stop = !(cmd[4] & 1);
1791 	stopped_state = atomic_read(&devip->stopped);
1792 	if (stopped_state == 2) {
1793 		ktime_t now_ts = ktime_get_boottime();
1794 
1795 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1796 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1797 
1798 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1799 				/* tur_ms_to_ready timer has expired */
1800 				atomic_set(&devip->stopped, 0);
1801 				stopped_state = 0;
1802 			}
1803 		}
1804 		if (stopped_state == 2) {
1805 			if (want_stop) {
1806 				stopped_state = 1;	/* dummy up success */
1807 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1808 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1809 				return check_condition_result;
1810 			}
1811 		}
1812 	}
1813 	changing = (stopped_state != want_stop);
1814 	if (changing)
1815 		atomic_xchg(&devip->stopped, want_stop);
1816 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1817 		return SDEG_RES_IMMED_MASK;
1818 	else
1819 		return 0;
1820 }
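/*
 * Summary of the CDB decode above: the POWER CONDITION field (upper
 * nibble of cmd[4]) must be zero, cmd[4] bit 0 set means start (clear
 * means stop), and the IMMED bit (cmd[1] bit 0) or an unchanged state
 * makes the command complete with SDEG_RES_IMMED_MASK, i.e. without
 * the usual response delay.
 */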
1821 
1822 static sector_t get_sdebug_capacity(void)
1823 {
1824 	static const unsigned int gibibyte = 1073741824;
1825 
1826 	if (sdebug_virtual_gb > 0)
1827 		return (sector_t)sdebug_virtual_gb *
1828 			(gibibyte / sdebug_sector_size);
1829 	else
1830 		return sdebug_store_sectors;
1831 }
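/*
 * Worked example: virtual_gb=4 with a 512 byte sector size yields
 * 4 * (1073741824 / 512) = 8388608 sectors, independent of the
 * (possibly smaller) backing store size.
 */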
1832 
1833 #define SDEBUG_READCAP_ARR_SZ 8
1834 static int resp_readcap(struct scsi_cmnd *scp,
1835 			struct sdebug_dev_info *devip)
1836 {
1837 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1838 	unsigned int capac;
1839 
1840 	/* following just in case virtual_gb changed */
1841 	sdebug_capacity = get_sdebug_capacity();
1842 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1843 	if (sdebug_capacity < 0xffffffff) {
1844 		capac = (unsigned int)sdebug_capacity - 1;
1845 		put_unaligned_be32(capac, arr + 0);
1846 	} else
1847 		put_unaligned_be32(0xffffffff, arr + 0);
1848 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1849 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1850 }
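/*
 * Per SBC, the 0xffffffff returned above tells the initiator that the
 * capacity does not fit in 32 bits and that READ CAPACITY(16) must be
 * used instead; resp_readcap16() below supplies the 64 bit value.
 */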
1851 
1852 #define SDEBUG_READCAP16_ARR_SZ 32
1853 static int resp_readcap16(struct scsi_cmnd *scp,
1854 			  struct sdebug_dev_info *devip)
1855 {
1856 	unsigned char *cmd = scp->cmnd;
1857 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1858 	int alloc_len;
1859 
1860 	alloc_len = get_unaligned_be32(cmd + 10);
1861 	/* following just in case virtual_gb changed */
1862 	sdebug_capacity = get_sdebug_capacity();
1863 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1864 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1865 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1866 	arr[13] = sdebug_physblk_exp & 0xf;
1867 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1868 
1869 	if (scsi_debug_lbp()) {
1870 		arr[14] |= 0x80; /* LBPME */
1871 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1872 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1873 		 * in the wider field maps to 0 in this field.
1874 		 */
1875 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1876 			arr[14] |= 0x40;
1877 	}
1878 
1879 	arr[15] = sdebug_lowest_aligned & 0xff;
1880 
1881 	if (have_dif_prot) {
1882 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1883 		arr[12] |= 1; /* PROT_EN */
1884 	}
1885 
1886 	return fill_from_dev_buffer(scp, arr,
1887 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1888 }
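/*
 * Example: with sdebug_dif=1 (type 1 protection) byte 12 becomes 0x01
 * (P_TYPE=000b, PROT_EN=1); sdebug_dif=3 gives 0x05 (P_TYPE=010b,
 * PROT_EN=1), matching the encoding above.
 */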
1889 
1890 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1891 
1892 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1893 			      struct sdebug_dev_info *devip)
1894 {
1895 	unsigned char *cmd = scp->cmnd;
1896 	unsigned char *arr;
1897 	int host_no = devip->sdbg_host->shost->host_no;
1898 	int n, ret, alen, rlen;
1899 	int port_group_a, port_group_b, port_a, port_b;
1900 
1901 	alen = get_unaligned_be32(cmd + 6);
1902 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1903 	if (!arr)
1904 		return DID_REQUEUE << 16;
1905 	/*
1906 	 * EVPD page 0x88 states we have two ports, one
1907 	 * real and a fake port with no device connected.
1908 	 * So we create two port groups with one port each
1909 	 * and set the group with port B to unavailable.
1910 	 */
1911 	port_a = 0x1; /* relative port A */
1912 	port_b = 0x2; /* relative port B */
1913 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1914 			(devip->channel & 0x7f);
1915 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1916 			(devip->channel & 0x7f) + 0x80;
1917 
1918 	/*
1919 	 * The asymmetric access state is cycled according to the host_id.
1920 	 */
1921 	n = 4;
1922 	if (sdebug_vpd_use_hostno == 0) {
1923 		arr[n++] = host_no % 3; /* Asymm access state */
1924 		arr[n++] = 0x0F; /* claim: all states are supported */
1925 	} else {
1926 		arr[n++] = 0x0; /* Active/Optimized path */
1927 		arr[n++] = 0x01; /* only support active/optimized paths */
1928 	}
1929 	put_unaligned_be16(port_group_a, arr + n);
1930 	n += 2;
1931 	arr[n++] = 0;    /* Reserved */
1932 	arr[n++] = 0;    /* Status code */
1933 	arr[n++] = 0;    /* Vendor unique */
1934 	arr[n++] = 0x1;  /* One port per group */
1935 	arr[n++] = 0;    /* Reserved */
1936 	arr[n++] = 0;    /* Reserved */
1937 	put_unaligned_be16(port_a, arr + n);
1938 	n += 2;
1939 	arr[n++] = 3;    /* Port unavailable */
1940 		arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1941 	put_unaligned_be16(port_group_b, arr + n);
1942 	n += 2;
1943 	arr[n++] = 0;    /* Reserved */
1944 	arr[n++] = 0;    /* Status code */
1945 	arr[n++] = 0;    /* Vendor unique */
1946 	arr[n++] = 0x1;  /* One port per group */
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Reserved */
1949 	put_unaligned_be16(port_b, arr + n);
1950 	n += 2;
1951 
1952 	rlen = n - 4;
1953 	put_unaligned_be32(rlen, arr + 0);
1954 
1955 	/*
1956 	 * Return the smallest value of either
1957 	 * - The allocated length
1958 	 * - The constructed command length
1959 	 * - The maximum array size
1960 	 */
1961 	rlen = min_t(int, alen, n);
1962 	ret = fill_from_dev_buffer(scp, arr,
1963 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1964 	kfree(arr);
1965 	return ret;
1966 }
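/*
 * Layout sketch for the response built above: each of the two target
 * port group descriptors is 8 bytes of header plus one 4 byte relative
 * port entry, so n ends at 4 + 2 * 12 = 28 and the length field (rlen)
 * holds 24, excluding the 4 byte return data header.
 */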
1967 
1968 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1969 			     struct sdebug_dev_info *devip)
1970 {
1971 	bool rctd;
1972 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1973 	u16 req_sa, u;
1974 	u32 alloc_len, a_len;
1975 	int k, offset, len, errsts, count, bump, na;
1976 	const struct opcode_info_t *oip;
1977 	const struct opcode_info_t *r_oip;
1978 	u8 *arr;
1979 	u8 *cmd = scp->cmnd;
1980 
1981 	rctd = !!(cmd[2] & 0x80);
1982 	reporting_opts = cmd[2] & 0x7;
1983 	req_opcode = cmd[3];
1984 	req_sa = get_unaligned_be16(cmd + 4);
1985 	alloc_len = get_unaligned_be32(cmd + 6);
1986 	if (alloc_len < 4 || alloc_len > 0xffff) {
1987 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1988 		return check_condition_result;
1989 	}
1990 	if (alloc_len > 8192)
1991 		a_len = 8192;
1992 	else
1993 		a_len = alloc_len;
1994 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1995 	if (NULL == arr) {
1996 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1997 				INSUFF_RES_ASCQ);
1998 		return check_condition_result;
1999 	}
2000 	switch (reporting_opts) {
2001 	case 0:	/* all commands */
2002 		/* count number of commands */
2003 		for (count = 0, oip = opcode_info_arr;
2004 		     oip->num_attached != 0xff; ++oip) {
2005 			if (F_INV_OP & oip->flags)
2006 				continue;
2007 			count += (oip->num_attached + 1);
2008 		}
2009 		bump = rctd ? 20 : 8;
2010 		put_unaligned_be32(count * bump, arr);
2011 		for (offset = 4, oip = opcode_info_arr;
2012 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2013 			if (F_INV_OP & oip->flags)
2014 				continue;
2015 			na = oip->num_attached;
2016 			arr[offset] = oip->opcode;
2017 			put_unaligned_be16(oip->sa, arr + offset + 2);
2018 			if (rctd)
2019 				arr[offset + 5] |= 0x2;
2020 			if (FF_SA & oip->flags)
2021 				arr[offset + 5] |= 0x1;
2022 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2023 			if (rctd)
2024 				put_unaligned_be16(0xa, arr + offset + 8);
2025 			r_oip = oip;
2026 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2027 				if (F_INV_OP & oip->flags)
2028 					continue;
2029 				offset += bump;
2030 				arr[offset] = oip->opcode;
2031 				put_unaligned_be16(oip->sa, arr + offset + 2);
2032 				if (rctd)
2033 					arr[offset + 5] |= 0x2;
2034 				if (FF_SA & oip->flags)
2035 					arr[offset + 5] |= 0x1;
2036 				put_unaligned_be16(oip->len_mask[0],
2037 						   arr + offset + 6);
2038 				if (rctd)
2039 					put_unaligned_be16(0xa,
2040 							   arr + offset + 8);
2041 			}
2042 			oip = r_oip;
2043 			offset += bump;
2044 		}
2045 		break;
2046 	case 1:	/* one command: opcode only */
2047 	case 2:	/* one command: opcode plus service action */
2048 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2049 		sdeb_i = opcode_ind_arr[req_opcode];
2050 		oip = &opcode_info_arr[sdeb_i];
2051 		if (F_INV_OP & oip->flags) {
2052 			supp = 1;
2053 			offset = 4;
2054 		} else {
2055 			if (1 == reporting_opts) {
2056 				if (FF_SA & oip->flags) {
2057 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2058 							     2, 2);
2059 					kfree(arr);
2060 					return check_condition_result;
2061 				}
2062 				req_sa = 0;
2063 			} else if (2 == reporting_opts &&
2064 				   0 == (FF_SA & oip->flags)) {
2065 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* points at requested sa */
2066 				kfree(arr);
2067 				return check_condition_result;
2068 			}
2069 			if (0 == (FF_SA & oip->flags) &&
2070 			    req_opcode == oip->opcode)
2071 				supp = 3;
2072 			else if (0 == (FF_SA & oip->flags)) {
2073 				na = oip->num_attached;
2074 				for (k = 0, oip = oip->arrp; k < na;
2075 				     ++k, ++oip) {
2076 					if (req_opcode == oip->opcode)
2077 						break;
2078 				}
2079 				supp = (k >= na) ? 1 : 3;
2080 			} else if (req_sa != oip->sa) {
2081 				na = oip->num_attached;
2082 				for (k = 0, oip = oip->arrp; k < na;
2083 				     ++k, ++oip) {
2084 					if (req_sa == oip->sa)
2085 						break;
2086 				}
2087 				supp = (k >= na) ? 1 : 3;
2088 			} else
2089 				supp = 3;
2090 			if (3 == supp) {
2091 				u = oip->len_mask[0];
2092 				put_unaligned_be16(u, arr + 2);
2093 				arr[4] = oip->opcode;
2094 				for (k = 1; k < u; ++k)
2095 					arr[4 + k] = (k < 16) ?
2096 						 oip->len_mask[k] : 0xff;
2097 				offset = 4 + u;
2098 			} else
2099 				offset = 4;
2100 		}
2101 		arr[1] = (rctd ? 0x80 : 0) | supp;
2102 		if (rctd) {
2103 			put_unaligned_be16(0xa, arr + offset);
2104 			offset += 12;
2105 		}
2106 		break;
2107 	default:
2108 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2109 		kfree(arr);
2110 		return check_condition_result;
2111 	}
2112 	offset = (offset < a_len) ? offset : a_len;
2113 	len = (offset < alloc_len) ? offset : alloc_len;
2114 	errsts = fill_from_dev_buffer(scp, arr, len);
2115 	kfree(arr);
2116 	return errsts;
2117 }
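/*
 * Sizing note for case 0 above: each command descriptor is 8 bytes, or
 * 20 when RCTD is set (8 plus a 12 byte timeouts descriptor whose
 * length field holds 0xa), which is why bump is 20 or 8 and arr[0..3]
 * get count * bump.
 */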
2118 
2119 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2120 			  struct sdebug_dev_info *devip)
2121 {
2122 	bool repd;
2123 	u32 alloc_len, len;
2124 	u8 arr[16];
2125 	u8 *cmd = scp->cmnd;
2126 
2127 	memset(arr, 0, sizeof(arr));
2128 	repd = !!(cmd[2] & 0x80);
2129 	alloc_len = get_unaligned_be32(cmd + 6);
2130 	if (alloc_len < 4) {
2131 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2132 		return check_condition_result;
2133 	}
2134 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2135 	arr[1] = 0x1;		/* ITNRS */
2136 	if (repd) {
2137 		arr[3] = 0xc;
2138 		len = 16;
2139 	} else
2140 		len = 4;
2141 
2142 	len = (len < alloc_len) ? len : alloc_len;
2143 	return fill_from_dev_buffer(scp, arr, len);
2144 }
2145 
2146 /* <<Following mode page info copied from ST318451LW>> */
2147 
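/*
 * In the mode page builders below, pcontrol reflects the MODE SENSE PC
 * field: 0 returns current values, 1 returns changeable masks and 2
 * returns defaults; 3 (saved values) is rejected earlier in
 * resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */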
2148 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2149 {	/* Read-Write Error Recovery page for mode_sense */
2150 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2151 					5, 0, 0xff, 0xff};
2152 
2153 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2154 	if (1 == pcontrol)
2155 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2156 	return sizeof(err_recov_pg);
2157 }
2158 
2159 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2160 { 	/* Disconnect-Reconnect page for mode_sense */
2161 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2162 					 0, 0, 0, 0, 0, 0, 0, 0};
2163 
2164 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2165 	if (1 == pcontrol)
2166 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2167 	return sizeof(disconnect_pg);
2168 }
2169 
2170 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2171 {       /* Format device page for mode_sense */
2172 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2173 				     0, 0, 0, 0, 0, 0, 0, 0,
2174 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2175 
2176 	memcpy(p, format_pg, sizeof(format_pg));
2177 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2178 	put_unaligned_be16(sdebug_sector_size, p + 12);
2179 	if (sdebug_removable)
2180 		p[20] |= 0x20; /* should agree with INQUIRY */
2181 	if (1 == pcontrol)
2182 		memset(p + 2, 0, sizeof(format_pg) - 2);
2183 	return sizeof(format_pg);
2184 }
2185 
2186 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2187 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2188 				     0, 0, 0, 0};
2189 
2190 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2191 { 	/* Caching page for mode_sense */
2192 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2193 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2194 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2195 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2196 
2197 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2198 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2199 	memcpy(p, caching_pg, sizeof(caching_pg));
2200 	if (1 == pcontrol)
2201 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2202 	else if (2 == pcontrol)
2203 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2204 	return sizeof(caching_pg);
2205 }
2206 
2207 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2208 				    0, 0, 0x2, 0x4b};
2209 
2210 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2211 { 	/* Control mode page for mode_sense */
2212 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2213 					0, 0, 0, 0};
2214 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2215 				     0, 0, 0x2, 0x4b};
2216 
2217 	if (sdebug_dsense)
2218 		ctrl_m_pg[2] |= 0x4;
2219 	else
2220 		ctrl_m_pg[2] &= ~0x4;
2221 
2222 	if (sdebug_ato)
2223 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2224 
2225 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2226 	if (1 == pcontrol)
2227 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2228 	else if (2 == pcontrol)
2229 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2230 	return sizeof(ctrl_m_pg);
2231 }
2232 
2233 
2234 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2235 {	/* Informational Exceptions control mode page for mode_sense */
2236 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2237 				       0, 0, 0x0, 0x0};
2238 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2239 				      0, 0, 0x0, 0x0};
2240 
2241 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2242 	if (1 == pcontrol)
2243 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2244 	else if (2 == pcontrol)
2245 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2246 	return sizeof(iec_m_pg);
2247 }
2248 
2249 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2250 {	/* SAS SSP mode page - short format for mode_sense */
2251 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2252 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2253 
2254 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2255 	if (1 == pcontrol)
2256 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2257 	return sizeof(sas_sf_m_pg);
2258 }
2259 
2260 
2261 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2262 			      int target_dev_id)
2263 {	/* SAS phy control and discover mode page for mode_sense */
2264 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2265 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2266 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2267 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2268 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2269 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2270 		    0, 0, 0, 0, 0, 0, 0, 0,
2271 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2272 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2273 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2274 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2275 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2276 		    0, 0, 0, 0, 0, 0, 0, 0,
2277 		};
2278 	int port_a, port_b;
2279 
2280 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2281 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2282 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2283 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2284 	port_a = target_dev_id + 1;
2285 	port_b = port_a + 1;
2286 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2287 	put_unaligned_be32(port_a, p + 20);
2288 	put_unaligned_be32(port_b, p + 48 + 20);
2289 	if (1 == pcontrol)
2290 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2291 	return sizeof(sas_pcd_m_pg);
2292 }
2293 
2294 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2295 {	/* SAS SSP shared protocol specific port mode subpage */
2296 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2297 		    0, 0, 0, 0, 0, 0, 0, 0,
2298 		};
2299 
2300 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2301 	if (1 == pcontrol)
2302 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2303 	return sizeof(sas_sha_m_pg);
2304 }
2305 
2306 #define SDEBUG_MAX_MSENSE_SZ 256
2307 
2308 static int resp_mode_sense(struct scsi_cmnd *scp,
2309 			   struct sdebug_dev_info *devip)
2310 {
2311 	int pcontrol, pcode, subpcode, bd_len;
2312 	unsigned char dev_spec;
2313 	int alloc_len, offset, len, target_dev_id;
2314 	int target = scp->device->id;
2315 	unsigned char *ap;
2316 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2317 	unsigned char *cmd = scp->cmnd;
2318 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2319 
2320 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2321 	pcontrol = (cmd[2] & 0xc0) >> 6;
2322 	pcode = cmd[2] & 0x3f;
2323 	subpcode = cmd[3];
2324 	msense_6 = (MODE_SENSE == cmd[0]);
2325 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2326 	is_disk = (sdebug_ptype == TYPE_DISK);
2327 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2328 	if ((is_disk || is_zbc) && !dbd)
2329 		bd_len = llbaa ? 16 : 8;
2330 	else
2331 		bd_len = 0;
2332 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2333 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2334 	if (0x3 == pcontrol) {  /* Saving values not supported */
2335 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2336 		return check_condition_result;
2337 	}
2338 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2339 			(devip->target * 1000) - 3;
2340 	/* for disks+zbc set DPOFUA bit; set WP bit when sdebug_wp is true */
2341 	if (is_disk || is_zbc) {
2342 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2343 		if (sdebug_wp)
2344 			dev_spec |= 0x80;
2345 	} else
2346 		dev_spec = 0x0;
2347 	if (msense_6) {
2348 		arr[2] = dev_spec;
2349 		arr[3] = bd_len;
2350 		offset = 4;
2351 	} else {
2352 		arr[3] = dev_spec;
2353 		if (16 == bd_len)
2354 			arr[4] = 0x1;	/* set LONGLBA bit */
2355 		arr[7] = bd_len;	/* assume 255 or less */
2356 		offset = 8;
2357 	}
2358 	ap = arr + offset;
2359 	if ((bd_len > 0) && (!sdebug_capacity))
2360 		sdebug_capacity = get_sdebug_capacity();
2361 
2362 	if (8 == bd_len) {
2363 		if (sdebug_capacity > 0xfffffffe)
2364 			put_unaligned_be32(0xffffffff, ap + 0);
2365 		else
2366 			put_unaligned_be32(sdebug_capacity, ap + 0);
2367 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2368 		offset += bd_len;
2369 		ap = arr + offset;
2370 	} else if (16 == bd_len) {
2371 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2372 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2373 		offset += bd_len;
2374 		ap = arr + offset;
2375 	}
2376 
2377 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2378 		/* TODO: Control Extension page */
2379 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2380 		return check_condition_result;
2381 	}
2382 	bad_pcode = false;
2383 
2384 	switch (pcode) {
2385 	case 0x1:	/* Read-Write error recovery page, direct access */
2386 		len = resp_err_recov_pg(ap, pcontrol, target);
2387 		offset += len;
2388 		break;
2389 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2390 		len = resp_disconnect_pg(ap, pcontrol, target);
2391 		offset += len;
2392 		break;
2393 	case 0x3:       /* Format device page, direct access */
2394 		if (is_disk) {
2395 			len = resp_format_pg(ap, pcontrol, target);
2396 			offset += len;
2397 		} else
2398 			bad_pcode = true;
2399 		break;
2400 	case 0x8:	/* Caching page, direct access */
2401 		if (is_disk || is_zbc) {
2402 			len = resp_caching_pg(ap, pcontrol, target);
2403 			offset += len;
2404 		} else
2405 			bad_pcode = true;
2406 		break;
2407 	case 0xa:	/* Control Mode page, all devices */
2408 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2409 		offset += len;
2410 		break;
2411 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2412 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2413 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2414 			return check_condition_result;
2415 		}
2416 		len = 0;
2417 		if ((0x0 == subpcode) || (0xff == subpcode))
2418 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2419 		if ((0x1 == subpcode) || (0xff == subpcode))
2420 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2421 						  target_dev_id);
2422 		if ((0x2 == subpcode) || (0xff == subpcode))
2423 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2424 		offset += len;
2425 		break;
2426 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2427 		len = resp_iec_m_pg(ap, pcontrol, target);
2428 		offset += len;
2429 		break;
2430 	case 0x3f:	/* Read all Mode pages */
2431 		if ((0 == subpcode) || (0xff == subpcode)) {
2432 			len = resp_err_recov_pg(ap, pcontrol, target);
2433 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2434 			if (is_disk) {
2435 				len += resp_format_pg(ap + len, pcontrol,
2436 						      target);
2437 				len += resp_caching_pg(ap + len, pcontrol,
2438 						       target);
2439 			} else if (is_zbc) {
2440 				len += resp_caching_pg(ap + len, pcontrol,
2441 						       target);
2442 			}
2443 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2444 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2445 			if (0xff == subpcode) {
2446 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2447 						  target, target_dev_id);
2448 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2449 			}
2450 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2451 			offset += len;
2452 		} else {
2453 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2454 			return check_condition_result;
2455 		}
2456 		break;
2457 	default:
2458 		bad_pcode = true;
2459 		break;
2460 	}
2461 	if (bad_pcode) {
2462 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2463 		return check_condition_result;
2464 	}
2465 	if (msense_6)
2466 		arr[0] = offset - 1;
2467 	else
2468 		put_unaligned_be16((offset - 2), arr + 0);
2469 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2470 }
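/*
 * Example: a MODE SENSE(10) for the caching page on a disk with block
 * descriptors enabled returns an 8 byte header (LONGLBA clear, bd_len
 * 8), the 8 byte descriptor, then the 20 byte page, so offset ends at
 * 36 and arr[0..1] get offset - 2 = 34, the MODE DATA LENGTH excluding
 * itself.
 */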
2471 
2472 #define SDEBUG_MAX_MSELECT_SZ 512
2473 
2474 static int resp_mode_select(struct scsi_cmnd *scp,
2475 			    struct sdebug_dev_info *devip)
2476 {
2477 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2478 	int param_len, res, mpage;
2479 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2480 	unsigned char *cmd = scp->cmnd;
2481 	int mselect6 = (MODE_SELECT == cmd[0]);
2482 
2483 	memset(arr, 0, sizeof(arr));
2484 	pf = cmd[1] & 0x10;
2485 	sp = cmd[1] & 0x1;
2486 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2487 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2488 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2489 		return check_condition_result;
2490 	}
2491 	res = fetch_to_dev_buffer(scp, arr, param_len);
2492 	if (-1 == res)
2493 		return DID_ERROR << 16;
2494 	else if (sdebug_verbose && (res < param_len))
2495 		sdev_printk(KERN_INFO, scp->device,
2496 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2497 			    __func__, param_len, res);
2498 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2499 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
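	/* the MODE DATA LENGTH field is reserved in a MODE SELECT
	 * parameter list, hence the md_len > 2 rejection below */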
2500 	if (md_len > 2) {
2501 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2502 		return check_condition_result;
2503 	}
2504 	off = bd_len + (mselect6 ? 4 : 8);
2505 	mpage = arr[off] & 0x3f;
2506 	ps = !!(arr[off] & 0x80);
2507 	if (ps) {
2508 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2509 		return check_condition_result;
2510 	}
2511 	spf = !!(arr[off] & 0x40);
2512 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2513 		       (arr[off + 1] + 2);
2514 	if ((pg_len + off) > param_len) {
2515 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2516 				PARAMETER_LIST_LENGTH_ERR, 0);
2517 		return check_condition_result;
2518 	}
2519 	switch (mpage) {
2520 	case 0x8:      /* Caching Mode page */
2521 		if (caching_pg[1] == arr[off + 1]) {
2522 			memcpy(caching_pg + 2, arr + off + 2,
2523 			       sizeof(caching_pg) - 2);
2524 			goto set_mode_changed_ua;
2525 		}
2526 		break;
2527 	case 0xa:      /* Control Mode page */
2528 		if (ctrl_m_pg[1] == arr[off + 1]) {
2529 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2530 			       sizeof(ctrl_m_pg) - 2);
2531 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);	/* SWP bit */
2535 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2536 			goto set_mode_changed_ua;
2537 		}
2538 		break;
2539 	case 0x1c:      /* Informational Exceptions Mode page */
2540 		if (iec_m_pg[1] == arr[off + 1]) {
2541 			memcpy(iec_m_pg + 2, arr + off + 2,
2542 			       sizeof(iec_m_pg) - 2);
2543 			goto set_mode_changed_ua;
2544 		}
2545 		break;
2546 	default:
2547 		break;
2548 	}
2549 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2550 	return check_condition_result;
2551 set_mode_changed_ua:
2552 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2553 	return 0;
2554 }
2555 
2556 static int resp_temp_l_pg(unsigned char *arr)
2557 {
2558 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2559 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2560 		};
2561 
2562 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2563 	return sizeof(temp_l_pg);
2564 }
2565 
2566 static int resp_ie_l_pg(unsigned char *arr)
2567 {
2568 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2569 		};
2570 
2571 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2572 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2573 		arr[4] = THRESHOLD_EXCEEDED;
2574 		arr[5] = 0xff;
2575 	}
2576 	return sizeof(ie_l_pg);
2577 }
2578 
2579 #define SDEBUG_MAX_LSENSE_SZ 512
2580 
2581 static int resp_log_sense(struct scsi_cmnd *scp,
2582 			  struct sdebug_dev_info *devip)
2583 {
2584 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2585 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2586 	unsigned char *cmd = scp->cmnd;
2587 
2588 	memset(arr, 0, sizeof(arr));
2589 	ppc = cmd[1] & 0x2;
2590 	sp = cmd[1] & 0x1;
2591 	if (ppc || sp) {
2592 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2593 		return check_condition_result;
2594 	}
2595 	pcode = cmd[2] & 0x3f;
2596 	subpcode = cmd[3] & 0xff;
2597 	alloc_len = get_unaligned_be16(cmd + 7);
2598 	arr[0] = pcode;
2599 	if (0 == subpcode) {
2600 		switch (pcode) {
2601 		case 0x0:	/* Supported log pages log page */
2602 			n = 4;
2603 			arr[n++] = 0x0;		/* this page */
2604 			arr[n++] = 0xd;		/* Temperature */
2605 			arr[n++] = 0x2f;	/* Informational exceptions */
2606 			arr[3] = n - 4;
2607 			break;
2608 		case 0xd:	/* Temperature log page */
2609 			arr[3] = resp_temp_l_pg(arr + 4);
2610 			break;
2611 		case 0x2f:	/* Informational exceptions log page */
2612 			arr[3] = resp_ie_l_pg(arr + 4);
2613 			break;
2614 		default:
2615 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2616 			return check_condition_result;
2617 		}
2618 	} else if (0xff == subpcode) {
2619 		arr[0] |= 0x40;
2620 		arr[1] = subpcode;
2621 		switch (pcode) {
2622 		case 0x0:	/* Supported log pages and subpages log page */
2623 			n = 4;
2624 			arr[n++] = 0x0;
2625 			arr[n++] = 0x0;		/* 0,0 page */
2626 			arr[n++] = 0x0;
2627 			arr[n++] = 0xff;	/* this page */
2628 			arr[n++] = 0xd;
2629 			arr[n++] = 0x0;		/* Temperature */
2630 			arr[n++] = 0x2f;
2631 			arr[n++] = 0x0;	/* Informational exceptions */
2632 			arr[3] = n - 4;
2633 			break;
2634 		case 0xd:	/* Temperature subpages */
2635 			n = 4;
2636 			arr[n++] = 0xd;
2637 			arr[n++] = 0x0;		/* Temperature */
2638 			arr[3] = n - 4;
2639 			break;
2640 		case 0x2f:	/* Informational exceptions subpages */
2641 			n = 4;
2642 			arr[n++] = 0x2f;
2643 			arr[n++] = 0x0;		/* Informational exceptions */
2644 			arr[3] = n - 4;
2645 			break;
2646 		default:
2647 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2648 			return check_condition_result;
2649 		}
2650 	} else {
2651 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2652 		return check_condition_result;
2653 	}
2654 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2655 	return fill_from_dev_buffer(scp, arr,
2656 		    min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2657 }
2658 
2659 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2660 {
2661 	return devip->nr_zones != 0;
2662 }
2663 
2664 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2665 					unsigned long long lba)
2666 {
2667 	return &devip->zstate[lba >> devip->zsize_shift];
2668 }
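/*
 * Example: with zsize_shift = 17 (131072 sector zones, i.e. 64 MiB at
 * 512 bytes per sector), LBA 0x60000 maps to zstate[3].
 */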
2669 
2670 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2671 {
2672 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2673 }
2674 
2675 static void zbc_close_zone(struct sdebug_dev_info *devip,
2676 			   struct sdeb_zone_state *zsp)
2677 {
2678 	enum sdebug_z_cond zc;
2679 
2680 	if (zbc_zone_is_conv(zsp))
2681 		return;
2682 
2683 	zc = zsp->z_cond;
2684 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2685 		return;
2686 
2687 	if (zc == ZC2_IMPLICIT_OPEN)
2688 		devip->nr_imp_open--;
2689 	else
2690 		devip->nr_exp_open--;
2691 
2692 	if (zsp->z_wp == zsp->z_start) {
2693 		zsp->z_cond = ZC1_EMPTY;
2694 	} else {
2695 		zsp->z_cond = ZC4_CLOSED;
2696 		devip->nr_closed++;
2697 	}
2698 }
2699 
2700 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2701 {
2702 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2703 	unsigned int i;
2704 
2705 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2706 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2707 			zbc_close_zone(devip, zsp);
2708 			return;
2709 		}
2710 	}
2711 }
2712 
2713 static void zbc_open_zone(struct sdebug_dev_info *devip,
2714 			  struct sdeb_zone_state *zsp, bool explicit)
2715 {
2716 	enum sdebug_z_cond zc;
2717 
2718 	if (zbc_zone_is_conv(zsp))
2719 		return;
2720 
2721 	zc = zsp->z_cond;
2722 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2723 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2724 		return;
2725 
2726 	/* Close an implicit open zone if necessary */
2727 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2728 		zbc_close_zone(devip, zsp);
2729 	else if (devip->max_open &&
2730 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2731 		zbc_close_imp_open_zone(devip);
2732 
2733 	if (zsp->z_cond == ZC4_CLOSED)
2734 		devip->nr_closed--;
2735 	if (explicit) {
2736 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2737 		devip->nr_exp_open++;
2738 	} else {
2739 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2740 		devip->nr_imp_open++;
2741 	}
2742 }
2743 
2744 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2745 		       unsigned long long lba, unsigned int num)
2746 {
2747 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2748 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2749 
2750 	if (zbc_zone_is_conv(zsp))
2751 		return;
2752 
2753 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2754 		zsp->z_wp += num;
2755 		if (zsp->z_wp >= zend)
2756 			zsp->z_cond = ZC5_FULL;
2757 		return;
2758 	}
2759 
2760 	while (num) {
2761 		if (lba != zsp->z_wp)
2762 			zsp->z_non_seq_resource = true;
2763 
2764 		end = lba + num;
2765 		if (end >= zend) {
2766 			n = zend - lba;
2767 			zsp->z_wp = zend;
2768 		} else if (end > zsp->z_wp) {
2769 			n = num;
2770 			zsp->z_wp = end;
2771 		} else {
2772 			n = num;
2773 		}
2774 		if (zsp->z_wp >= zend)
2775 			zsp->z_cond = ZC5_FULL;
2776 
2777 		num -= n;
2778 		lba += n;
2779 		if (num) {
2780 			zsp++;
2781 			zend = zsp->z_start + zsp->z_size;
2782 		}
2783 	}
2784 }
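/*
 * Example for the second branch above (sequential write preferred
 * zones): a write of 8 blocks starting 4 blocks past z_wp sets
 * z_non_seq_resource (lba != z_wp) and then advances z_wp to the end
 * of the write, since end > z_wp.
 */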
2785 
2786 static int check_zbc_access_params(struct scsi_cmnd *scp,
2787 			unsigned long long lba, unsigned int num, bool write)
2788 {
2789 	struct scsi_device *sdp = scp->device;
2790 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2791 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2792 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2793 
2794 	if (!write) {
2795 		if (devip->zmodel == BLK_ZONED_HA)
2796 			return 0;
2797 		/* For host-managed, reads cannot cross zone type boundaries */
2798 		if (zsp_end != zsp &&
2799 		    zbc_zone_is_conv(zsp) &&
2800 		    !zbc_zone_is_conv(zsp_end)) {
2801 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2802 					LBA_OUT_OF_RANGE,
2803 					READ_INVDATA_ASCQ);
2804 			return check_condition_result;
2805 		}
2806 		return 0;
2807 	}
2808 
2809 	/* No restrictions for writes within conventional zones */
2810 	if (zbc_zone_is_conv(zsp)) {
2811 		if (!zbc_zone_is_conv(zsp_end)) {
2812 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2813 					LBA_OUT_OF_RANGE,
2814 					WRITE_BOUNDARY_ASCQ);
2815 			return check_condition_result;
2816 		}
2817 		return 0;
2818 	}
2819 
2820 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2821 		/* Writes cannot cross sequential zone boundaries */
2822 		if (zsp_end != zsp) {
2823 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2824 					LBA_OUT_OF_RANGE,
2825 					WRITE_BOUNDARY_ASCQ);
2826 			return check_condition_result;
2827 		}
2828 		/* Cannot write full zones */
2829 		if (zsp->z_cond == ZC5_FULL) {
2830 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2831 					INVALID_FIELD_IN_CDB, 0);
2832 			return check_condition_result;
2833 		}
2834 		/* Writes must be aligned to the zone WP */
2835 		if (lba != zsp->z_wp) {
2836 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 					LBA_OUT_OF_RANGE,
2838 					UNALIGNED_WRITE_ASCQ);
2839 			return check_condition_result;
2840 		}
2841 	}
2842 
2843 	/* Handle implicit open of closed and empty zones */
2844 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2845 		if (devip->max_open &&
2846 		    devip->nr_exp_open >= devip->max_open) {
2847 			mk_sense_buffer(scp, DATA_PROTECT,
2848 					INSUFF_RES_ASC,
2849 					INSUFF_ZONE_ASCQ);
2850 			return check_condition_result;
2851 		}
2852 		zbc_open_zone(devip, zsp, false);
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 static inline int check_device_access_params(struct scsi_cmnd *scp,
2859 			unsigned long long lba, unsigned int num, bool write)
2861 {
2862 	struct scsi_device *sdp = scp->device;
2863 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2864 
2865 	if (lba + num > sdebug_capacity) {
2866 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2867 		return check_condition_result;
2868 	}
2869 	/* transfer length excessive (tie in to block limits VPD page) */
2870 	if (num > sdebug_store_sectors) {
2871 		/* needs work to find which cdb byte 'num' comes from */
2872 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2873 		return check_condition_result;
2874 	}
2875 	if (write && unlikely(sdebug_wp)) {
2876 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2877 		return check_condition_result;
2878 	}
2879 	if (sdebug_dev_is_zoned(devip))
2880 		return check_zbc_access_params(scp, lba, num, write);
2881 
2882 	return 0;
2883 }
2884 
2885 /*
2886  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2887  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2888  * that access any of the "stores" in struct sdeb_store_info should call this
2889  * function with bug_if_fake_rw set to true.
2890  */
2891 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2892 						bool bug_if_fake_rw)
2893 {
2894 	if (sdebug_fake_rw) {
2895 		BUG_ON(bug_if_fake_rw);	/* See note above */
2896 		return NULL;
2897 	}
2898 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2899 }
2900 
2901 /* Returns number of bytes copied or -1 if error. */
2902 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2903 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2904 {
2905 	int ret;
2906 	u64 block, rest = 0;
2907 	enum dma_data_direction dir;
2908 	struct scsi_data_buffer *sdb = &scp->sdb;
2909 	u8 *fsp;
2910 
2911 	if (do_write) {
2912 		dir = DMA_TO_DEVICE;
2913 		write_since_sync = true;
2914 	} else {
2915 		dir = DMA_FROM_DEVICE;
2916 	}
2917 
2918 	if (!sdb->length || !sip)
2919 		return 0;
2920 	if (scp->sc_data_direction != dir)
2921 		return -1;
2922 	fsp = sip->storep;
2923 
2924 	block = do_div(lba, sdebug_store_sectors);
2925 	if (block + num > sdebug_store_sectors)
2926 		rest = block + num - sdebug_store_sectors;
2927 
2928 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2929 		   fsp + (block * sdebug_sector_size),
2930 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2931 	if (ret != (num - rest) * sdebug_sector_size)
2932 		return ret;
2933 
2934 	if (rest) {
2935 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2936 			    fsp, rest * sdebug_sector_size,
2937 			    sg_skip + ((num - rest) * sdebug_sector_size),
2938 			    do_write);
2939 	}
2940 
2941 	return ret;
2942 }
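/*
 * Wrap around example: with sdebug_store_sectors = 0x8000, block =
 * 0x7ffe and num = 4, rest = 2, so the first sg_copy_buffer() moves
 * two sectors from the end of the store and the second moves the
 * remaining two from its start.
 */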
2943 
2944 /* Returns number of bytes copied or -1 if error. */
2945 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2946 {
2947 	struct scsi_data_buffer *sdb = &scp->sdb;
2948 
2949 	if (!sdb->length)
2950 		return 0;
2951 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2952 		return -1;
2953 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2954 			      num * sdebug_sector_size, 0, true);
2955 }
2956 
2957 /* If sip->storep+lba compares equal to the first half of arr (num
2958  * blocks), copy the second ("top") half of arr into sip->storep+lba
2959  * and return true (unless compare_only is set, in which case the
 * store is left untouched). If the comparison fails, return false. */
2960 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2961 			      const u8 *arr, bool compare_only)
2962 {
2963 	bool res;
2964 	u64 block, rest = 0;
2965 	u32 store_blks = sdebug_store_sectors;
2966 	u32 lb_size = sdebug_sector_size;
2967 	u8 *fsp = sip->storep;
2968 
2969 	block = do_div(lba, store_blks);
2970 	if (block + num > store_blks)
2971 		rest = block + num - store_blks;
2972 
2973 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2974 	if (!res)
2975 		return res;
2976 	if (rest)
2977 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2978 			     rest * lb_size);
2979 	if (!res)
2980 		return res;
2981 	if (compare_only)
2982 		return true;
2983 	arr += num * lb_size;
2984 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2985 	if (rest)
2986 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2987 	return res;
2988 }
2989 
2990 static __be16 dif_compute_csum(const void *buf, int len)
2991 {
2992 	__be16 csum;
2993 
2994 	if (sdebug_guard)
2995 		csum = (__force __be16)ip_compute_csum(buf, len);
2996 	else
2997 		csum = cpu_to_be16(crc_t10dif(buf, len));
2998 
2999 	return csum;
3000 }
3001 
3002 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3003 		      sector_t sector, u32 ei_lba)
3004 {
3005 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3006 
3007 	if (sdt->guard_tag != csum) {
3008 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3009 			(unsigned long)sector,
3010 			be16_to_cpu(sdt->guard_tag),
3011 			be16_to_cpu(csum));
3012 		return 0x01;
3013 	}
3014 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3015 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3016 		pr_err("REF check failed on sector %lu\n",
3017 			(unsigned long)sector);
3018 		return 0x03;
3019 	}
3020 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3021 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3022 		pr_err("REF check failed on sector %lu\n",
3023 			(unsigned long)sector);
3024 		return 0x03;
3025 	}
3026 	return 0;
3027 }
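/*
 * The return values above become the ASCQ paired with ASC 0x10 by the
 * callers (see the mk_sense_buffer() call after prot_verify_read()):
 * 0x01 for a guard check failure and 0x03 for a reference tag check
 * failure.
 */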
3028 
3029 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3030 			  unsigned int sectors, bool read)
3031 {
3032 	size_t resid;
3033 	void *paddr;
3034 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3035 						scp->device->hostdata, true);
3036 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3037 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3038 	struct sg_mapping_iter miter;
3039 
3040 	/* Bytes of protection data to copy into sgl */
3041 	resid = sectors * sizeof(*dif_storep);
3042 
3043 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3044 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3045 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3046 
3047 	while (sg_miter_next(&miter) && resid > 0) {
3048 		size_t len = min_t(size_t, miter.length, resid);
3049 		void *start = dif_store(sip, sector);
3050 		size_t rest = 0;
3051 
3052 		if (dif_store_end < start + len)
3053 			rest = start + len - dif_store_end;
3054 
3055 		paddr = miter.addr;
3056 
3057 		if (read)
3058 			memcpy(paddr, start, len - rest);
3059 		else
3060 			memcpy(start, paddr, len - rest);
3061 
3062 		if (rest) {
3063 			if (read)
3064 				memcpy(paddr + len - rest, dif_storep, rest);
3065 			else
3066 				memcpy(dif_storep, paddr + len - rest, rest);
3067 		}
3068 
3069 		sector += len / sizeof(*dif_storep);
3070 		resid -= len;
3071 	}
3072 	sg_miter_stop(&miter);
3073 }
3074 
3075 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3076 			    unsigned int sectors, u32 ei_lba)
3077 {
3078 	unsigned int i;
3079 	sector_t sector;
3080 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3081 						scp->device->hostdata, true);
3082 	struct t10_pi_tuple *sdt;
3083 
3084 	for (i = 0; i < sectors; i++, ei_lba++) {
3085 		int ret;
3086 
3087 		sector = start_sec + i;
3088 		sdt = dif_store(sip, sector);
3089 
3090 		if (sdt->app_tag == cpu_to_be16(0xffff))
3091 			continue;
3092 
3093 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3094 				 ei_lba);
3095 		if (ret) {
3096 			dif_errors++;
3097 			return ret;
3098 		}
3099 	}
3100 
3101 	dif_copy_prot(scp, start_sec, sectors, true);
3102 	dix_reads++;
3103 
3104 	return 0;
3105 }
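/*
 * Note: the app_tag == 0xffff escape above follows the T10 convention
 * that an application tag of all ones means "do not check protection
 * information for this logical block", hence such sectors are skipped
 * rather than verified.
 */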
3106 
3107 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3108 {
3109 	bool check_prot;
3110 	u32 num;
3111 	u32 ei_lba;
3112 	int ret;
3113 	u64 lba;
3114 	struct sdeb_store_info *sip = devip2sip(devip, true);
3115 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3116 	u8 *cmd = scp->cmnd;
3117 
3118 	switch (cmd[0]) {
3119 	case READ_16:
3120 		ei_lba = 0;
3121 		lba = get_unaligned_be64(cmd + 2);
3122 		num = get_unaligned_be32(cmd + 10);
3123 		check_prot = true;
3124 		break;
3125 	case READ_10:
3126 		ei_lba = 0;
3127 		lba = get_unaligned_be32(cmd + 2);
3128 		num = get_unaligned_be16(cmd + 7);
3129 		check_prot = true;
3130 		break;
3131 	case READ_6:
3132 		ei_lba = 0;
3133 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3134 		      (u32)(cmd[1] & 0x1f) << 16;
3135 		num = (0 == cmd[4]) ? 256 : cmd[4];
3136 		check_prot = true;
3137 		break;
3138 	case READ_12:
3139 		ei_lba = 0;
3140 		lba = get_unaligned_be32(cmd + 2);
3141 		num = get_unaligned_be32(cmd + 6);
3142 		check_prot = true;
3143 		break;
3144 	case XDWRITEREAD_10:
3145 		ei_lba = 0;
3146 		lba = get_unaligned_be32(cmd + 2);
3147 		num = get_unaligned_be16(cmd + 7);
3148 		check_prot = false;
3149 		break;
3150 	default:	/* assume READ(32) */
3151 		lba = get_unaligned_be64(cmd + 12);
3152 		ei_lba = get_unaligned_be32(cmd + 20);
3153 		num = get_unaligned_be32(cmd + 28);
3154 		check_prot = false;
3155 		break;
3156 	}
3157 	if (unlikely(have_dif_prot && check_prot)) {
3158 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3159 		    (cmd[1] & 0xe0)) {
3160 			mk_sense_invalid_opcode(scp);
3161 			return check_condition_result;
3162 		}
3163 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3164 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3165 		    (cmd[1] & 0xe0) == 0)
3166 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3167 				    "to DIF device\n");
3168 	}
3169 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3170 		     atomic_read(&sdeb_inject_pending))) {
3171 		num /= 2;
3172 		atomic_set(&sdeb_inject_pending, 0);
3173 	}
3174 
3175 	ret = check_device_access_params(scp, lba, num, false);
3176 	if (ret)
3177 		return ret;
3178 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3179 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3180 		     ((lba + num) > sdebug_medium_error_start))) {
3181 		/* claim unrecoverable read error */
3182 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3183 		/* set info field and valid bit for fixed descriptor */
3184 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3185 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3186 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3187 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3188 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3189 		}
3190 		scsi_set_resid(scp, scsi_bufflen(scp));
3191 		return check_condition_result;
3192 	}
3193 
3194 	read_lock(macc_lckp);
3195 
3196 	/* DIX + T10 DIF */
3197 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3198 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3199 
3200 		if (prot_ret) {
3201 			read_unlock(macc_lckp);
3202 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3203 			return illegal_condition_result;
3204 		}
3205 	}
3206 
3207 	ret = do_device_access(sip, scp, 0, lba, num, false);
3208 	read_unlock(macc_lckp);
3209 	if (unlikely(ret == -1))
3210 		return DID_ERROR << 16;
3211 
3212 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3213 
3214 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3215 		     atomic_read(&sdeb_inject_pending))) {
3216 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3217 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3218 			atomic_set(&sdeb_inject_pending, 0);
3219 			return check_condition_result;
3220 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3221 			/* Logical block guard check failed */
3222 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3223 			atomic_set(&sdeb_inject_pending, 0);
3224 			return illegal_condition_result;
3225 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3226 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3227 			atomic_set(&sdeb_inject_pending, 0);
3228 			return illegal_condition_result;
3229 		}
3230 	}
3231 	return 0;
3232 }
3233 
3234 static void dump_sector(unsigned char *buf, int len)
3235 {
3236 	int i, j, n;
3237 
3238 	pr_err(">>> Sector Dump <<<\n");
3239 	for (i = 0 ; i < len ; i += 16) {
3240 		char b[128];
3241 
3242 		for (j = 0, n = 0; j < 16; j++) {
3243 			unsigned char c = buf[i+j];
3244 
3245 			if (c >= 0x20 && c < 0x7e)
3246 				n += scnprintf(b + n, sizeof(b) - n,
3247 					       " %c ", c);
3248 			else
3249 				n += scnprintf(b + n, sizeof(b) - n,
3250 					       "%02x ", c);
3251 		}
3252 		pr_err("%04d: %s\n", i, b);
3253 	}
3254 }
3255 
3256 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3257 			     unsigned int sectors, u32 ei_lba)
3258 {
3259 	int ret;
3260 	struct t10_pi_tuple *sdt;
3261 	void *daddr;
3262 	sector_t sector = start_sec;
3263 	int ppage_offset;
3264 	int dpage_offset;
3265 	struct sg_mapping_iter diter;
3266 	struct sg_mapping_iter piter;
3267 
3268 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3269 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3270 
3271 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3272 			scsi_prot_sg_count(SCpnt),
3273 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3274 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3275 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3276 
3277 	/* For each protection page */
3278 	while (sg_miter_next(&piter)) {
3279 		dpage_offset = 0;
3280 		if (WARN_ON(!sg_miter_next(&diter))) {
3281 			ret = 0x01;
3282 			goto out;
3283 		}
3284 
3285 		for (ppage_offset = 0; ppage_offset < piter.length;
3286 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3287 			/* If we're at the end of the current
3288 			 * data page advance to the next one
3289 			 */
3290 			if (dpage_offset >= diter.length) {
3291 				if (WARN_ON(!sg_miter_next(&diter))) {
3292 					ret = 0x01;
3293 					goto out;
3294 				}
3295 				dpage_offset = 0;
3296 			}
3297 
3298 			sdt = piter.addr + ppage_offset;
3299 			daddr = diter.addr + dpage_offset;
3300 
3301 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3302 			if (ret) {
3303 				dump_sector(daddr, sdebug_sector_size);
3304 				goto out;
3305 			}
3306 
3307 			sector++;
3308 			ei_lba++;
3309 			dpage_offset += sdebug_sector_size;
3310 		}
3311 		diter.consumed = dpage_offset;
3312 		sg_miter_stop(&diter);
3313 	}
3314 	sg_miter_stop(&piter);
3315 
3316 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3317 	dix_writes++;
3318 
3319 	return 0;
3320 
3321 out:
3322 	dif_errors++;
3323 	sg_miter_stop(&diter);
3324 	sg_miter_stop(&piter);
3325 	return ret;
3326 }
3327 
3328 static unsigned long lba_to_map_index(sector_t lba)
3329 {
3330 	if (sdebug_unmap_alignment)
3331 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3332 	sector_div(lba, sdebug_unmap_granularity);
3333 	return lba;
3334 }
3335 
3336 static sector_t map_index_to_lba(unsigned long index)
3337 {
3338 	sector_t lba = index * sdebug_unmap_granularity;
3339 
3340 	if (sdebug_unmap_alignment)
3341 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3342 	return lba;
3343 }
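/*
 * Worked example for the two helpers above (for illustration): with
 * sdebug_unmap_granularity == 4 and sdebug_unmap_alignment == 1,
 * lba_to_map_index() computes (lba + 3) / 4, so LBA 0 lands in index 0
 * (the short alignment prefix) and LBAs 1-4 in index 1; inversely,
 * map_index_to_lba(1) == 1 and map_index_to_lba(2) == 5, i.e. each
 * full granule i (for i >= 1) starts at LBA i * 4 - 3.
 */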
3344 
3345 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3346 			      unsigned int *num)
3347 {
3348 	sector_t end;
3349 	unsigned int mapped;
3350 	unsigned long index;
3351 	unsigned long next;
3352 
3353 	index = lba_to_map_index(lba);
3354 	mapped = test_bit(index, sip->map_storep);
3355 
3356 	if (mapped)
3357 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3358 	else
3359 		next = find_next_bit(sip->map_storep, map_size, index);
3360 
3361 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3362 	*num = end - lba;
3363 	return mapped;
3364 }
3365 
3366 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3367 		       unsigned int len)
3368 {
3369 	sector_t end = lba + len;
3370 
3371 	while (lba < end) {
3372 		unsigned long index = lba_to_map_index(lba);
3373 
3374 		if (index < map_size)
3375 			set_bit(index, sip->map_storep);
3376 
3377 		lba = map_index_to_lba(index + 1);
3378 	}
3379 }
3380 
3381 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3382 			 unsigned int len)
3383 {
3384 	sector_t end = lba + len;
3385 	u8 *fsp = sip->storep;
3386 
3387 	while (lba < end) {
3388 		unsigned long index = lba_to_map_index(lba);
3389 
3390 		if (lba == map_index_to_lba(index) &&
3391 		    lba + sdebug_unmap_granularity <= end &&
3392 		    index < map_size) {
3393 			clear_bit(index, sip->map_storep);
3394 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros; LBPRZ=2: 0xff bytes */
3395 				memset(fsp + lba * sdebug_sector_size,
3396 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3397 				       sdebug_sector_size *
3398 				       sdebug_unmap_granularity);
3399 			}
3400 			if (sip->dif_storep) {
3401 				memset(sip->dif_storep + lba, 0xff,
3402 				       sizeof(*sip->dif_storep) *
3403 				       sdebug_unmap_granularity);
3404 			}
3405 		}
3406 		lba = map_index_to_lba(index + 1);
3407 	}
3408 }
3409 
3410 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3411 {
3412 	bool check_prot;
3413 	u32 num;
3414 	u32 ei_lba;
3415 	int ret;
3416 	u64 lba;
3417 	struct sdeb_store_info *sip = devip2sip(devip, true);
3418 	rwlock_t *macc_lckp = &sip->macc_lck;
3419 	u8 *cmd = scp->cmnd;
3420 
3421 	switch (cmd[0]) {
3422 	case WRITE_16:
3423 		ei_lba = 0;
3424 		lba = get_unaligned_be64(cmd + 2);
3425 		num = get_unaligned_be32(cmd + 10);
3426 		check_prot = true;
3427 		break;
3428 	case WRITE_10:
3429 		ei_lba = 0;
3430 		lba = get_unaligned_be32(cmd + 2);
3431 		num = get_unaligned_be16(cmd + 7);
3432 		check_prot = true;
3433 		break;
3434 	case WRITE_6:
3435 		ei_lba = 0;
3436 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3437 		      (u32)(cmd[1] & 0x1f) << 16;
3438 		num = (0 == cmd[4]) ? 256 : cmd[4];
3439 		check_prot = true;
3440 		break;
3441 	case WRITE_12:
3442 		ei_lba = 0;
3443 		lba = get_unaligned_be32(cmd + 2);
3444 		num = get_unaligned_be32(cmd + 6);
3445 		check_prot = true;
3446 		break;
3447 	case 0x53:	/* XDWRITEREAD(10) */
3448 		ei_lba = 0;
3449 		lba = get_unaligned_be32(cmd + 2);
3450 		num = get_unaligned_be16(cmd + 7);
3451 		check_prot = false;
3452 		break;
3453 	default:	/* assume WRITE(32) */
3454 		lba = get_unaligned_be64(cmd + 12);
3455 		ei_lba = get_unaligned_be32(cmd + 20);
3456 		num = get_unaligned_be32(cmd + 28);
3457 		check_prot = false;
3458 		break;
3459 	}
3460 	if (unlikely(have_dif_prot && check_prot)) {
3461 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3462 		    (cmd[1] & 0xe0)) {
3463 			mk_sense_invalid_opcode(scp);
3464 			return check_condition_result;
3465 		}
3466 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3467 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3468 		    (cmd[1] & 0xe0) == 0)
3469 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3470 				    "to DIF device\n");
3471 	}
3472 
3473 	write_lock(macc_lckp);
3474 	ret = check_device_access_params(scp, lba, num, true);
3475 	if (ret) {
3476 		write_unlock(macc_lckp);
3477 		return ret;
3478 	}
3479 
3480 	/* DIX + T10 DIF */
3481 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3482 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3483 
3484 		if (prot_ret) {
3485 			write_unlock(macc_lckp);
3486 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3487 			return illegal_condition_result;
3488 		}
3489 	}
3490 
3491 	ret = do_device_access(sip, scp, 0, lba, num, true);
3492 	if (unlikely(scsi_debug_lbp()))
3493 		map_region(sip, lba, num);
3494 	/* If ZBC zone then bump its write pointer */
3495 	if (sdebug_dev_is_zoned(devip))
3496 		zbc_inc_wp(devip, lba, num);
3497 	write_unlock(macc_lckp);
3498 	if (unlikely(-1 == ret))
3499 		return DID_ERROR << 16;
3500 	else if (unlikely(sdebug_verbose &&
3501 			  (ret < (num * sdebug_sector_size))))
3502 		sdev_printk(KERN_INFO, scp->device,
3503 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3504 			    my_name, num * sdebug_sector_size, ret);
3505 
3506 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3507 		     atomic_read(&sdeb_inject_pending))) {
3508 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3509 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3510 			atomic_set(&sdeb_inject_pending, 0);
3511 			return check_condition_result;
3512 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3513 			/* Logical block guard check failed */
3514 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3515 			atomic_set(&sdeb_inject_pending, 0);
3516 			return illegal_condition_result;
3517 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3518 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3519 			atomic_set(&sdeb_inject_pending, 0);
3520 			return illegal_condition_result;
3521 		}
3522 	}
3523 	return 0;
3524 }
3525 
3526 /*
3527  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3528  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3529  */
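/*
 * Sketch of the data-out buffer resp_write_scat() parses, as implied by
 * the code below (byte offsets within the fetched buffer):
 *   0..31                parameter list header (lrd_size bytes)
 *   32 + k * 32          LBA range descriptor k, for k < num_lrd:
 *                          +0 be64 LBA; +8 be32 number of blocks;
 *                          +12 be32 expected initial LBA (32 byte CDB
 *                          variant only)
 *   lbdof * lb_size...   the logical block data to be written
 */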
3530 static int resp_write_scat(struct scsi_cmnd *scp,
3531 			   struct sdebug_dev_info *devip)
3532 {
3533 	u8 *cmd = scp->cmnd;
3534 	u8 *lrdp = NULL;
3535 	u8 *up;
3536 	struct sdeb_store_info *sip = devip2sip(devip, true);
3537 	rwlock_t *macc_lckp = &sip->macc_lck;
3538 	u8 wrprotect;
3539 	u16 lbdof, num_lrd, k;
3540 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3541 	u32 lb_size = sdebug_sector_size;
3542 	u32 ei_lba;
3543 	u64 lba;
3544 	int ret, res;
3545 	bool is_16;
3546 	static const u32 lrd_size = 32; /* + parameter list header size */
3547 
3548 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3549 		is_16 = false;
3550 		wrprotect = (cmd[10] >> 5) & 0x7;
3551 		lbdof = get_unaligned_be16(cmd + 12);
3552 		num_lrd = get_unaligned_be16(cmd + 16);
3553 		bt_len = get_unaligned_be32(cmd + 28);
3554 	} else {        /* that leaves WRITE SCATTERED(16) */
3555 		is_16 = true;
3556 		wrprotect = (cmd[2] >> 5) & 0x7;
3557 		lbdof = get_unaligned_be16(cmd + 4);
3558 		num_lrd = get_unaligned_be16(cmd + 8);
3559 		bt_len = get_unaligned_be32(cmd + 10);
3560 		if (unlikely(have_dif_prot)) {
3561 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3562 			    wrprotect) {
3563 				mk_sense_invalid_opcode(scp);
3564 				return illegal_condition_result;
3565 			}
3566 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3567 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3568 			     wrprotect == 0)
3569 				sdev_printk(KERN_ERR, scp->device,
3570 					    "Unprotected WR to DIF device\n");
3571 		}
3572 	}
3573 	if ((num_lrd == 0) || (bt_len == 0))
3574 		return 0;       /* T10 says these do-nothings are not errors */
3575 	if (lbdof == 0) {
3576 		if (sdebug_verbose)
3577 			sdev_printk(KERN_INFO, scp->device,
3578 				"%s: %s: LB Data Offset field bad\n",
3579 				my_name, __func__);
3580 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3581 		return illegal_condition_result;
3582 	}
3583 	lbdof_blen = lbdof * lb_size;
3584 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3585 		if (sdebug_verbose)
3586 			sdev_printk(KERN_INFO, scp->device,
3587 				"%s: %s: LBA range descriptors don't fit\n",
3588 				my_name, __func__);
3589 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3590 		return illegal_condition_result;
3591 	}
3592 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3593 	if (lrdp == NULL)
3594 		return SCSI_MLQUEUE_HOST_BUSY;
3595 	if (sdebug_verbose)
3596 		sdev_printk(KERN_INFO, scp->device,
3597 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3598 			my_name, __func__, lbdof_blen);
3599 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3600 	if (res == -1) {
3601 		ret = DID_ERROR << 16;
3602 		goto err_out;
3603 	}
3604 
3605 	write_lock(macc_lckp);
3606 	sg_off = lbdof_blen;
3607 	/* Spec says the Buffer Transfer Length field is a count of LBs in dout */
3608 	cum_lb = 0;
3609 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3610 		lba = get_unaligned_be64(up + 0);
3611 		num = get_unaligned_be32(up + 8);
3612 		if (sdebug_verbose)
3613 			sdev_printk(KERN_INFO, scp->device,
3614 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3615 				my_name, __func__, k, lba, num, sg_off);
3616 		if (num == 0)
3617 			continue;
3618 		ret = check_device_access_params(scp, lba, num, true);
3619 		if (ret)
3620 			goto err_out_unlock;
3621 		num_by = num * lb_size;
3622 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3623 
3624 		if ((cum_lb + num) > bt_len) {
3625 			if (sdebug_verbose)
3626 				sdev_printk(KERN_INFO, scp->device,
3627 				    "%s: %s: sum of blocks > data provided\n",
3628 				    my_name, __func__);
3629 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3630 					0);
3631 			ret = illegal_condition_result;
3632 			goto err_out_unlock;
3633 		}
3634 
3635 		/* DIX + T10 DIF */
3636 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3637 			int prot_ret = prot_verify_write(scp, lba, num,
3638 							 ei_lba);
3639 
3640 			if (prot_ret) {
3641 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3642 						prot_ret);
3643 				ret = illegal_condition_result;
3644 				goto err_out_unlock;
3645 			}
3646 		}
3647 
3648 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3649 		/* If ZBC zone then bump its write pointer */
3650 		if (sdebug_dev_is_zoned(devip))
3651 			zbc_inc_wp(devip, lba, num);
3652 		if (unlikely(scsi_debug_lbp()))
3653 			map_region(sip, lba, num);
3654 		if (unlikely(-1 == ret)) {
3655 			ret = DID_ERROR << 16;
3656 			goto err_out_unlock;
3657 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3658 			sdev_printk(KERN_INFO, scp->device,
3659 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3660 			    my_name, num_by, ret);
3661 
3662 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3663 			     atomic_read(&sdeb_inject_pending))) {
3664 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3665 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3666 				atomic_set(&sdeb_inject_pending, 0);
3667 				ret = check_condition_result;
3668 				goto err_out_unlock;
3669 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3670 				/* Logical block guard check failed */
3671 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3672 				atomic_set(&sdeb_inject_pending, 0);
3673 				ret = illegal_condition_result;
3674 				goto err_out_unlock;
3675 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3676 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3677 				atomic_set(&sdeb_inject_pending, 0);
3678 				ret = illegal_condition_result;
3679 				goto err_out_unlock;
3680 			}
3681 		}
3682 		sg_off += num_by;
3683 		cum_lb += num;
3684 	}
3685 	ret = 0;
3686 err_out_unlock:
3687 	write_unlock(macc_lckp);
3688 err_out:
3689 	kfree(lrdp);
3690 	return ret;
3691 }
3692 
3693 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3694 			   u32 ei_lba, bool unmap, bool ndob)
3695 {
3696 	struct scsi_device *sdp = scp->device;
3697 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3698 	unsigned long long i;
3699 	u64 block, lbaa;
3700 	u32 lb_size = sdebug_sector_size;
3701 	int ret;
3702 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3703 						scp->device->hostdata, true);
3704 	rwlock_t *macc_lckp = &sip->macc_lck;
3705 	u8 *fs1p;
3706 	u8 *fsp;
3707 
3708 	write_lock(macc_lckp);
3709 
3710 	ret = check_device_access_params(scp, lba, num, true);
3711 	if (ret) {
3712 		write_unlock(macc_lckp);
3713 		return ret;
3714 	}
3715 
3716 	if (unmap && scsi_debug_lbp()) {
3717 		unmap_region(sip, lba, num);
3718 		goto out;
3719 	}
3720 	lbaa = lba;
3721 	block = do_div(lbaa, sdebug_store_sectors);
3722 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3723 	fsp = sip->storep;
3724 	fs1p = fsp + (block * lb_size);
3725 	if (ndob) {
3726 		memset(fs1p, 0, lb_size);
3727 		ret = 0;
3728 	} else
3729 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3730 
3731 	if (-1 == ret) {
3732 		write_unlock(macc_lckp);
3733 		return DID_ERROR << 16;
3734 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3735 		sdev_printk(KERN_INFO, scp->device,
3736 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3737 			    my_name, "write same", lb_size, ret);
3738 
3739 	/* Copy first sector to remaining blocks */
3740 	for (i = 1 ; i < num ; i++) {
3741 		lbaa = lba + i;
3742 		block = do_div(lbaa, sdebug_store_sectors);
3743 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3744 	}
3745 	if (scsi_debug_lbp())
3746 		map_region(sip, lba, num);
3747 	/* If ZBC zone then bump its write pointer */
3748 	if (sdebug_dev_is_zoned(devip))
3749 		zbc_inc_wp(devip, lba, num);
3750 out:
3751 	write_unlock(macc_lckp);
3752 
3753 	return 0;
3754 }
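/*
 * Note: memmove() rather than memcpy() is presumably used in the copy
 * loop above because when num exceeds sdebug_store_sectors the
 * destination block wraps around the store and can coincide with the
 * source block at fs1p; memmove() keeps that overlap well defined.
 */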
3755 
3756 static int resp_write_same_10(struct scsi_cmnd *scp,
3757 			      struct sdebug_dev_info *devip)
3758 {
3759 	u8 *cmd = scp->cmnd;
3760 	u32 lba;
3761 	u16 num;
3762 	u32 ei_lba = 0;
3763 	bool unmap = false;
3764 
3765 	if (cmd[1] & 0x8) {
3766 		if (sdebug_lbpws10 == 0) {
3767 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3768 			return check_condition_result;
3769 		} else
3770 			unmap = true;
3771 	}
3772 	lba = get_unaligned_be32(cmd + 2);
3773 	num = get_unaligned_be16(cmd + 7);
3774 	if (num > sdebug_write_same_length) {
3775 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3776 		return check_condition_result;
3777 	}
3778 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3779 }
3780 
3781 static int resp_write_same_16(struct scsi_cmnd *scp,
3782 			      struct sdebug_dev_info *devip)
3783 {
3784 	u8 *cmd = scp->cmnd;
3785 	u64 lba;
3786 	u32 num;
3787 	u32 ei_lba = 0;
3788 	bool unmap = false;
3789 	bool ndob = false;
3790 
3791 	if (cmd[1] & 0x8) {	/* UNMAP */
3792 		if (sdebug_lbpws == 0) {
3793 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3794 			return check_condition_result;
3795 		} else
3796 			unmap = true;
3797 	}
3798 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3799 		ndob = true;
3800 	lba = get_unaligned_be64(cmd + 2);
3801 	num = get_unaligned_be32(cmd + 10);
3802 	if (num > sdebug_write_same_length) {
3803 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3804 		return check_condition_result;
3805 	}
3806 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3807 }
3808 
3809 /* Note the mode field is in the same position as the (lower) service action
3810  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3811  * each mode of this command should be reported separately; left for later. */
3812 static int resp_write_buffer(struct scsi_cmnd *scp,
3813 			     struct sdebug_dev_info *devip)
3814 {
3815 	u8 *cmd = scp->cmnd;
3816 	struct scsi_device *sdp = scp->device;
3817 	struct sdebug_dev_info *dp;
3818 	u8 mode;
3819 
3820 	mode = cmd[1] & 0x1f;
3821 	switch (mode) {
3822 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3823 		/* set UAs on this device only */
3824 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3825 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3826 		break;
3827 	case 0x5:	/* download MC, save and ACT */
3828 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3829 		break;
3830 	case 0x6:	/* download MC with offsets and ACT */
3831 		/* set UAs on most devices (LUs) in this target */
3832 		list_for_each_entry(dp,
3833 				    &devip->sdbg_host->dev_info_list,
3834 				    dev_list)
3835 			if (dp->target == sdp->id) {
3836 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3837 				if (devip != dp)
3838 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3839 						dp->uas_bm);
3840 			}
3841 		break;
3842 	case 0x7:	/* download MC with offsets, save, and ACT */
3843 		/* set UA on all devices (LUs) in this target */
3844 		list_for_each_entry(dp,
3845 				    &devip->sdbg_host->dev_info_list,
3846 				    dev_list)
3847 			if (dp->target == sdp->id)
3848 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3849 					dp->uas_bm);
3850 		break;
3851 	default:
3852 		/* do nothing for this command for other mode values */
3853 		break;
3854 	}
3855 	return 0;
3856 }
3857 
3858 static int resp_comp_write(struct scsi_cmnd *scp,
3859 			   struct sdebug_dev_info *devip)
3860 {
3861 	u8 *cmd = scp->cmnd;
3862 	u8 *arr;
3863 	struct sdeb_store_info *sip = devip2sip(devip, true);
3864 	rwlock_t *macc_lckp = &sip->macc_lck;
3865 	u64 lba;
3866 	u32 dnum;
3867 	u32 lb_size = sdebug_sector_size;
3868 	u8 num;
3869 	int ret;
3870 	int retval = 0;
3871 
3872 	lba = get_unaligned_be64(cmd + 2);
3873 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3874 	if (0 == num)
3875 		return 0;	/* degenerate case, not an error */
3876 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3877 	    (cmd[1] & 0xe0)) {
3878 		mk_sense_invalid_opcode(scp);
3879 		return check_condition_result;
3880 	}
3881 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3882 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3883 	    (cmd[1] & 0xe0) == 0)
3884 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3885 			    "to DIF device\n");
3886 	ret = check_device_access_params(scp, lba, num, false);
3887 	if (ret)
3888 		return ret;
3889 	dnum = 2 * num;
3890 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3891 	if (NULL == arr) {
3892 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3893 				INSUFF_RES_ASCQ);
3894 		return check_condition_result;
3895 	}
3896 
3897 	write_lock(macc_lckp);
3898 
3899 	ret = do_dout_fetch(scp, dnum, arr);
3900 	if (ret == -1) {
3901 		retval = DID_ERROR << 16;
3902 		goto cleanup;
3903 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3904 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3905 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3906 			    dnum * lb_size, ret);
3907 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3908 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3909 		retval = check_condition_result;
3910 		goto cleanup;
3911 	}
3912 	if (scsi_debug_lbp())
3913 		map_region(sip, lba, num);
3914 cleanup:
3915 	write_unlock(macc_lckp);
3916 	kfree(arr);
3917 	return retval;
3918 }
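/*
 * Layout of the COMPARE AND WRITE data-out buffer fetched above (dnum
 * == 2 * num): the first num logical blocks hold the verify data that
 * comp_write_worker() compares against the store; the second num
 * blocks hold the data written back when the comparison succeeds.
 */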
3919 
3920 struct unmap_block_desc {
3921 	__be64	lba;
3922 	__be32	blocks;
3923 	__be32	__reserved;
3924 };
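/*
 * resp_unmap() below expects the UNMAP parameter list: an 8 byte header
 * (be16 data length at offset 0, be16 block descriptor data length at
 * offset 2, then 4 reserved bytes) followed by the 16 byte descriptors
 * declared above.
 */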
3925 
3926 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3927 {
3928 	unsigned char *buf;
3929 	struct unmap_block_desc *desc;
3930 	struct sdeb_store_info *sip = devip2sip(devip, true);
3931 	rwlock_t *macc_lckp = &sip->macc_lck;
3932 	unsigned int i, payload_len, descriptors;
3933 	int ret;
3934 
3935 	if (!scsi_debug_lbp())
3936 		return 0;	/* fib and say it's done */
3937 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3938 	BUG_ON(scsi_bufflen(scp) != payload_len);
3939 
3940 	descriptors = (payload_len - 8) / 16;
3941 	if (descriptors > sdebug_unmap_max_desc) {
3942 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3943 		return check_condition_result;
3944 	}
3945 
3946 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3947 	if (!buf) {
3948 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3949 				INSUFF_RES_ASCQ);
3950 		return check_condition_result;
3951 	}
3952 
3953 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3954 
3955 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3956 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3957 
3958 	desc = (void *)&buf[8];
3959 
3960 	write_lock(macc_lckp);
3961 
3962 	for (i = 0 ; i < descriptors ; i++) {
3963 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3964 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3965 
3966 		ret = check_device_access_params(scp, lba, num, true);
3967 		if (ret)
3968 			goto out;
3969 
3970 		unmap_region(sip, lba, num);
3971 	}
3972 
3973 	ret = 0;
3974 
3975 out:
3976 	write_unlock(macc_lckp);
3977 	kfree(buf);
3978 
3979 	return ret;
3980 }
3981 
3982 #define SDEBUG_GET_LBA_STATUS_LEN 32
3983 
3984 static int resp_get_lba_status(struct scsi_cmnd *scp,
3985 			       struct sdebug_dev_info *devip)
3986 {
3987 	u8 *cmd = scp->cmnd;
3988 	u64 lba;
3989 	u32 alloc_len, mapped, num;
3990 	int ret;
3991 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3992 
3993 	lba = get_unaligned_be64(cmd + 2);
3994 	alloc_len = get_unaligned_be32(cmd + 10);
3995 
3996 	if (alloc_len < 24)
3997 		return 0;
3998 
3999 	ret = check_device_access_params(scp, lba, 1, false);
4000 	if (ret)
4001 		return ret;
4002 
4003 	if (scsi_debug_lbp()) {
4004 		struct sdeb_store_info *sip = devip2sip(devip, true);
4005 
4006 		mapped = map_state(sip, lba, &num);
4007 	} else {
4008 		mapped = 1;
4009 		/* following just in case virtual_gb changed */
4010 		sdebug_capacity = get_sdebug_capacity();
4011 		if (sdebug_capacity - lba <= 0xffffffff)
4012 			num = sdebug_capacity - lba;
4013 		else
4014 			num = 0xffffffff;
4015 	}
4016 
4017 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4018 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4019 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4020 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4021 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4022 
4023 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4024 }
4025 
4026 static int resp_sync_cache(struct scsi_cmnd *scp,
4027 			   struct sdebug_dev_info *devip)
4028 {
4029 	int res = 0;
4030 	u64 lba;
4031 	u32 num_blocks;
4032 	u8 *cmd = scp->cmnd;
4033 
4034 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4035 		lba = get_unaligned_be32(cmd + 2);
4036 		num_blocks = get_unaligned_be16(cmd + 7);
4037 	} else {				/* SYNCHRONIZE_CACHE(16) */
4038 		lba = get_unaligned_be64(cmd + 2);
4039 		num_blocks = get_unaligned_be32(cmd + 10);
4040 	}
4041 	if (lba + num_blocks > sdebug_capacity) {
4042 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4043 		return check_condition_result;
4044 	}
4045 	if (!write_since_sync || (cmd[1] & 0x2))
4046 		res = SDEG_RES_IMMED_MASK;
4047 	else		/* delay if write_since_sync and IMMED clear */
4048 		write_since_sync = false;
4049 	return res;
4050 }
4051 
4052 /*
4053  * Assuming the LBA+num_blocks is not out-of-range, this function returns
4054  * CONDITION MET if the specified blocks will fit (or already sit) in the
4055  * cache, and GOOD status otherwise. A disk with a big cache is modelled,
4056  * so CONDITION MET is always yielded. As a side effect, the addressed
4057  * range of the store is prefetched into the cache of the CPU(s).
4058  */
4059 static int resp_pre_fetch(struct scsi_cmnd *scp,
4060 			  struct sdebug_dev_info *devip)
4061 {
4062 	int res = 0;
4063 	u64 lba;
4064 	u64 block, rest = 0;
4065 	u32 nblks;
4066 	u8 *cmd = scp->cmnd;
4067 	struct sdeb_store_info *sip = devip2sip(devip, true);
4068 	rwlock_t *macc_lckp = &sip->macc_lck;
4069 	u8 *fsp = sip->storep;
4070 
4071 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4072 		lba = get_unaligned_be32(cmd + 2);
4073 		nblks = get_unaligned_be16(cmd + 7);
4074 	} else {			/* PRE-FETCH(16) */
4075 		lba = get_unaligned_be64(cmd + 2);
4076 		nblks = get_unaligned_be32(cmd + 10);
4077 	}
4078 	if (lba + nblks > sdebug_capacity) {
4079 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4080 		return check_condition_result;
4081 	}
4082 	if (!fsp)
4083 		goto fini;
4084 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4085 	block = do_div(lba, sdebug_store_sectors);
4086 	if (block + nblks > sdebug_store_sectors)
4087 		rest = block + nblks - sdebug_store_sectors;
4088 
4089 	/* Try to bring the PRE-FETCH range into CPU's cache */
4090 	read_lock(macc_lckp);
4091 	prefetch_range(fsp + (sdebug_sector_size * block),
4092 		       (nblks - rest) * sdebug_sector_size);
4093 	if (rest)
4094 		prefetch_range(fsp, rest * sdebug_sector_size);
4095 	read_unlock(macc_lckp);
4096 fini:
4097 	if (cmd[1] & 0x2)
4098 		res = SDEG_RES_IMMED_MASK;
4099 	return res | condition_met_result;
4100 }
4101 
4102 #define RL_BUCKET_ELEMS 8
4103 
4104 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4105  * (W-LUN), the normal Linux scanning logic does not associate it with a
4106  * device (e.g. /dev/sg7). The following magic will make that association:
4107  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4108  * where <n> is a host number. If there are multiple targets in a host then
4109  * the above will associate a W-LUN to each target. To only get a W-LUN
4110  * for target 2, then use "echo '- 2 49409' > scan" .
4111  */
4112 static int resp_report_luns(struct scsi_cmnd *scp,
4113 			    struct sdebug_dev_info *devip)
4114 {
4115 	unsigned char *cmd = scp->cmnd;
4116 	unsigned int alloc_len;
4117 	unsigned char select_report;
4118 	u64 lun;
4119 	struct scsi_lun *lun_p;
4120 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4121 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4122 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4123 	unsigned int tlun_cnt;	/* total LUN count */
4124 	unsigned int rlen;	/* response length (in bytes) */
4125 	int k, j, n, res;
4126 	unsigned int off_rsp = 0;
4127 	const int sz_lun = sizeof(struct scsi_lun);
4128 
4129 	clear_luns_changed_on_target(devip);
4130 
4131 	select_report = cmd[2];
4132 	alloc_len = get_unaligned_be32(cmd + 6);
4133 
4134 	if (alloc_len < 4) {
4135 		pr_err("alloc len too small %d\n", alloc_len);
4136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4137 		return check_condition_result;
4138 	}
4139 
4140 	switch (select_report) {
4141 	case 0:		/* all LUNs apart from W-LUNs */
4142 		lun_cnt = sdebug_max_luns;
4143 		wlun_cnt = 0;
4144 		break;
4145 	case 1:		/* only W-LUNs */
4146 		lun_cnt = 0;
4147 		wlun_cnt = 1;
4148 		break;
4149 	case 2:		/* all LUNs */
4150 		lun_cnt = sdebug_max_luns;
4151 		wlun_cnt = 1;
4152 		break;
4153 	case 0x10:	/* only administrative LUs */
4154 	case 0x11:	/* see SPC-5 */
4155 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4156 	default:
4157 		pr_debug("select report invalid %d\n", select_report);
4158 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4159 		return check_condition_result;
4160 	}
4161 
4162 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4163 		--lun_cnt;
4164 
4165 	tlun_cnt = lun_cnt + wlun_cnt;
4166 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4167 	scsi_set_resid(scp, scsi_bufflen(scp));
4168 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4169 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4170 
4171 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4172 	lun = sdebug_no_lun_0 ? 1 : 0;
4173 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4174 		memset(arr, 0, sizeof(arr));
4175 		lun_p = (struct scsi_lun *)&arr[0];
4176 		if (k == 0) {
4177 			put_unaligned_be32(rlen, &arr[0]);
4178 			++lun_p;
4179 			j = 1;
4180 		}
4181 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4182 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4183 				break;
4184 			int_to_scsilun(lun++, lun_p);
4185 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4186 				lun_p->scsi_lun[0] |= 0x40;
4187 		}
4188 		if (j < RL_BUCKET_ELEMS)
4189 			break;
4190 		n = j * sz_lun;
4191 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4192 		if (res)
4193 			return res;
4194 		off_rsp += n;
4195 	}
4196 	if (wlun_cnt) {
4197 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4198 		++j;
4199 	}
4200 	if (j > 0)
4201 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4202 	return res;
4203 }
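/*
 * Example of the bucketed fill above (for illustration): with
 * sdebug_max_luns == 4, select_report == 2 and no_lun0 off, tlun_cnt is
 * 5 (4 normal LUNs plus the W-LUN), so rlen == 40 and the response is
 * an 8 byte header (LUN list length 40) followed by five 8 byte LUN
 * entries, emitted from arr[] up to RL_BUCKET_ELEMS entries at a time.
 */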
4204 
4205 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4206 {
4207 	bool is_bytchk3 = false;
4208 	u8 bytchk;
4209 	int ret, j;
4210 	u32 vnum, a_num, off;
4211 	const u32 lb_size = sdebug_sector_size;
4212 	u64 lba;
4213 	u8 *arr;
4214 	u8 *cmd = scp->cmnd;
4215 	struct sdeb_store_info *sip = devip2sip(devip, true);
4216 	rwlock_t *macc_lckp = &sip->macc_lck;
4217 
4218 	bytchk = (cmd[1] >> 1) & 0x3;
4219 	if (bytchk == 0) {
4220 		return 0;	/* always claim internal verify okay */
4221 	} else if (bytchk == 2) {
4222 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4223 		return check_condition_result;
4224 	} else if (bytchk == 3) {
4225 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4226 	}
4227 	switch (cmd[0]) {
4228 	case VERIFY_16:
4229 		lba = get_unaligned_be64(cmd + 2);
4230 		vnum = get_unaligned_be32(cmd + 10);
4231 		break;
4232 	case VERIFY:		/* is VERIFY(10) */
4233 		lba = get_unaligned_be32(cmd + 2);
4234 		vnum = get_unaligned_be16(cmd + 7);
4235 		break;
4236 	default:
4237 		mk_sense_invalid_opcode(scp);
4238 		return check_condition_result;
4239 	}
4240 	a_num = is_bytchk3 ? 1 : vnum;
4241 	/* Treat following check like one for read (i.e. no write) access */
4242 	ret = check_device_access_params(scp, lba, a_num, false);
4243 	if (ret)
4244 		return ret;
4245 
4246 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4247 	if (!arr) {
4248 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4249 				INSUFF_RES_ASCQ);
4250 		return check_condition_result;
4251 	}
4252 	/* Not changing store, so only need read access */
4253 	read_lock(macc_lckp);
4254 
4255 	ret = do_dout_fetch(scp, a_num, arr);
4256 	if (ret == -1) {
4257 		ret = DID_ERROR << 16;
4258 		goto cleanup;
4259 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4260 		sdev_printk(KERN_INFO, scp->device,
4261 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4262 			    my_name, __func__, a_num * lb_size, ret);
4263 	}
4264 	if (is_bytchk3) {
4265 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4266 			memcpy(arr + off, arr, lb_size);
4267 	}
4268 	ret = 0;
4269 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4270 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4271 		ret = check_condition_result;
4272 		goto cleanup;
4273 	}
4274 cleanup:
4275 	read_unlock(macc_lckp);
4276 	kfree(arr);
4277 	return ret;
4278 }
4279 
4280 #define RZONES_DESC_HD 64
4281 
4282 /* Report zones depending on start LBA and reporting options */
4283 static int resp_report_zones(struct scsi_cmnd *scp,
4284 			     struct sdebug_dev_info *devip)
4285 {
4286 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4287 	int ret = 0;
4288 	u32 alloc_len, rep_opts, rep_len;
4289 	bool partial;
4290 	u64 lba, zs_lba;
4291 	u8 *arr = NULL, *desc;
4292 	u8 *cmd = scp->cmnd;
4293 	struct sdeb_zone_state *zsp;
4294 	struct sdeb_store_info *sip = devip2sip(devip, false);
4295 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4296 
4297 	if (!sdebug_dev_is_zoned(devip)) {
4298 		mk_sense_invalid_opcode(scp);
4299 		return check_condition_result;
4300 	}
4301 	zs_lba = get_unaligned_be64(cmd + 2);
4302 	alloc_len = get_unaligned_be32(cmd + 10);
4303 	rep_opts = cmd[14] & 0x3f;
4304 	partial = cmd[14] & 0x80;
4305 
4306 	if (zs_lba >= sdebug_capacity) {
4307 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4308 		return check_condition_result;
4309 	}
4310 
4311 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4312 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4313 			    max_zones);
4314 
4315 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4316 	if (!arr) {
4317 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4318 				INSUFF_RES_ASCQ);
4319 		return check_condition_result;
4320 	}
4321 
4322 	read_lock(macc_lckp);
4323 
4324 	desc = arr + 64;
4325 	for (i = 0; i < max_zones; i++) {
4326 		lba = zs_lba + devip->zsize * i;
4327 		if (lba > sdebug_capacity)
4328 			break;
4329 		zsp = zbc_zone(devip, lba);
4330 		switch (rep_opts) {
4331 		case 0x00:
4332 			/* All zones */
4333 			break;
4334 		case 0x01:
4335 			/* Empty zones */
4336 			if (zsp->z_cond != ZC1_EMPTY)
4337 				continue;
4338 			break;
4339 		case 0x02:
4340 			/* Implicit open zones */
4341 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4342 				continue;
4343 			break;
4344 		case 0x03:
4345 			/* Explicit open zones */
4346 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4347 				continue;
4348 			break;
4349 		case 0x04:
4350 			/* Closed zones */
4351 			if (zsp->z_cond != ZC4_CLOSED)
4352 				continue;
4353 			break;
4354 		case 0x05:
4355 			/* Full zones */
4356 			if (zsp->z_cond != ZC5_FULL)
4357 				continue;
4358 			break;
4359 		case 0x06:
4360 		case 0x07:
4361 		case 0x10:
4362 			/*
4363 			 * Read-only, offline, reset WP recommended are
4364 			 * not emulated: no zones to report.
4365 			 */
4366 			continue;
4367 		case 0x11:
4368 			/* non-seq-resource set */
4369 			if (!zsp->z_non_seq_resource)
4370 				continue;
4371 			break;
4372 		case 0x3f:
4373 			/* Not write pointer (conventional) zones */
4374 			if (!zbc_zone_is_conv(zsp))
4375 				continue;
4376 			break;
4377 		default:
4378 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4379 					INVALID_FIELD_IN_CDB, 0);
4380 			ret = check_condition_result;
4381 			goto fini;
4382 		}
4383 
4384 		if (nrz < rep_max_zones) {
4385 			/* Fill zone descriptor */
4386 			desc[0] = zsp->z_type;
4387 			desc[1] = zsp->z_cond << 4;
4388 			if (zsp->z_non_seq_resource)
4389 				desc[1] |= 1 << 1;
4390 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4391 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4392 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4393 			desc += 64;
4394 		}
4395 
4396 		if (partial && nrz >= rep_max_zones)
4397 			break;
4398 
4399 		nrz++;
4400 	}
4401 
4402 	/* Report header */
4403 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4404 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4405 
4406 	rep_len = (unsigned long)desc - (unsigned long)arr;
4407 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4408 
4409 fini:
4410 	read_unlock(macc_lckp);
4411 	kfree(arr);
4412 	return ret;
4413 }
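/*
 * Shape of the REPORT ZONES response built above: a 64 byte header
 * holding the zone list length (nrz * 64) in bytes 0-3 and the maximum
 * LBA in bytes 8-15, followed by one 64 byte descriptor per reported
 * zone: type and condition/non-seq flags in bytes 0-1, zone size at
 * offset 8, zone start LBA at 16 and write pointer at 24.
 */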
4414 
4415 /* Logic transplanted from tcmu-runner, file_zbc.c */
4416 static void zbc_open_all(struct sdebug_dev_info *devip)
4417 {
4418 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4419 	unsigned int i;
4420 
4421 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4422 		if (zsp->z_cond == ZC4_CLOSED)
4423 			zbc_open_zone(devip, &devip->zstate[i], true);
4424 	}
4425 }
4426 
4427 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4428 {
4429 	int res = 0;
4430 	u64 z_id;
4431 	enum sdebug_z_cond zc;
4432 	u8 *cmd = scp->cmnd;
4433 	struct sdeb_zone_state *zsp;
4434 	bool all = cmd[14] & 0x01;
4435 	struct sdeb_store_info *sip = devip2sip(devip, false);
4436 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4437 
4438 	if (!sdebug_dev_is_zoned(devip)) {
4439 		mk_sense_invalid_opcode(scp);
4440 		return check_condition_result;
4441 	}
4442 
4443 	write_lock(macc_lckp);
4444 
4445 	if (all) {
4446 		/* Check if all closed zones can be open */
4447 		if (devip->max_open &&
4448 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4449 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4450 					INSUFF_ZONE_ASCQ);
4451 			res = check_condition_result;
4452 			goto fini;
4453 		}
4454 		/* Open all closed zones */
4455 		zbc_open_all(devip);
4456 		goto fini;
4457 	}
4458 
4459 	/* Open the specified zone */
4460 	z_id = get_unaligned_be64(cmd + 2);
4461 	if (z_id >= sdebug_capacity) {
4462 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4463 		res = check_condition_result;
4464 		goto fini;
4465 	}
4466 
4467 	zsp = zbc_zone(devip, z_id);
4468 	if (z_id != zsp->z_start) {
4469 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4470 		res = check_condition_result;
4471 		goto fini;
4472 	}
4473 	if (zbc_zone_is_conv(zsp)) {
4474 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4475 		res = check_condition_result;
4476 		goto fini;
4477 	}
4478 
4479 	zc = zsp->z_cond;
4480 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4481 		goto fini;
4482 
4483 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4484 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4485 				INSUFF_ZONE_ASCQ);
4486 		res = check_condition_result;
4487 		goto fini;
4488 	}
4489 
4490 	zbc_open_zone(devip, zsp, true);
4491 fini:
4492 	write_unlock(macc_lckp);
4493 	return res;
4494 }
4495 
4496 static void zbc_close_all(struct sdebug_dev_info *devip)
4497 {
4498 	unsigned int i;
4499 
4500 	for (i = 0; i < devip->nr_zones; i++)
4501 		zbc_close_zone(devip, &devip->zstate[i]);
4502 }
4503 
4504 static int resp_close_zone(struct scsi_cmnd *scp,
4505 			   struct sdebug_dev_info *devip)
4506 {
4507 	int res = 0;
4508 	u64 z_id;
4509 	u8 *cmd = scp->cmnd;
4510 	struct sdeb_zone_state *zsp;
4511 	bool all = cmd[14] & 0x01;
4512 	struct sdeb_store_info *sip = devip2sip(devip, false);
4513 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4514 
4515 	if (!sdebug_dev_is_zoned(devip)) {
4516 		mk_sense_invalid_opcode(scp);
4517 		return check_condition_result;
4518 	}
4519 
4520 	write_lock(macc_lckp);
4521 
4522 	if (all) {
4523 		zbc_close_all(devip);
4524 		goto fini;
4525 	}
4526 
4527 	/* Close specified zone */
4528 	z_id = get_unaligned_be64(cmd + 2);
4529 	if (z_id >= sdebug_capacity) {
4530 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4531 		res = check_condition_result;
4532 		goto fini;
4533 	}
4534 
4535 	zsp = zbc_zone(devip, z_id);
4536 	if (z_id != zsp->z_start) {
4537 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4538 		res = check_condition_result;
4539 		goto fini;
4540 	}
4541 	if (zbc_zone_is_conv(zsp)) {
4542 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4543 		res = check_condition_result;
4544 		goto fini;
4545 	}
4546 
4547 	zbc_close_zone(devip, zsp);
4548 fini:
4549 	write_unlock(macc_lckp);
4550 	return res;
4551 }
4552 
4553 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4554 			    struct sdeb_zone_state *zsp, bool empty)
4555 {
4556 	enum sdebug_z_cond zc = zsp->z_cond;
4557 
4558 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4559 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4560 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4561 			zbc_close_zone(devip, zsp);
4562 		if (zsp->z_cond == ZC4_CLOSED)
4563 			devip->nr_closed--;
4564 		zsp->z_wp = zsp->z_start + zsp->z_size;
4565 		zsp->z_cond = ZC5_FULL;
4566 	}
4567 }
4568 
4569 static void zbc_finish_all(struct sdebug_dev_info *devip)
4570 {
4571 	unsigned int i;
4572 
4573 	for (i = 0; i < devip->nr_zones; i++)
4574 		zbc_finish_zone(devip, &devip->zstate[i], false);
4575 }
4576 
4577 static int resp_finish_zone(struct scsi_cmnd *scp,
4578 			    struct sdebug_dev_info *devip)
4579 {
4580 	struct sdeb_zone_state *zsp;
4581 	int res = 0;
4582 	u64 z_id;
4583 	u8 *cmd = scp->cmnd;
4584 	bool all = cmd[14] & 0x01;
4585 	struct sdeb_store_info *sip = devip2sip(devip, false);
4586 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4587 
4588 	if (!sdebug_dev_is_zoned(devip)) {
4589 		mk_sense_invalid_opcode(scp);
4590 		return check_condition_result;
4591 	}
4592 
4593 	write_lock(macc_lckp);
4594 
4595 	if (all) {
4596 		zbc_finish_all(devip);
4597 		goto fini;
4598 	}
4599 
4600 	/* Finish the specified zone */
4601 	z_id = get_unaligned_be64(cmd + 2);
4602 	if (z_id >= sdebug_capacity) {
4603 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4604 		res = check_condition_result;
4605 		goto fini;
4606 	}
4607 
4608 	zsp = zbc_zone(devip, z_id);
4609 	if (z_id != zsp->z_start) {
4610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4611 		res = check_condition_result;
4612 		goto fini;
4613 	}
4614 	if (zbc_zone_is_conv(zsp)) {
4615 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4616 		res = check_condition_result;
4617 		goto fini;
4618 	}
4619 
4620 	zbc_finish_zone(devip, zsp, true);
4621 fini:
4622 	write_unlock(macc_lckp);
4623 	return res;
4624 }
4625 
4626 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4627 			 struct sdeb_zone_state *zsp)
4628 {
4629 	enum sdebug_z_cond zc;
4630 
4631 	if (zbc_zone_is_conv(zsp))
4632 		return;
4633 
4634 	zc = zsp->z_cond;
4635 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4636 		zbc_close_zone(devip, zsp);
4637 
4638 	if (zsp->z_cond == ZC4_CLOSED)
4639 		devip->nr_closed--;
4640 
4641 	zsp->z_non_seq_resource = false;
4642 	zsp->z_wp = zsp->z_start;
4643 	zsp->z_cond = ZC1_EMPTY;
4644 }
4645 
4646 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4647 {
4648 	unsigned int i;
4649 
4650 	for (i = 0; i < devip->nr_zones; i++)
4651 		zbc_rwp_zone(devip, &devip->zstate[i]);
4652 }
4653 
4654 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4655 {
4656 	struct sdeb_zone_state *zsp;
4657 	int res = 0;
4658 	u64 z_id;
4659 	u8 *cmd = scp->cmnd;
4660 	bool all = cmd[14] & 0x01;
4661 	struct sdeb_store_info *sip = devip2sip(devip, false);
4662 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4663 
4664 	if (!sdebug_dev_is_zoned(devip)) {
4665 		mk_sense_invalid_opcode(scp);
4666 		return check_condition_result;
4667 	}
4668 
4669 	write_lock(macc_lckp);
4670 
4671 	if (all) {
4672 		zbc_rwp_all(devip);
4673 		goto fini;
4674 	}
4675 
4676 	z_id = get_unaligned_be64(cmd + 2);
4677 	if (z_id >= sdebug_capacity) {
4678 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4679 		res = check_condition_result;
4680 		goto fini;
4681 	}
4682 
4683 	zsp = zbc_zone(devip, z_id);
4684 	if (z_id != zsp->z_start) {
4685 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4686 		res = check_condition_result;
4687 		goto fini;
4688 	}
4689 	if (zbc_zone_is_conv(zsp)) {
4690 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4691 		res = check_condition_result;
4692 		goto fini;
4693 	}
4694 
4695 	zbc_rwp_zone(devip, zsp);
4696 fini:
4697 	write_unlock(macc_lckp);
4698 	return res;
4699 }
4700 
4701 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4702 {
4703 	u16 hwq;
4704 	u32 tag = blk_mq_unique_tag(cmnd->request);
4705 
4706 	hwq = blk_mq_unique_tag_to_hwq(tag);
4707 
4708 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4709 	if (WARN_ON_ONCE(hwq >= submit_queues))
4710 		hwq = 0;
4711 
4712 	return sdebug_q_arr + hwq;
4713 }
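/*
 * Note: blk_mq_unique_tag() encodes the hardware queue index in the
 * upper 16 bits of its return value and the per-queue tag in the lower
 * 16 bits; blk_mq_unique_tag_to_hwq() is simply that right shift, which
 * is why hwq can index sdebug_q_arr directly.
 */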
4714 
4715 static u32 get_tag(struct scsi_cmnd *cmnd)
4716 {
4717 	return blk_mq_unique_tag(cmnd->request);
4718 }
4719 
4720 /* Queued (deferred) command completions converge here. */
4721 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4722 {
4723 	bool aborted = sd_dp->aborted;
4724 	int qc_idx;
4725 	int retiring = 0;
4726 	unsigned long iflags;
4727 	struct sdebug_queue *sqp;
4728 	struct sdebug_queued_cmd *sqcp;
4729 	struct scsi_cmnd *scp;
4730 	struct sdebug_dev_info *devip;
4731 
4732 	sd_dp->defer_t = SDEB_DEFER_NONE;
4733 	if (unlikely(aborted))
4734 		sd_dp->aborted = false;
4735 	qc_idx = sd_dp->qc_idx;
4736 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4737 	if (sdebug_statistics) {
4738 		atomic_inc(&sdebug_completions);
4739 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4740 			atomic_inc(&sdebug_miss_cpus);
4741 	}
4742 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4743 		pr_err("wild qc_idx=%d\n", qc_idx);
4744 		return;
4745 	}
4746 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4747 	sqcp = &sqp->qc_arr[qc_idx];
4748 	scp = sqcp->a_cmnd;
4749 	if (unlikely(scp == NULL)) {
4750 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4751 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4752 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4753 		return;
4754 	}
4755 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4756 	if (likely(devip))
4757 		atomic_dec(&devip->num_in_q);
4758 	else
4759 		pr_err("devip=NULL\n");
4760 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4761 		retiring = 1;
4762 
4763 	sqcp->a_cmnd = NULL;
4764 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4765 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4766 		pr_err("Unexpected completion\n");
4767 		return;
4768 	}
4769 
4770 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4771 		int k, retval;
4772 
4773 		retval = atomic_read(&retired_max_queue);
4774 		if (qc_idx >= retval) {
4775 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4776 			pr_err("index %d too large\n", retval);
4777 			return;
4778 		}
4779 		k = find_last_bit(sqp->in_use_bm, retval);
4780 		if ((k < sdebug_max_queue) || (k == retval))
4781 			atomic_set(&retired_max_queue, 0);
4782 		else
4783 			atomic_set(&retired_max_queue, k + 1);
4784 	}
4785 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4786 	if (unlikely(aborted)) {
4787 		if (sdebug_verbose)
4788 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4789 		return;
4790 	}
4791 	scp->scsi_done(scp); /* callback to mid level */
4792 }
4793 
4794 /* When high resolution timer goes off this function is called. */
4795 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4796 {
4797 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4798 						  hrt);
4799 	sdebug_q_cmd_complete(sd_dp);
4800 	return HRTIMER_NORESTART;
4801 }
4802 
4803 /* When work queue schedules work, it calls this function. */
4804 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4805 {
4806 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4807 						  ew.work);
4808 	sdebug_q_cmd_complete(sd_dp);
4809 }
4810 
4811 static bool got_shared_uuid;
4812 static uuid_t shared_uuid;
4813 
4814 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4815 {
4816 	struct sdeb_zone_state *zsp;
4817 	sector_t capacity = get_sdebug_capacity();
4818 	sector_t zstart = 0;
4819 	unsigned int i;
4820 
4821 	/*
4822 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4823 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4824 	 * use the specified zone size checking that at least 2 zones can be
4825 	 * created for the device.
4826 	 */
4827 	if (!sdeb_zbc_zone_size_mb) {
4828 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4829 			>> ilog2(sdebug_sector_size);
4830 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4831 			devip->zsize >>= 1;
4832 		if (devip->zsize < 2) {
4833 			pr_err("Device capacity too small\n");
4834 			return -EINVAL;
4835 		}
4836 	} else {
4837 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4838 			pr_err("Zone size is not a power of 2\n");
4839 			return -EINVAL;
4840 		}
4841 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4842 			>> ilog2(sdebug_sector_size);
4843 		if (devip->zsize >= capacity) {
4844 			pr_err("Zone size too large for device capacity\n");
4845 			return -EINVAL;
4846 		}
4847 	}
4848 
4849 	devip->zsize_shift = ilog2(devip->zsize);
4850 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4851 
4852 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4853 		pr_err("Number of conventional zones too large\n");
4854 		return -EINVAL;
4855 	}
4856 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4857 
4858 	if (devip->zmodel == BLK_ZONED_HM) {
4859 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4860 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4861 			devip->max_open = (devip->nr_zones - 1) / 2;
4862 		else
4863 			devip->max_open = sdeb_zbc_max_open;
4864 	}
4865 
4866 	devip->zstate = kcalloc(devip->nr_zones,
4867 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4868 	if (!devip->zstate)
4869 		return -ENOMEM;
4870 
4871 	for (i = 0; i < devip->nr_zones; i++) {
4872 		zsp = &devip->zstate[i];
4873 
4874 		zsp->z_start = zstart;
4875 
4876 		if (i < devip->nr_conv_zones) {
4877 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4878 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4879 			zsp->z_wp = (sector_t)-1;
4880 		} else {
4881 			if (devip->zmodel == BLK_ZONED_HM)
4882 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4883 			else
4884 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4885 			zsp->z_cond = ZC1_EMPTY;
4886 			zsp->z_wp = zsp->z_start;
4887 		}
4888 
4889 		if (zsp->z_start + devip->zsize < capacity)
4890 			zsp->z_size = devip->zsize;
4891 		else
4892 			zsp->z_size = capacity - zsp->z_start;
4893 
4894 		zstart += zsp->z_size;
4895 	}
4896 
4897 	return 0;
4898 }
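/*
 * Worked sizing example (for illustration, assuming 512 byte sectors
 * and DEF_ZBC_ZONE_SIZE_MB == 128): zsize starts at 128 * SZ_1M / 512
 * == 262144 sectors and is halved while fewer than 4 such zones fit in
 * the capacity; nr_zones then rounds capacity / zsize up, so the last
 * zone may be truncated (its z_size becomes capacity - z_start).
 */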
4899 
4900 static struct sdebug_dev_info *sdebug_device_create(
4901 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4902 {
4903 	struct sdebug_dev_info *devip;
4904 
4905 	devip = kzalloc(sizeof(*devip), flags);
4906 	if (devip) {
4907 		if (sdebug_uuid_ctl == 1)
4908 			uuid_gen(&devip->lu_name);
4909 		else if (sdebug_uuid_ctl == 2) {
4910 			if (got_shared_uuid)
4911 				devip->lu_name = shared_uuid;
4912 			else {
4913 				uuid_gen(&shared_uuid);
4914 				got_shared_uuid = true;
4915 				devip->lu_name = shared_uuid;
4916 			}
4917 		}
4918 		devip->sdbg_host = sdbg_host;
4919 		if (sdeb_zbc_in_use) {
4920 			devip->zmodel = sdeb_zbc_model;
4921 			if (sdebug_device_create_zones(devip)) {
4922 				kfree(devip);
4923 				return NULL;
4924 			}
4925 		} else {
4926 			devip->zmodel = BLK_ZONED_NONE;
4927 		}
4929 		devip->create_ts = ktime_get_boottime();
4930 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4931 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4932 	}
4933 	return devip;
4934 }
4935 
4936 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4937 {
4938 	struct sdebug_host_info *sdbg_host;
4939 	struct sdebug_dev_info *open_devip = NULL;
4940 	struct sdebug_dev_info *devip;
4941 
4942 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4943 	if (!sdbg_host) {
4944 		pr_err("Host info NULL\n");
4945 		return NULL;
4946 	}
4947 
4948 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4949 		if (devip->used && devip->channel == sdev->channel &&
4950 		    devip->target == sdev->id && devip->lun == sdev->lun)
4951 			return devip;
4952 		if (!devip->used && !open_devip)
4953 			open_devip = devip;
4957 	}
4958 	if (!open_devip) { /* try and make a new one */
4959 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4960 		if (!open_devip) {
4961 			pr_err("out of memory at line %d\n", __LINE__);
4962 			return NULL;
4963 		}
4964 	}
4965 
4966 	open_devip->channel = sdev->channel;
4967 	open_devip->target = sdev->id;
4968 	open_devip->lun = sdev->lun;
4969 	open_devip->sdbg_host = sdbg_host;
4970 	atomic_set(&open_devip->num_in_q, 0);
4971 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4972 	open_devip->used = true;
4973 	return open_devip;
4974 }
4975 
4976 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4977 {
4978 	if (sdebug_verbose)
4979 		pr_info("slave_alloc <%u %u %u %llu>\n",
4980 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4981 	return 0;
4982 }
4983 
4984 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4985 {
4986 	struct sdebug_dev_info *devip =
4987 			(struct sdebug_dev_info *)sdp->hostdata;
4988 
4989 	if (sdebug_verbose)
4990 		pr_info("slave_configure <%u %u %u %llu>\n",
4991 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4992 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4993 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4994 	if (devip == NULL) {
4995 		devip = find_build_dev_info(sdp);
4996 		if (devip == NULL)
4997 			return 1;  /* no resources, will be marked offline */
4998 	}
4999 	sdp->hostdata = devip;
5000 	if (sdebug_no_uld)
5001 		sdp->no_uld_attach = 1;
5002 	config_cdb_len(sdp);
5003 	return 0;
5004 }
5005 
5006 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5007 {
5008 	struct sdebug_dev_info *devip =
5009 		(struct sdebug_dev_info *)sdp->hostdata;
5010 
5011 	if (sdebug_verbose)
5012 		pr_info("slave_destroy <%u %u %u %llu>\n",
5013 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5014 	if (devip) {
5015 		/* make this slot available for re-use */
5016 		devip->used = false;
5017 		sdp->hostdata = NULL;
5018 	}
5019 }
5020 
5021 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5022 			   enum sdeb_defer_type defer_t)
5023 {
5024 	if (!sd_dp)
5025 		return;
5026 	if (defer_t == SDEB_DEFER_HRT)
5027 		hrtimer_cancel(&sd_dp->hrt);
5028 	else if (defer_t == SDEB_DEFER_WQ)
5029 		cancel_work_sync(&sd_dp->ew.work);
5030 }
5031 
5032 /* If @cmnd is found, deletes its timer or work queue and returns true;
5033    otherwise returns false. */
5034 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5035 {
5036 	unsigned long iflags;
5037 	int j, k, qmax, r_qmax;
5038 	enum sdeb_defer_type l_defer_t;
5039 	struct sdebug_queue *sqp;
5040 	struct sdebug_queued_cmd *sqcp;
5041 	struct sdebug_dev_info *devip;
5042 	struct sdebug_defer *sd_dp;
5043 
5044 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5045 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5046 		qmax = sdebug_max_queue;
5047 		r_qmax = atomic_read(&retired_max_queue);
5048 		if (r_qmax > qmax)
5049 			qmax = r_qmax;
5050 		for (k = 0; k < qmax; ++k) {
5051 			if (test_bit(k, sqp->in_use_bm)) {
5052 				sqcp = &sqp->qc_arr[k];
5053 				if (cmnd != sqcp->a_cmnd)
5054 					continue;
5055 				/* found */
5056 				devip = (struct sdebug_dev_info *)
5057 						cmnd->device->hostdata;
5058 				if (devip)
5059 					atomic_dec(&devip->num_in_q);
5060 				sqcp->a_cmnd = NULL;
5061 				sd_dp = sqcp->sd_dp;
5062 				if (sd_dp) {
5063 					l_defer_t = sd_dp->defer_t;
5064 					sd_dp->defer_t = SDEB_DEFER_NONE;
5065 				} else
5066 					l_defer_t = SDEB_DEFER_NONE;
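				/*
				 * Drop qc_lock before cancelling:
				 * hrtimer_cancel() and cancel_work_sync()
				 * wait for a running sdebug_q_cmd_complete(),
				 * which takes qc_lock itself, so cancelling
				 * under the lock could deadlock.
				 */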
5067 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5068 				stop_qc_helper(sd_dp, l_defer_t);
5069 				clear_bit(k, sqp->in_use_bm);
5070 				return true;
5071 			}
5072 		}
5073 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5074 	}
5075 	return false;
5076 }
5077 
5078 /* Deletes (stops) timers or work queues of all queued commands */
5079 static void stop_all_queued(void)
5080 {
5081 	unsigned long iflags;
5082 	int j, k;
5083 	enum sdeb_defer_type l_defer_t;
5084 	struct sdebug_queue *sqp;
5085 	struct sdebug_queued_cmd *sqcp;
5086 	struct sdebug_dev_info *devip;
5087 	struct sdebug_defer *sd_dp;
5088 
5089 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5090 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5091 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5092 			if (test_bit(k, sqp->in_use_bm)) {
5093 				sqcp = &sqp->qc_arr[k];
5094 				if (sqcp->a_cmnd == NULL)
5095 					continue;
5096 				devip = (struct sdebug_dev_info *)
5097 					sqcp->a_cmnd->device->hostdata;
5098 				if (devip)
5099 					atomic_dec(&devip->num_in_q);
5100 				sqcp->a_cmnd = NULL;
5101 				sd_dp = sqcp->sd_dp;
5102 				if (sd_dp) {
5103 					l_defer_t = sd_dp->defer_t;
5104 					sd_dp->defer_t = SDEB_DEFER_NONE;
5105 				} else
5106 					l_defer_t = SDEB_DEFER_NONE;
5107 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5108 				stop_qc_helper(sd_dp, l_defer_t);
5109 				clear_bit(k, sqp->in_use_bm);
5110 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5111 			}
5112 		}
5113 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5114 	}
5115 }
5116 
5117 /* Free queued command memory on heap */
5118 static void free_all_queued(void)
5119 {
5120 	int j, k;
5121 	struct sdebug_queue *sqp;
5122 	struct sdebug_queued_cmd *sqcp;
5123 
5124 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5125 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5126 			sqcp = &sqp->qc_arr[k];
5127 			kfree(sqcp->sd_dp);
5128 			sqcp->sd_dp = NULL;
5129 		}
5130 	}
5131 }
5132 
5133 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5134 {
5135 	bool ok;
5136 
5137 	++num_aborts;
5138 	if (SCpnt) {
5139 		ok = stop_queued_cmnd(SCpnt);
5140 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5141 			sdev_printk(KERN_INFO, SCpnt->device,
5142 				    "%s: command%s found\n", __func__,
5143 				    ok ? "" : " not");
5144 	}
5145 	return SUCCESS;
5146 }
5147 
5148 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5149 {
5150 	++num_dev_resets;
5151 	if (SCpnt && SCpnt->device) {
5152 		struct scsi_device *sdp = SCpnt->device;
5153 		struct sdebug_dev_info *devip =
5154 				(struct sdebug_dev_info *)sdp->hostdata;
5155 
5156 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5157 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5158 		if (devip)
5159 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5160 	}
5161 	return SUCCESS;
5162 }
5163 
5164 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5165 {
5166 	struct sdebug_host_info *sdbg_host;
5167 	struct sdebug_dev_info *devip;
5168 	struct scsi_device *sdp;
5169 	struct Scsi_Host *hp;
5170 	int k = 0;
5171 
5172 	++num_target_resets;
5173 	if (!SCpnt)
5174 		goto lie;
5175 	sdp = SCpnt->device;
5176 	if (!sdp)
5177 		goto lie;
5178 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5179 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5180 	hp = sdp->host;
5181 	if (!hp)
5182 		goto lie;
5183 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5184 	if (sdbg_host) {
5185 		list_for_each_entry(devip,
5186 				    &sdbg_host->dev_info_list,
5187 				    dev_list)
5188 			if (devip->target == sdp->id) {
5189 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5190 				++k;
5191 			}
5192 	}
5193 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5194 		sdev_printk(KERN_INFO, sdp,
5195 			    "%s: %d device(s) found in target\n", __func__, k);
5196 lie:
5197 	return SUCCESS;
5198 }
5199 
5200 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5201 {
5202 	struct sdebug_host_info *sdbg_host;
5203 	struct sdebug_dev_info *devip;
5204 	struct scsi_device *sdp;
5205 	struct Scsi_Host *hp;
5206 	int k = 0;
5207 
5208 	++num_bus_resets;
5209 	if (!(SCpnt && SCpnt->device))
5210 		goto lie;
5211 	sdp = SCpnt->device;
5212 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5213 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5214 	hp = sdp->host;
5215 	if (hp) {
5216 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5217 		if (sdbg_host) {
5218 			list_for_each_entry(devip,
5219 					    &sdbg_host->dev_info_list,
5220 					    dev_list) {
5221 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5222 				++k;
5223 			}
5224 		}
5225 	}
5226 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5227 		sdev_printk(KERN_INFO, sdp,
5228 			    "%s: %d device(s) found in host\n", __func__, k);
5229 lie:
5230 	return SUCCESS;
5231 }
5232 
5233 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5234 {
5235 	struct sdebug_host_info *sdbg_host;
5236 	struct sdebug_dev_info *devip;
5237 	int k = 0;
5238 
5239 	++num_host_resets;
5240 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5241 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5242 	spin_lock(&sdebug_host_list_lock);
5243 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5244 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5245 				    dev_list) {
5246 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5247 			++k;
5248 		}
5249 	}
5250 	spin_unlock(&sdebug_host_list_lock);
5251 	stop_all_queued();
5252 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5253 		sdev_printk(KERN_INFO, SCpnt->device,
5254 			    "%s: %d device(s) found\n", __func__, k);
5255 	return SUCCESS;
5256 }
5257 
5258 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5259 {
5260 	struct msdos_partition *pp;
5261 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5262 	int sectors_per_part, num_sectors, k;
5263 	int heads_by_sects, start_sec, end_sec;
5264 
5265 	/* assume partition table already zeroed */
5266 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5267 		return;
5268 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5269 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5270 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5271 	}
5272 	num_sectors = (int)get_sdebug_capacity();
5273 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5274 			   / sdebug_num_parts;
5275 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5276 	starts[0] = sdebug_sectors_per;
5277 	max_part_secs = sectors_per_part;
5278 	for (k = 1; k < sdebug_num_parts; ++k) {
5279 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5280 			    * heads_by_sects;
5281 		if (starts[k] - starts[k - 1] < max_part_secs)
5282 			max_part_secs = starts[k] - starts[k - 1];
5283 	}
5284 	starts[sdebug_num_parts] = num_sectors;
5285 	starts[sdebug_num_parts + 1] = 0;
5286 
5287 	ramp[510] = 0x55;	/* magic partition markings */
5288 	ramp[511] = 0xAA;
5289 	pp = (struct msdos_partition *)(ramp + 0x1be);
5290 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5291 		start_sec = starts[k];
5292 		end_sec = starts[k] + max_part_secs - 1;
5293 		pp->boot_ind = 0;
5294 
5295 		pp->cyl = start_sec / heads_by_sects;
5296 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5297 			   / sdebug_sectors_per;
5298 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5299 
5300 		pp->end_cyl = end_sec / heads_by_sects;
5301 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5302 			       / sdebug_sectors_per;
5303 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5304 
5305 		pp->start_sect = cpu_to_le32(start_sec);
5306 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5307 		pp->sys_ind = 0x83;	/* plain Linux partition */
5308 	}
5309 }
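
/*
 * The layout above is a classic MBR: the 0x55 0xAA signature occupies
 * bytes 510-511 and the 16-byte partition entries start at offset 0x1be.
 * One way to inspect the result (a sketch; <X> is whatever letter the
 * disk is assigned):
 *
 *   modprobe scsi_debug num_parts=2 dev_size_mb=32
 *   fdisk -l /dev/sd<X>	# expect two type 0x83 (Linux) partitions
 */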
5310 
5311 static void block_unblock_all_queues(bool block)
5312 {
5313 	int j;
5314 	struct sdebug_queue *sqp;
5315 
5316 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5317 		atomic_set(&sqp->blocked, (int)block);
5318 }
5319 
5320 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5321  * commands will be processed normally before triggers occur.
5322  */
5323 static void tweak_cmnd_count(void)
5324 {
5325 	int count, modulo;
5326 
5327 	modulo = abs(sdebug_every_nth);
5328 	if (modulo < 2)
5329 		return;
5330 	block_unblock_all_queues(true);
5331 	count = atomic_read(&sdebug_cmnd_count);
5332 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5333 	block_unblock_all_queues(false);
5334 }
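
/*
 * For example, with every_nth=100 and sdebug_cmnd_count at 1234, the
 * count is rounded down to 1200, so 99 further commands are processed
 * normally before the next trigger fires.
 */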
5335 
5336 static void clear_queue_stats(void)
5337 {
5338 	atomic_set(&sdebug_cmnd_count, 0);
5339 	atomic_set(&sdebug_completions, 0);
5340 	atomic_set(&sdebug_miss_cpus, 0);
5341 	atomic_set(&sdebug_a_tsf, 0);
5342 }
5343 
5344 static bool inject_on_this_cmd(void)
5345 {
5346 	if (sdebug_every_nth == 0)
5347 		return false;
5348 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5349 }
5350 
5351 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5352 
5353 /* Complete the processing of the thread that queued a SCSI command to this
5354  * driver. It either completes the command immediately by calling the
5355  * mid-level's scsi_done() or schedules an hrtimer or work queue item and
5356  * returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5357  */
5358 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5359 			 int scsi_result,
5360 			 int (*pfp)(struct scsi_cmnd *,
5361 				    struct sdebug_dev_info *),
5362 			 int delta_jiff, int ndelay)
5363 {
5364 	bool new_sd_dp;
5365 	bool inject = false;
5366 	int k, num_in_q, qdepth;
5367 	unsigned long iflags;
5368 	u64 ns_from_boot = 0;
5369 	struct sdebug_queue *sqp;
5370 	struct sdebug_queued_cmd *sqcp;
5371 	struct scsi_device *sdp;
5372 	struct sdebug_defer *sd_dp;
5373 
5374 	if (unlikely(devip == NULL)) {
5375 		if (scsi_result == 0)
5376 			scsi_result = DID_NO_CONNECT << 16;
5377 		goto respond_in_thread;
5378 	}
5379 	sdp = cmnd->device;
5380 
5381 	if (delta_jiff == 0)
5382 		goto respond_in_thread;
5383 
5384 	sqp = get_queue(cmnd);
5385 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5386 	if (unlikely(atomic_read(&sqp->blocked))) {
5387 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5388 		return SCSI_MLQUEUE_HOST_BUSY;
5389 	}
5390 	num_in_q = atomic_read(&devip->num_in_q);
5391 	qdepth = cmnd->device->queue_depth;
5392 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5393 		if (scsi_result) {
5394 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5395 			goto respond_in_thread;
5396 		} else
5397 			scsi_result = device_qfull_result;
5398 	} else if (unlikely(sdebug_every_nth &&
5399 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5400 			    (scsi_result == 0))) {
5401 		if ((num_in_q == (qdepth - 1)) &&
5402 		    (atomic_inc_return(&sdebug_a_tsf) >=
5403 		     abs(sdebug_every_nth))) {
5404 			atomic_set(&sdebug_a_tsf, 0);
5405 			inject = true;
5406 			scsi_result = device_qfull_result;
5407 		}
5408 	}
5409 
5410 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5411 	if (unlikely(k >= sdebug_max_queue)) {
5412 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5413 		if (scsi_result)
5414 			goto respond_in_thread;
5415 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5416 			scsi_result = device_qfull_result;
5417 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5418 			sdev_printk(KERN_INFO, sdp,
5419 				    "%s: max_queue=%d exceeded, %s\n",
5420 				    __func__, sdebug_max_queue,
5421 				    (scsi_result ?  "status: TASK SET FULL" :
5422 						    "report: host busy"));
5423 		if (scsi_result)
5424 			goto respond_in_thread;
5425 		else
5426 			return SCSI_MLQUEUE_HOST_BUSY;
5427 	}
5428 	set_bit(k, sqp->in_use_bm);
5429 	atomic_inc(&devip->num_in_q);
5430 	sqcp = &sqp->qc_arr[k];
5431 	sqcp->a_cmnd = cmnd;
5432 	cmnd->host_scribble = (unsigned char *)sqcp;
5433 	sd_dp = sqcp->sd_dp;
5434 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5435 	if (!sd_dp) {
5436 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5437 		if (!sd_dp) {
5438 			atomic_dec(&devip->num_in_q);
5439 			clear_bit(k, sqp->in_use_bm);
5440 			return SCSI_MLQUEUE_HOST_BUSY;
5441 		}
5442 		new_sd_dp = true;
5443 	} else {
5444 		new_sd_dp = false;
5445 	}
5446 
5447 	/* Set the hostwide tag */
5448 	if (sdebug_host_max_queue)
5449 		sd_dp->hc_idx = get_tag(cmnd);
5450 
5451 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5452 		ns_from_boot = ktime_get_boottime_ns();
5453 
5454 	/* one of the resp_*() response functions is called here */
5455 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5456 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5457 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5458 		delta_jiff = ndelay = 0;
5459 	}
5460 	if (cmnd->result == 0 && scsi_result != 0)
5461 		cmnd->result = scsi_result;
5462 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5463 		if (atomic_read(&sdeb_inject_pending)) {
5464 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5465 			atomic_set(&sdeb_inject_pending, 0);
5466 			cmnd->result = check_condition_result;
5467 		}
5468 	}
5469 
5470 	if (unlikely(sdebug_verbose && cmnd->result))
5471 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5472 			    __func__, cmnd->result);
5473 
5474 	if (delta_jiff > 0 || ndelay > 0) {
5475 		ktime_t kt;
5476 
5477 		if (delta_jiff > 0) {
5478 			u64 ns = jiffies_to_nsecs(delta_jiff);
5479 
5480 			if (sdebug_random && ns < U32_MAX) {
5481 				ns = prandom_u32_max((u32)ns);
5482 			} else if (sdebug_random) {
5483 				ns >>= 12;	/* scale to 4 usec precision */
5484 				if (ns < U32_MAX)	/* over 4 hours max */
5485 					ns = prandom_u32_max((u32)ns);
5486 				ns <<= 12;
5487 			}
5488 			kt = ns_to_ktime(ns);
5489 		} else {	/* ndelay has a 4.2 second max */
5490 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5491 					     (u32)ndelay;
5492 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5493 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5494 
5495 				if (kt <= d) {	/* elapsed duration >= kt */
5496 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5497 					sqcp->a_cmnd = NULL;
5498 					atomic_dec(&devip->num_in_q);
5499 					clear_bit(k, sqp->in_use_bm);
5500 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5501 					if (new_sd_dp)
5502 						kfree(sd_dp);
5503 					/* call scsi_done() from this thread */
5504 					cmnd->scsi_done(cmnd);
5505 					return 0;
5506 				}
5507 				/* otherwise reduce kt by elapsed time */
5508 				kt -= d;
5509 			}
5510 		}
5511 		if (!sd_dp->init_hrt) {
5512 			sd_dp->init_hrt = true;
5513 			sqcp->sd_dp = sd_dp;
5514 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5515 				     HRTIMER_MODE_REL_PINNED);
5516 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5517 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5518 			sd_dp->qc_idx = k;
5519 		}
5520 		if (sdebug_statistics)
5521 			sd_dp->issuing_cpu = raw_smp_processor_id();
5522 		sd_dp->defer_t = SDEB_DEFER_HRT;
5523 		/* schedule the invocation of scsi_done() for a later time */
5524 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5525 	} else {	/* delta_jiff < 0 or immediate, use work queue */
5526 		if (!sd_dp->init_wq) {
5527 			sd_dp->init_wq = true;
5528 			sqcp->sd_dp = sd_dp;
5529 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5530 			sd_dp->qc_idx = k;
5531 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5532 		}
5533 		if (sdebug_statistics)
5534 			sd_dp->issuing_cpu = raw_smp_processor_id();
5535 		sd_dp->defer_t = SDEB_DEFER_WQ;
5536 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5537 			     atomic_read(&sdeb_inject_pending)))
5538 			sd_dp->aborted = true;
5539 		schedule_work(&sd_dp->ew.work);
5540 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5541 			     atomic_read(&sdeb_inject_pending))) {
5542 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5543 			blk_abort_request(cmnd->request);
5544 			atomic_set(&sdeb_inject_pending, 0);
5545 		}
5546 	}
5547 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5548 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5549 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5550 	return 0;
5551 
5552 respond_in_thread:	/* call back to mid-layer using invocation thread */
5553 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5554 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5555 	if (cmnd->result == 0 && scsi_result != 0)
5556 		cmnd->result = scsi_result;
5557 	cmnd->scsi_done(cmnd);
5558 	return 0;
5559 }
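
/*
 * Usage sketch (parameters as documented below): the delay and ndelay
 * module parameters select the completion path taken by schedule_resp():
 *
 *   modprobe scsi_debug delay=0	# respond in the submitting thread
 *   modprobe scsi_debug delay=2	# hrtimer completion after 2 jiffies
 *   modprobe scsi_debug delay=-1	# defer completion to a work queue
 *   modprobe scsi_debug ndelay=5000	# hrtimer completion after 5 us
 *
 * A non-zero ndelay overrides delay (see ndelay_store() below).
 */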
5560 
5561 /* Note: The following macros create attribute files in the
5562    /sys/module/scsi_debug/parameters directory. Unfortunately this
5563    driver is not notified when one of those files changes, so it cannot
5564    trigger auxiliary actions as it can when the corresponding attribute
5565    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5566  */
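/* For example, both of the following set every_nth, but only the second
 * passes through every_nth_store() and so can enable statistics as a
 * side effect:
 *
 *   echo 100 > /sys/module/scsi_debug/parameters/every_nth
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */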
5567 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5568 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5569 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5570 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5571 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5572 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5573 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5574 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5575 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5576 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5577 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5578 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5579 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5580 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5581 module_param_string(inq_product, sdebug_inq_product_id,
5582 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5583 module_param_string(inq_rev, sdebug_inq_product_rev,
5584 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5585 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5586 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5587 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5588 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5589 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5590 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5591 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5592 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5593 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5594 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5595 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5596 		   S_IRUGO | S_IWUSR);
5597 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5598 		   S_IRUGO | S_IWUSR);
5599 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5600 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5601 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5602 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5603 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5604 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5605 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5606 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5607 module_param_named(per_host_store, sdebug_per_host_store, bool,
5608 		   S_IRUGO | S_IWUSR);
5609 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5610 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5611 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5612 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5613 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5614 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5615 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5616 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5617 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5618 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5619 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5620 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5621 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5622 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5623 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5624 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5625 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5626 		   S_IRUGO | S_IWUSR);
5627 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5628 module_param_named(write_same_length, sdebug_write_same_length, int,
5629 		   S_IRUGO | S_IWUSR);
5630 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5631 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5632 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5633 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5634 
5635 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5636 MODULE_DESCRIPTION("SCSI debug adapter driver");
5637 MODULE_LICENSE("GPL");
5638 MODULE_VERSION(SDEBUG_VERSION);
5639 
5640 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5641 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5642 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5643 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5644 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5645 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5646 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5647 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5648 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5649 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5650 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5651 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5652 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5653 MODULE_PARM_DESC(host_max_queue,
5654 		 "host max # of queued cmds (0 to max(def)); if non-zero, max_queue is fixed to the same value");
5655 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5656 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5657 		 SDEBUG_VERSION "\")");
5658 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5659 MODULE_PARM_DESC(lbprz,
5660 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5661 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5662 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5663 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5664 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5665 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5666 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5667 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5668 MODULE_PARM_DESC(medium_error_count, "number of consecutive sectors on which to report a MEDIUM error");
5669 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5670 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5671 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5672 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5673 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5674 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5675 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5676 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5677 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5678 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5679 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5680 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0 [disk])");
5681 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5682 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5683 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7 [SPC-5])");
5684 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5685 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5686 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5687 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5688 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5689 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5690 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5691 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5692 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5693 MODULE_PARM_DESC(uuid_ctl,
5694 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5695 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5696 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5697 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5698 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5699 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5700 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5701 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5702 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
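
/*
 * A representative invocation (a sketch; every parameter used here is
 * described above): two hosts, each with two targets of four LUNs,
 * sharing a 256 MiB ram store:
 *
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4 dev_size_mb=256
 */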
5703 
5704 #define SDEBUG_INFO_LEN 256
5705 static char sdebug_info[SDEBUG_INFO_LEN];
5706 
5707 static const char *scsi_debug_info(struct Scsi_Host *shp)
5708 {
5709 	int k;
5710 
5711 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5712 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5713 	if (k >= (SDEBUG_INFO_LEN - 1))
5714 		return sdebug_info;
5715 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5716 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5717 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5718 		  "statistics", (int)sdebug_statistics);
5719 	return sdebug_info;
5720 }
5721 
5722 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5723 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5724 				 int length)
5725 {
5726 	char arr[16];
5727 	int opts;
5728 	int min_len = length > 15 ? 15 : length;
5729 
5730 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5731 		return -EACCES;
5732 	memcpy(arr, buffer, min_len);
5733 	arr[min_len] = '\0';
5734 	if (1 != sscanf(arr, "%d", &opts))
5735 		return -EINVAL;
5736 	sdebug_opts = opts;
5737 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5738 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5739 	if (sdebug_every_nth != 0)
5740 		tweak_cmnd_count();
5741 	return length;
5742 }
5743 
5744 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5745  * same for each scsi_debug host (if more than one). Some of the counters
5746  * output are not atomic, so they may be inaccurate on a busy system. */
5747 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5748 {
5749 	int f, j, l;
5750 	struct sdebug_queue *sqp;
5751 	struct sdebug_host_info *sdhp;
5752 
5753 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5754 		   SDEBUG_VERSION, sdebug_version_date);
5755 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5756 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5757 		   sdebug_opts, sdebug_every_nth);
5758 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5759 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5760 		   sdebug_sector_size, "bytes");
5761 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5762 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5763 		   num_aborts);
5764 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5765 		   num_dev_resets, num_target_resets, num_bus_resets,
5766 		   num_host_resets);
5767 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5768 		   dix_reads, dix_writes, dif_errors);
5769 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5770 		   sdebug_statistics);
5771 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5772 		   atomic_read(&sdebug_cmnd_count),
5773 		   atomic_read(&sdebug_completions),
5774 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5775 		   atomic_read(&sdebug_a_tsf));
5776 
5777 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5778 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5779 		seq_printf(m, "  queue %d:\n", j);
5780 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5781 		if (f != sdebug_max_queue) {
5782 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5783 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5784 				   "first,last bits", f, l);
5785 		}
5786 	}
5787 
5788 	seq_printf(m, "this host_no=%d\n", host->host_no);
5789 	if (!xa_empty(per_store_ap)) {
5790 		bool niu;
5791 		int idx;
5792 		unsigned long l_idx;
5793 		struct sdeb_store_info *sip;
5794 
5795 		seq_puts(m, "\nhost list:\n");
5796 		j = 0;
5797 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5798 			idx = sdhp->si_idx;
5799 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5800 				   sdhp->shost->host_no, idx);
5801 			++j;
5802 		}
5803 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5804 			   sdeb_most_recent_idx);
5805 		j = 0;
5806 		xa_for_each(per_store_ap, l_idx, sip) {
5807 			niu = xa_get_mark(per_store_ap, l_idx,
5808 					  SDEB_XA_NOT_IN_USE);
5809 			idx = (int)l_idx;
5810 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5811 				   (niu ? "  not_in_use" : ""));
5812 			++j;
5813 		}
5814 	}
5815 	return 0;
5816 }
5817 
5818 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5819 {
5820 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5821 }
5822 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5823  * of delay is jiffies.
5824  */
5825 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5826 			   size_t count)
5827 {
5828 	int jdelay, res;
5829 
5830 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5831 		res = count;
5832 		if (sdebug_jdelay != jdelay) {
5833 			int j, k;
5834 			struct sdebug_queue *sqp;
5835 
5836 			block_unblock_all_queues(true);
5837 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5838 			     ++j, ++sqp) {
5839 				k = find_first_bit(sqp->in_use_bm,
5840 						   sdebug_max_queue);
5841 				if (k != sdebug_max_queue) {
5842 					res = -EBUSY;   /* queued commands */
5843 					break;
5844 				}
5845 			}
5846 			if (res > 0) {
5847 				sdebug_jdelay = jdelay;
5848 				sdebug_ndelay = 0;
5849 			}
5850 			block_unblock_all_queues(false);
5851 		}
5852 		return res;
5853 	}
5854 	return -EINVAL;
5855 }
5856 static DRIVER_ATTR_RW(delay);
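
/* Example: while any command is queued, changing delay is refused:
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay	# may fail with EBUSY
 */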
5857 
5858 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5859 {
5860 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5861 }
5862 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5863 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5864 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5865 			    size_t count)
5866 {
5867 	int ndelay, res;
5868 
5869 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5870 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5871 		res = count;
5872 		if (sdebug_ndelay != ndelay) {
5873 			int j, k;
5874 			struct sdebug_queue *sqp;
5875 
5876 			block_unblock_all_queues(true);
5877 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5878 			     ++j, ++sqp) {
5879 				k = find_first_bit(sqp->in_use_bm,
5880 						   sdebug_max_queue);
5881 				if (k != sdebug_max_queue) {
5882 					res = -EBUSY;   /* queued commands */
5883 					break;
5884 				}
5885 			}
5886 			if (res > 0) {
5887 				sdebug_ndelay = ndelay;
5888 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5889 							: DEF_JDELAY;
5890 			}
5891 			block_unblock_all_queues(false);
5892 		}
5893 		return res;
5894 	}
5895 	return -EINVAL;
5896 }
5897 static DRIVER_ATTR_RW(ndelay);
5898 
5899 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5900 {
5901 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5902 }
5903 
5904 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5905 			  size_t count)
5906 {
5907 	int opts;
5908 	char work[20];
5909 
5910 	if (sscanf(buf, "%10s", work) == 1) {
5911 		if (strncasecmp(work, "0x", 2) == 0) {
5912 			if (kstrtoint(work + 2, 16, &opts) == 0)
5913 				goto opts_done;
5914 		} else {
5915 			if (kstrtoint(work, 10, &opts) == 0)
5916 				goto opts_done;
5917 		}
5918 	}
5919 	return -EINVAL;
5920 opts_done:
5921 	sdebug_opts = opts;
5922 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5923 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5924 	tweak_cmnd_count();
5925 	return count;
5926 }
5927 static DRIVER_ATTR_RW(opts);
5928 
5929 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5930 {
5931 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5932 }
5933 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5934 			   size_t count)
5935 {
5936 	int n;
5937 
5938 	/* Cannot change from or to TYPE_ZBC with sysfs */
5939 	if (sdebug_ptype == TYPE_ZBC)
5940 		return -EINVAL;
5941 
5942 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5943 		if (n == TYPE_ZBC)
5944 			return -EINVAL;
5945 		sdebug_ptype = n;
5946 		return count;
5947 	}
5948 	return -EINVAL;
5949 }
5950 static DRIVER_ATTR_RW(ptype);
5951 
5952 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5953 {
5954 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5955 }
5956 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5957 			    size_t count)
5958 {
5959 	int n;
5960 
5961 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5962 		sdebug_dsense = n;
5963 		return count;
5964 	}
5965 	return -EINVAL;
5966 }
5967 static DRIVER_ATTR_RW(dsense);
5968 
5969 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5970 {
5971 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5972 }
5973 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5974 			     size_t count)
5975 {
5976 	int n, idx;
5977 
5978 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5979 		bool want_store = (n == 0);
5980 		struct sdebug_host_info *sdhp;
5981 
5982 		n = (n > 0);
5983 		sdebug_fake_rw = (sdebug_fake_rw > 0);
5984 		if (sdebug_fake_rw == n)
5985 			return count;	/* not transitioning so do nothing */
5986 
5987 		if (want_store) {	/* 1 --> 0 transition, set up store */
5988 			if (sdeb_first_idx < 0) {
5989 				idx = sdebug_add_store();
5990 				if (idx < 0)
5991 					return idx;
5992 			} else {
5993 				idx = sdeb_first_idx;
5994 				xa_clear_mark(per_store_ap, idx,
5995 					      SDEB_XA_NOT_IN_USE);
5996 			}
5997 			/* make all hosts use same store */
5998 			list_for_each_entry(sdhp, &sdebug_host_list,
5999 					    host_list) {
6000 				if (sdhp->si_idx != idx) {
6001 					xa_set_mark(per_store_ap, sdhp->si_idx,
6002 						    SDEB_XA_NOT_IN_USE);
6003 					sdhp->si_idx = idx;
6004 				}
6005 			}
6006 			sdeb_most_recent_idx = idx;
6007 		} else {	/* 0 --> 1 transition is trigger for shrink */
6008 			sdebug_erase_all_stores(true /* apart from first */);
6009 		}
6010 		sdebug_fake_rw = n;
6011 		return count;
6012 	}
6013 	return -EINVAL;
6014 }
6015 static DRIVER_ATTR_RW(fake_rw);
6016 
6017 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6018 {
6019 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6020 }
6021 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6022 			      size_t count)
6023 {
6024 	int n;
6025 
6026 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6027 		sdebug_no_lun_0 = n;
6028 		return count;
6029 	}
6030 	return -EINVAL;
6031 }
6032 static DRIVER_ATTR_RW(no_lun_0);
6033 
6034 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6035 {
6036 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6037 }
6038 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6039 			      size_t count)
6040 {
6041 	int n;
6042 
6043 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6044 		sdebug_num_tgts = n;
6045 		sdebug_max_tgts_luns();
6046 		return count;
6047 	}
6048 	return -EINVAL;
6049 }
6050 static DRIVER_ATTR_RW(num_tgts);
6051 
6052 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6053 {
6054 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6055 }
6056 static DRIVER_ATTR_RO(dev_size_mb);
6057 
6058 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6059 {
6060 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6061 }
6062 
6063 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6064 				    size_t count)
6065 {
6066 	bool v;
6067 
6068 	if (kstrtobool(buf, &v))
6069 		return -EINVAL;
6070 
6071 	sdebug_per_host_store = v;
6072 	return count;
6073 }
6074 static DRIVER_ATTR_RW(per_host_store);
6075 
6076 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6077 {
6078 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6079 }
6080 static DRIVER_ATTR_RO(num_parts);
6081 
6082 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6083 {
6084 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6085 }
6086 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6087 			       size_t count)
6088 {
6089 	int nth;
6090 	char work[20];
6091 
6092 	if (sscanf(buf, "%10s", work) == 1) {
6093 		if (strncasecmp(work, "0x", 2) == 0) {
6094 			if (kstrtoint(work + 2, 16, &nth) == 0)
6095 				goto every_nth_done;
6096 		} else {
6097 			if (kstrtoint(work, 10, &nth) == 0)
6098 				goto every_nth_done;
6099 		}
6100 	}
6101 	return -EINVAL;
6102 
6103 every_nth_done:
6104 	sdebug_every_nth = nth;
6105 	if (nth && !sdebug_statistics) {
6106 		pr_info("every_nth needs statistics=1, set it\n");
6107 		sdebug_statistics = true;
6108 	}
6109 	tweak_cmnd_count();
6110 	return count;
6111 }
6112 static DRIVER_ATTR_RW(every_nth);
6113 
6114 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6115 {
6116 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6117 }
6118 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6119 				size_t count)
6120 {
6121 	int n;
6122 	bool changed;
6123 
6124 	if (kstrtoint(buf, 0, &n))
6125 		return -EINVAL;
6126 	if (n >= 0) {
6127 		if (n > (int)SAM_LUN_AM_FLAT) {
6128 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6129 			return -EINVAL;
6130 		}
6131 		changed = ((int)sdebug_lun_am != n);
6132 		sdebug_lun_am = n;
6133 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6134 			struct sdebug_host_info *sdhp;
6135 			struct sdebug_dev_info *dp;
6136 
6137 			spin_lock(&sdebug_host_list_lock);
6138 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6139 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6140 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6141 				}
6142 			}
6143 			spin_unlock(&sdebug_host_list_lock);
6144 		}
6145 		return count;
6146 	}
6147 	return -EINVAL;
6148 }
6149 static DRIVER_ATTR_RW(lun_format);
6150 
6151 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6152 {
6153 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6154 }
6155 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6156 			      size_t count)
6157 {
6158 	int n;
6159 	bool changed;
6160 
6161 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6162 		if (n > 256) {
6163 			pr_warn("max_luns can be no more than 256\n");
6164 			return -EINVAL;
6165 		}
6166 		changed = (sdebug_max_luns != n);
6167 		sdebug_max_luns = n;
6168 		sdebug_max_tgts_luns();
6169 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6170 			struct sdebug_host_info *sdhp;
6171 			struct sdebug_dev_info *dp;
6172 
6173 			spin_lock(&sdebug_host_list_lock);
6174 			list_for_each_entry(sdhp, &sdebug_host_list,
6175 					    host_list) {
6176 				list_for_each_entry(dp, &sdhp->dev_info_list,
6177 						    dev_list) {
6178 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6179 						dp->uas_bm);
6180 				}
6181 			}
6182 			spin_unlock(&sdebug_host_list_lock);
6183 		}
6184 		return count;
6185 	}
6186 	return -EINVAL;
6187 }
6188 static DRIVER_ATTR_RW(max_luns);
6189 
6190 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6191 {
6192 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6193 }
6194 /* N.B. max_queue can be changed while there are queued commands. In-flight
6195  * commands beyond the new max_queue will be completed. */
6196 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6197 			       size_t count)
6198 {
6199 	int j, n, k, a;
6200 	struct sdebug_queue *sqp;
6201 
6202 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6203 	    (n <= SDEBUG_CANQUEUE) &&
6204 	    (sdebug_host_max_queue == 0)) {
6205 		block_unblock_all_queues(true);
6206 		k = 0;
6207 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6208 		     ++j, ++sqp) {
6209 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6210 			if (a > k)
6211 				k = a;
6212 		}
6213 		sdebug_max_queue = n;
6214 		if (k == SDEBUG_CANQUEUE)
6215 			atomic_set(&retired_max_queue, 0);
6216 		else if (k >= n)
6217 			atomic_set(&retired_max_queue, k + 1);
6218 		else
6219 			atomic_set(&retired_max_queue, 0);
6220 		block_unblock_all_queues(false);
6221 		return count;
6222 	}
6223 	return -EINVAL;
6224 }
6225 static DRIVER_ATTR_RW(max_queue);
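
/*
 * If max_queue shrinks below a slot index that is still in use, the old
 * bound is remembered in retired_max_queue so that stop_queued_cmnd()
 * keeps scanning up to it until the in-flight commands drain.
 */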
6226 
6227 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6228 {
6229 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6230 }
6231 
6232 /*
6233  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6234  * in range [0, sdebug_host_max_queue), we can't change it.
6235  */
6236 static DRIVER_ATTR_RO(host_max_queue);
6237 
6238 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6239 {
6240 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6241 }
6242 static DRIVER_ATTR_RO(no_uld);
6243 
6244 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6245 {
6246 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6247 }
6248 static DRIVER_ATTR_RO(scsi_level);
6249 
6250 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6251 {
6252 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6253 }
6254 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6255 				size_t count)
6256 {
6257 	int n;
6258 	bool changed;
6259 
6260 	/* Ignore capacity change for ZBC drives for now */
6261 	if (sdeb_zbc_in_use)
6262 		return -ENOTSUPP;
6263 
6264 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6265 		changed = (sdebug_virtual_gb != n);
6266 		sdebug_virtual_gb = n;
6267 		sdebug_capacity = get_sdebug_capacity();
6268 		if (changed) {
6269 			struct sdebug_host_info *sdhp;
6270 			struct sdebug_dev_info *dp;
6271 
6272 			spin_lock(&sdebug_host_list_lock);
6273 			list_for_each_entry(sdhp, &sdebug_host_list,
6274 					    host_list) {
6275 				list_for_each_entry(dp, &sdhp->dev_info_list,
6276 						    dev_list) {
6277 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6278 						dp->uas_bm);
6279 				}
6280 			}
6281 			spin_unlock(&sdebug_host_list_lock);
6282 		}
6283 		return count;
6284 	}
6285 	return -EINVAL;
6286 }
6287 static DRIVER_ATTR_RW(virtual_gb);
6288 
6289 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6290 {
6291 	/* the absolute number of currently active hosts is shown */
6292 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6293 }
6294 
6295 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6296 			      size_t count)
6297 {
6298 	bool found;
6299 	unsigned long idx;
6300 	struct sdeb_store_info *sip;
6301 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6302 	int delta_hosts;
6303 
6304 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6305 		return -EINVAL;
6306 	if (delta_hosts > 0) {
6307 		do {
6308 			found = false;
6309 			if (want_phs) {
6310 				xa_for_each_marked(per_store_ap, idx, sip,
6311 						   SDEB_XA_NOT_IN_USE) {
6312 					sdeb_most_recent_idx = (int)idx;
6313 					found = true;
6314 					break;
6315 				}
6316 				if (found)	/* re-use case */
6317 					sdebug_add_host_helper((int)idx);
6318 				else
6319 					sdebug_do_add_host(true);
6320 			} else {
6321 				sdebug_do_add_host(false);
6322 			}
6323 		} while (--delta_hosts);
6324 	} else if (delta_hosts < 0) {
6325 		do {
6326 			sdebug_do_remove_host(false);
6327 		} while (++delta_hosts);
6328 	}
6329 	return count;
6330 }
6331 static DRIVER_ATTR_RW(add_host);
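
/*
 * Usage sketch: hosts can be added or removed at runtime; a negative
 * value removes hosts, as noted in the add_host parameter description:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# remove one
 */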
6332 
6333 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6334 {
6335 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6336 }
6337 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6338 				    size_t count)
6339 {
6340 	int n;
6341 
6342 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6343 		sdebug_vpd_use_hostno = n;
6344 		return count;
6345 	}
6346 	return -EINVAL;
6347 }
6348 static DRIVER_ATTR_RW(vpd_use_hostno);
6349 
6350 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6351 {
6352 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6353 }
6354 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6355 				size_t count)
6356 {
6357 	int n;
6358 
6359 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6360 		if (n > 0)
6361 			sdebug_statistics = true;
6362 		else {
6363 			clear_queue_stats();
6364 			sdebug_statistics = false;
6365 		}
6366 		return count;
6367 	}
6368 	return -EINVAL;
6369 }
6370 static DRIVER_ATTR_RW(statistics);
6371 
6372 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6373 {
6374 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6375 }
6376 static DRIVER_ATTR_RO(sector_size);
6377 
6378 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6379 {
6380 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6381 }
6382 static DRIVER_ATTR_RO(submit_queues);
6383 
6384 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6385 {
6386 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6387 }
6388 static DRIVER_ATTR_RO(dix);
6389 
6390 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6391 {
6392 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6393 }
6394 static DRIVER_ATTR_RO(dif);
6395 
6396 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6397 {
6398 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6399 }
6400 static DRIVER_ATTR_RO(guard);
6401 
6402 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6403 {
6404 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6405 }
6406 static DRIVER_ATTR_RO(ato);
6407 
6408 static ssize_t map_show(struct device_driver *ddp, char *buf)
6409 {
6410 	ssize_t count = 0;
6411 
6412 	if (!scsi_debug_lbp())
6413 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6414 				 sdebug_store_sectors);
6415 
6416 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6417 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6418 
6419 		if (sip)
6420 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6421 					  (int)map_size, sip->map_storep);
6422 	}
6423 	buf[count++] = '\n';
6424 	buf[count] = '\0';
6425 
6426 	return count;
6427 }
6428 static DRIVER_ATTR_RO(map);
6429 
6430 static ssize_t random_show(struct device_driver *ddp, char *buf)
6431 {
6432 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6433 }
6434 
6435 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6436 			    size_t count)
6437 {
6438 	bool v;
6439 
6440 	if (kstrtobool(buf, &v))
6441 		return -EINVAL;
6442 
6443 	sdebug_random = v;
6444 	return count;
6445 }
6446 static DRIVER_ATTR_RW(random);
6447 
6448 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6449 {
6450 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6451 }
6452 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6453 			       size_t count)
6454 {
6455 	int n;
6456 
6457 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6458 		sdebug_removable = (n > 0);
6459 		return count;
6460 	}
6461 	return -EINVAL;
6462 }
6463 static DRIVER_ATTR_RW(removable);
6464 
6465 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6466 {
6467 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6468 }
6469 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6470 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6471 			       size_t count)
6472 {
6473 	int n;
6474 
6475 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6476 		sdebug_host_lock = (n > 0);
6477 		return count;
6478 	}
6479 	return -EINVAL;
6480 }
6481 static DRIVER_ATTR_RW(host_lock);
6482 
6483 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6484 {
6485 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6486 }
6487 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6488 			    size_t count)
6489 {
6490 	int n;
6491 
6492 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6493 		sdebug_strict = (n > 0);
6494 		return count;
6495 	}
6496 	return -EINVAL;
6497 }
6498 static DRIVER_ATTR_RW(strict);
6499 
6500 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6501 {
6502 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6503 }
6504 static DRIVER_ATTR_RO(uuid_ctl);
6505 
6506 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6507 {
6508 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6509 }
6510 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6511 			     size_t count)
6512 {
6513 	int ret, n;
6514 
6515 	ret = kstrtoint(buf, 0, &n);
6516 	if (ret)
6517 		return ret;
6518 	sdebug_cdb_len = n;
6519 	all_config_cdb_len();
6520 	return count;
6521 }
6522 static DRIVER_ATTR_RW(cdb_len);
6523 
6524 static const char * const zbc_model_strs_a[] = {
6525 	[BLK_ZONED_NONE] = "none",
6526 	[BLK_ZONED_HA]   = "host-aware",
6527 	[BLK_ZONED_HM]   = "host-managed",
6528 };
6529 
6530 static const char * const zbc_model_strs_b[] = {
6531 	[BLK_ZONED_NONE] = "no",
6532 	[BLK_ZONED_HA]   = "aware",
6533 	[BLK_ZONED_HM]   = "managed",
6534 };
6535 
6536 static const char * const zbc_model_strs_c[] = {
6537 	[BLK_ZONED_NONE] = "0",
6538 	[BLK_ZONED_HA]   = "1",
6539 	[BLK_ZONED_HM]   = "2",
6540 };
6541 
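/*
 * Map a zbc= parameter string to a BLK_ZONED_* model using the three
 * string tables above, so "none", "no" and "0" are all accepted (and
 * similarly for the host-aware and host-managed spellings). Returns the
 * matched model index, or -EINVAL if nothing matches.
 */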
6542 static int sdeb_zbc_model_str(const char *cp)
6543 {
6544 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6545 
6546 	if (res < 0) {
6547 		res = sysfs_match_string(zbc_model_strs_b, cp);
6548 		if (res < 0) {
6549 			res = sysfs_match_string(zbc_model_strs_c, cp);
6550 			if (res < 0)
6551 				return -EINVAL;
6552 		}
6553 	}
6554 	return res;
6555 }
6556 
6557 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6558 {
6559 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6560 			 zbc_model_strs_a[sdeb_zbc_model]);
6561 }
6562 static DRIVER_ATTR_RO(zbc);
6563 
6564 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6565 {
6566 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6567 }
6568 static DRIVER_ATTR_RO(tur_ms_to_ready);
6569 
6570 /*
6571  * Note: The following array creates attribute files in the
6572  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6573  * files (over those found in the /sys/module/scsi_debug/parameters
6574  * directory) is that auxiliary actions can be triggered when an
6575  * attribute is changed. For example see: add_host_store() above.
 */
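
/*
 * A minimal usage sketch (paths assume the default sysfs mount at /sys):
 *
 *	cat /sys/bus/pseudo/drivers/scsi_debug/map	# dump provisioning map
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# add a host
 *	echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# remove a host
 */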
6576 
6577 static struct attribute *sdebug_drv_attrs[] = {
6578 	&driver_attr_delay.attr,
6579 	&driver_attr_opts.attr,
6580 	&driver_attr_ptype.attr,
6581 	&driver_attr_dsense.attr,
6582 	&driver_attr_fake_rw.attr,
6583 	&driver_attr_host_max_queue.attr,
6584 	&driver_attr_no_lun_0.attr,
6585 	&driver_attr_num_tgts.attr,
6586 	&driver_attr_dev_size_mb.attr,
6587 	&driver_attr_num_parts.attr,
6588 	&driver_attr_every_nth.attr,
6589 	&driver_attr_lun_format.attr,
6590 	&driver_attr_max_luns.attr,
6591 	&driver_attr_max_queue.attr,
6592 	&driver_attr_no_uld.attr,
6593 	&driver_attr_scsi_level.attr,
6594 	&driver_attr_virtual_gb.attr,
6595 	&driver_attr_add_host.attr,
6596 	&driver_attr_per_host_store.attr,
6597 	&driver_attr_vpd_use_hostno.attr,
6598 	&driver_attr_sector_size.attr,
6599 	&driver_attr_statistics.attr,
6600 	&driver_attr_submit_queues.attr,
6601 	&driver_attr_dix.attr,
6602 	&driver_attr_dif.attr,
6603 	&driver_attr_guard.attr,
6604 	&driver_attr_ato.attr,
6605 	&driver_attr_map.attr,
6606 	&driver_attr_random.attr,
6607 	&driver_attr_removable.attr,
6608 	&driver_attr_host_lock.attr,
6609 	&driver_attr_ndelay.attr,
6610 	&driver_attr_strict.attr,
6611 	&driver_attr_uuid_ctl.attr,
6612 	&driver_attr_cdb_len.attr,
6613 	&driver_attr_tur_ms_to_ready.attr,
6614 	&driver_attr_zbc.attr,
6615 	NULL,
6616 };
6617 ATTRIBUTE_GROUPS(sdebug_drv);
6618 
6619 static struct device *pseudo_primary;
6620 
6621 static int __init scsi_debug_init(void)
6622 {
6623 	bool want_store = (sdebug_fake_rw == 0);
6624 	unsigned long sz;
6625 	int k, ret, hosts_to_add;
6626 	int idx = -1;
6627 
6628 	ramdisk_lck_a[0] = &atomic_rw;
6629 	ramdisk_lck_a[1] = &atomic_rw2;
6630 	atomic_set(&retired_max_queue, 0);
6631 
6632 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6633 		pr_warn("ndelay must be less than 1 second, ignored\n");
6634 		sdebug_ndelay = 0;
6635 	} else if (sdebug_ndelay > 0)
6636 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6637 
6638 	switch (sdebug_sector_size) {
6639 	case  512:
6640 	case 1024:
6641 	case 2048:
6642 	case 4096:
6643 		break;
6644 	default:
6645 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6646 		return -EINVAL;
6647 	}
6648 
6649 	switch (sdebug_dif) {
6650 	case T10_PI_TYPE0_PROTECTION:
6651 		break;
6652 	case T10_PI_TYPE1_PROTECTION:
6653 	case T10_PI_TYPE2_PROTECTION:
6654 	case T10_PI_TYPE3_PROTECTION:
6655 		have_dif_prot = true;
6656 		break;
6657 
6658 	default:
6659 		pr_err("dif must be 0, 1, 2 or 3\n");
6660 		return -EINVAL;
6661 	}
6662 
6663 	if (sdebug_num_tgts < 0) {
6664 		pr_err("num_tgts must be >= 0\n");
6665 		return -EINVAL;
6666 	}
6667 
6668 	if (sdebug_guard > 1) {
6669 		pr_err("guard must be 0 or 1\n");
6670 		return -EINVAL;
6671 	}
6672 
6673 	if (sdebug_ato > 1) {
6674 		pr_err("ato must be 0 or 1\n");
6675 		return -EINVAL;
6676 	}
6677 
6678 	if (sdebug_physblk_exp > 15) {
6679 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6680 		return -EINVAL;
6681 	}
6682 
6683 	sdebug_lun_am = sdebug_lun_am_i;
6684 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6685 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6686 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6687 	}
6688 
6689 	if (sdebug_max_luns > 256) {
6690 		if (sdebug_max_luns > 16384) {
6691 			pr_warn("max_luns can be no more than 16384, using default\n");
6692 			sdebug_max_luns = DEF_MAX_LUNS;
6693 		}
6694 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6695 	}
6696 
6697 	if (sdebug_lowest_aligned > 0x3fff) {
6698 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6699 		return -EINVAL;
6700 	}
6701 
6702 	if (submit_queues < 1) {
6703 		pr_err("submit_queues must be 1 or more\n");
6704 		return -EINVAL;
6705 	}
6706 
6707 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6708 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6709 		return -EINVAL;
6710 	}
6711 
6712 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6713 	    (sdebug_host_max_queue < 0)) {
6714 		pr_err("host_max_queue must be in range [0, %d]\n",
6715 		       SDEBUG_CANQUEUE);
6716 		return -EINVAL;
6717 	}
6718 
6719 	if (sdebug_host_max_queue &&
6720 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6721 		sdebug_max_queue = sdebug_host_max_queue;
6722 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6723 			sdebug_max_queue);
6724 	}
6725 
6726 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6727 			       GFP_KERNEL);
6728 	if (sdebug_q_arr == NULL)
6729 		return -ENOMEM;
6730 	for (k = 0; k < submit_queues; ++k)
6731 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6732 
6733 	/*
6734 	 * Check for a host-managed zoned block device specified with
6735 	 * ptype=0x14 or zbc=XXX.
6736 	 */
6737 	if (sdebug_ptype == TYPE_ZBC) {
6738 		sdeb_zbc_model = BLK_ZONED_HM;
6739 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6740 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6741 		if (k < 0) {
6742 			ret = k;
6743 			goto free_vm;
6744 		}
6745 		sdeb_zbc_model = k;
6746 		switch (sdeb_zbc_model) {
6747 		case BLK_ZONED_NONE:
6748 		case BLK_ZONED_HA:
6749 			sdebug_ptype = TYPE_DISK;
6750 			break;
6751 		case BLK_ZONED_HM:
6752 			sdebug_ptype = TYPE_ZBC;
6753 			break;
6754 		default:
6755 			pr_err("Invalid ZBC model\n");
6756 			ret = -EINVAL;
			goto free_q_arr;
6757 		}
6758 	}
6759 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6760 		sdeb_zbc_in_use = true;
6761 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6762 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6763 	}
6764 
6765 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6766 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6767 	if (sdebug_dev_size_mb < 1)
6768 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6769 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6770 	sdebug_store_sectors = sz / sdebug_sector_size;
6771 	sdebug_capacity = get_sdebug_capacity();
6772 
6773 	/* play around with geometry, don't waste too much on track 0 */
6774 	sdebug_heads = 8;
6775 	sdebug_sectors_per = 32;
6776 	if (sdebug_dev_size_mb >= 256)
6777 		sdebug_heads = 64;
6778 	else if (sdebug_dev_size_mb >= 16)
6779 		sdebug_heads = 32;
6780 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6781 			       (sdebug_sectors_per * sdebug_heads);
6782 	if (sdebug_cylinders_per >= 1024) {
6783 		/* other LLDs do this; implies >= 1GB ram disk ... */
6784 		sdebug_heads = 255;
6785 		sdebug_sectors_per = 63;
6786 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6787 			       (sdebug_sectors_per * sdebug_heads);
6788 	}
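	/*
	 * E.g. the default 8 MB store with 512-byte sectors has 16384
	 * sectors, so 8 heads * 32 sectors/track gives 64 cylinders; a
	 * 1 GB store would hit the 1024 cylinder limit above and fall
	 * back to 255 heads * 63 sectors/track (about 130 cylinders).
	 */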
6789 	if (scsi_debug_lbp()) {
6790 		sdebug_unmap_max_blocks =
6791 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6792 
6793 		sdebug_unmap_max_desc =
6794 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6795 
6796 		sdebug_unmap_granularity =
6797 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6798 
6799 		if (sdebug_unmap_alignment &&
6800 		    sdebug_unmap_granularity <=
6801 		    sdebug_unmap_alignment) {
6802 			pr_err("unmap_granularity <= unmap_alignment\n");
6803 			ret = -EINVAL;
6804 			goto free_q_arr;
6805 		}
6806 	}
6807 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6808 	if (want_store) {
6809 		idx = sdebug_add_store();
6810 		if (idx < 0) {
6811 			ret = idx;
6812 			goto free_q_arr;
6813 		}
6814 	}
6815 
6816 	pseudo_primary = root_device_register("pseudo_0");
6817 	if (IS_ERR(pseudo_primary)) {
6818 		pr_warn("root_device_register() error\n");
6819 		ret = PTR_ERR(pseudo_primary);
6820 		goto free_vm;
6821 	}
6822 	ret = bus_register(&pseudo_lld_bus);
6823 	if (ret < 0) {
6824 		pr_warn("bus_register error: %d\n", ret);
6825 		goto dev_unreg;
6826 	}
6827 	ret = driver_register(&sdebug_driverfs_driver);
6828 	if (ret < 0) {
6829 		pr_warn("driver_register error: %d\n", ret);
6830 		goto bus_unreg;
6831 	}
6832 
6833 	hosts_to_add = sdebug_add_host;
6834 	sdebug_add_host = 0;
6835 
6836 	for (k = 0; k < hosts_to_add; k++) {
6837 		if (want_store && k == 0) {
6838 			ret = sdebug_add_host_helper(idx);
6839 			if (ret < 0) {
6840 				pr_err("add_host_helper k=%d, error=%d\n",
6841 				       k, -ret);
6842 				break;
6843 			}
6844 		} else {
6845 			ret = sdebug_do_add_host(want_store &&
6846 						 sdebug_per_host_store);
6847 			if (ret < 0) {
6848 				pr_err("add_host k=%d error=%d\n", k, -ret);
6849 				break;
6850 			}
6851 		}
6852 	}
6853 	if (sdebug_verbose)
6854 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6855 
6856 	return 0;
6857 
6858 bus_unreg:
6859 	bus_unregister(&pseudo_lld_bus);
6860 dev_unreg:
6861 	root_device_unregister(pseudo_primary);
6862 free_vm:
6863 	sdebug_erase_store(idx, NULL);
6864 free_q_arr:
6865 	kfree(sdebug_q_arr);
6866 	return ret;
6867 }
6868 
6869 static void __exit scsi_debug_exit(void)
6870 {
6871 	int k = sdebug_num_hosts;
6872 
6873 	stop_all_queued();
6874 	for (; k; k--)
6875 		sdebug_do_remove_host(true);
6876 	free_all_queued();
6877 	driver_unregister(&sdebug_driverfs_driver);
6878 	bus_unregister(&pseudo_lld_bus);
6879 	root_device_unregister(pseudo_primary);
6880 
6881 	sdebug_erase_all_stores(false);
6882 	xa_destroy(per_store_ap);
6883 }
6884 
6885 device_initcall(scsi_debug_init);
6886 module_exit(scsi_debug_exit);
6887 
6888 static void sdebug_release_adapter(struct device *dev)
6889 {
6890 	struct sdebug_host_info *sdbg_host;
6891 
6892 	sdbg_host = to_sdebug_host(dev);
6893 	kfree(sdbg_host);
6894 }
6895 
6896 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6897 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6898 {
6899 	if (idx < 0)
6900 		return;
6901 	if (!sip) {
6902 		if (xa_empty(per_store_ap))
6903 			return;
6904 		sip = xa_load(per_store_ap, idx);
6905 		if (!sip)
6906 			return;
6907 	}
6908 	vfree(sip->map_storep);
6909 	vfree(sip->dif_storep);
6910 	vfree(sip->storep);
6911 	xa_erase(per_store_ap, idx);
6912 	kfree(sip);
6913 }
6914 
6915 /* Assume apart_from_first==false occurs only in the shutdown case. */
6916 static void sdebug_erase_all_stores(bool apart_from_first)
6917 {
6918 	unsigned long idx;
6919 	struct sdeb_store_info *sip = NULL;
6920 
6921 	xa_for_each(per_store_ap, idx, sip) {
6922 		if (apart_from_first)
6923 			apart_from_first = false;
6924 		else
6925 			sdebug_erase_store(idx, sip);
6926 	}
6927 	if (apart_from_first)
6928 		sdeb_most_recent_idx = sdeb_first_idx;
6929 }
6930 
6931 /*
6932  * Returns the new store's xarray element index (idx) if >= 0, else a negated errno.
6933  * Limit the number of stores to 65536.
6934  */
6935 static int sdebug_add_store(void)
6936 {
6937 	int res;
6938 	u32 n_idx;
6939 	unsigned long iflags;
6940 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6941 	struct sdeb_store_info *sip = NULL;
6942 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6943 
6944 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6945 	if (!sip)
6946 		return -ENOMEM;
6947 
6948 	xa_lock_irqsave(per_store_ap, iflags);
6949 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6950 	if (unlikely(res < 0)) {
6951 		xa_unlock_irqrestore(per_store_ap, iflags);
6952 		kfree(sip);
6953 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6954 		return res;
6955 	}
6956 	sdeb_most_recent_idx = n_idx;
6957 	if (sdeb_first_idx < 0)
6958 		sdeb_first_idx = n_idx;
6959 	xa_unlock_irqrestore(per_store_ap, iflags);
6960 
6961 	res = -ENOMEM;
6962 	sip->storep = vzalloc(sz);
6963 	if (!sip->storep) {
6964 		pr_err("user data oom\n");
6965 		goto err;
6966 	}
6967 	if (sdebug_num_parts > 0)
6968 		sdebug_build_parts(sip->storep, sz);
6969 
6970 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6971 	if (sdebug_dix) {
6972 		int dif_size;
6973 
6974 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
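		/*
		 * One 8-byte PI tuple per logical block, so e.g. an 8 MB
		 * store with 512-byte sectors needs 16384 * 8 bytes =
		 * 128 KB of PI storage.
		 */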
6975 		sip->dif_storep = vmalloc(dif_size);
6976 
6977 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6978 			sip->dif_storep);
6979 
6980 		if (!sip->dif_storep) {
6981 			pr_err("DIX oom\n");
6982 			goto err;
6983 		}
6984 		memset(sip->dif_storep, 0xff, dif_size);
6985 	}
6986 	/* Logical Block Provisioning */
6987 	if (scsi_debug_lbp()) {
6988 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6989 		sip->map_storep = vmalloc(array_size(sizeof(long),
6990 						     BITS_TO_LONGS(map_size)));
6991 
6992 		pr_info("%lu provisioning blocks\n", map_size);
6993 
6994 		if (!sip->map_storep) {
6995 			pr_err("LBP map oom\n");
6996 			goto err;
6997 		}
6998 
6999 		bitmap_zero(sip->map_storep, map_size);
7000 
7001 		/* Map first 1KB for partition table */
7002 		if (sdebug_num_parts)
7003 			map_region(sip, 0, 2);
7004 	}
7005 
7006 	rwlock_init(&sip->macc_lck);
7007 	return (int)n_idx;
7008 err:
7009 	sdebug_erase_store((int)n_idx, sip);
7010 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7011 	return res;
7012 }
7013 
7014 static int sdebug_add_host_helper(int per_host_idx)
7015 {
7016 	int k, devs_per_host, idx;
7017 	int error = -ENOMEM;
7018 	struct sdebug_host_info *sdbg_host;
7019 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7020 
7021 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7022 	if (!sdbg_host)
7023 		return -ENOMEM;
7024 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7025 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7026 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7027 	sdbg_host->si_idx = idx;
7028 
7029 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7030 
7031 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7032 	for (k = 0; k < devs_per_host; k++) {
7033 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7034 		if (!sdbg_devinfo)
7035 			goto clean;
7036 	}
7037 
7038 	spin_lock(&sdebug_host_list_lock);
7039 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7040 	spin_unlock(&sdebug_host_list_lock);
7041 
7042 	sdbg_host->dev.bus = &pseudo_lld_bus;
7043 	sdbg_host->dev.parent = pseudo_primary;
7044 	sdbg_host->dev.release = &sdebug_release_adapter;
7045 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7046 
7047 	error = device_register(&sdbg_host->dev);
7048 	if (error)
7049 		goto clean;
7050 
7051 	++sdebug_num_hosts;
7052 	return 0;
7053 
7054 clean:
7055 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7056 				 dev_list) {
7057 		list_del(&sdbg_devinfo->dev_list);
7058 		kfree(sdbg_devinfo->zstate);
7059 		kfree(sdbg_devinfo);
7060 	}
7061 	kfree(sdbg_host);
7062 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7063 	return error;
7064 }
7065 
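/*
 * Add one more host, optionally backed by a freshly allocated store
 * (per_host_store); otherwise the most recently added store is shared.
 */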
7066 static int sdebug_do_add_host(bool mk_new_store)
7067 {
7068 	int ph_idx = sdeb_most_recent_idx;
7069 
7070 	if (mk_new_store) {
7071 		ph_idx = sdebug_add_store();
7072 		if (ph_idx < 0)
7073 			return ph_idx;
7074 	}
7075 	return sdebug_add_host_helper(ph_idx);
7076 }
7077 
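/*
 * Remove the most recently added host. Unless this is the final (the_end)
 * removal, a store left without any host using it is marked
 * SDEB_XA_NOT_IN_USE so that a subsequent add may reclaim it.
 */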
7078 static void sdebug_do_remove_host(bool the_end)
7079 {
7080 	int idx = -1;
7081 	struct sdebug_host_info *sdbg_host = NULL;
7082 	struct sdebug_host_info *sdbg_host2;
7083 
7084 	spin_lock(&sdebug_host_list_lock);
7085 	if (!list_empty(&sdebug_host_list)) {
7086 		sdbg_host = list_entry(sdebug_host_list.prev,
7087 				       struct sdebug_host_info, host_list);
7088 		idx = sdbg_host->si_idx;
7089 	}
7090 	if (!the_end && idx >= 0) {
7091 		bool unique = true;
7092 
7093 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7094 			if (sdbg_host2 == sdbg_host)
7095 				continue;
7096 			if (idx == sdbg_host2->si_idx) {
7097 				unique = false;
7098 				break;
7099 			}
7100 		}
7101 		if (unique) {
7102 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7103 			if (idx == sdeb_most_recent_idx)
7104 				--sdeb_most_recent_idx;
7105 		}
7106 	}
7107 	if (sdbg_host)
7108 		list_del(&sdbg_host->host_list);
7109 	spin_unlock(&sdebug_host_list_lock);
7110 
7111 	if (!sdbg_host)
7112 		return;
7113 
7114 	device_unregister(&sdbg_host->dev);
7115 	--sdebug_num_hosts;
7116 }
7117 
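/*
 * Entry for scsi_change_queue_depth(): clamps the requested depth to
 * [1, SDEBUG_CANQUEUE + 10] (deliberately allowed to exceed the internal
 * queue size for testing) and returns the resulting queue depth.
 */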
7118 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7119 {
7120 	int num_in_q = 0;
7121 	struct sdebug_dev_info *devip;
7122 
7123 	block_unblock_all_queues(true);
7124 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7125 	if (NULL == devip) {
7126 		block_unblock_all_queues(false);
7127 		return	-ENODEV;
7128 	}
7129 	num_in_q = atomic_read(&devip->num_in_q);
7130 
7131 	if (qdepth < 1)
7132 		qdepth = 1;
7133 	/* allow qdepth to exceed max host qc_arr elements for testing */
7134 	if (qdepth > SDEBUG_CANQUEUE + 10)
7135 		qdepth = SDEBUG_CANQUEUE + 10;
7136 	scsi_change_queue_depth(sdev, qdepth);
7137 
7138 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7139 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7140 			    __func__, qdepth, num_in_q);
7141 	}
7142 	block_unblock_all_queues(false);
7143 	return sdev->queue_depth;
7144 }
7145 
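/*
 * Periodic error injection: returns true when the current command should
 * be ignored (never completed) so that the mid-layer times it out; e.g.
 * every_nth=100 with SDEBUG_OPT_TIMEOUT set drops every 100th command.
 */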
7146 static bool fake_timeout(struct scsi_cmnd *scp)
7147 {
7148 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7149 		if (sdebug_every_nth < -1)
7150 			sdebug_every_nth = -1;
7151 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7152 			return true; /* ignore command causing timeout */
7153 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7154 			 scsi_medium_access_command(scp))
7155 			return true; /* time out reads and writes */
7156 	}
7157 	return false;
7158 }
7159 
7160 /* Response to TUR or media access command when device stopped */
7161 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7162 {
7163 	int stopped_state;
7164 	u64 diff_ns = 0;
7165 	ktime_t now_ts = ktime_get_boottime();
7166 	struct scsi_device *sdp = scp->device;
7167 
7168 	stopped_state = atomic_read(&devip->stopped);
7169 	if (stopped_state == 2) {
7170 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7171 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7172 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7173 				/* tur_ms_to_ready timer has expired */
7174 				atomic_set(&devip->stopped, 0);
7175 				return 0;
7176 			}
7177 		}
7178 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7179 		if (sdebug_verbose)
7180 			sdev_printk(KERN_INFO, sdp,
7181 				    "%s: Not ready: in process of becoming ready\n", my_name);
7182 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7183 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7184 
7185 			if (diff_ns <= tur_nanosecs_to_ready)
7186 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7187 			else
7188 				diff_ns = tur_nanosecs_to_ready;
7189 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7190 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7191 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7192 						   diff_ns);
7193 			return check_condition_result;
7194 		}
7195 	}
7196 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7197 	if (sdebug_verbose)
7198 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7199 			    my_name);
7200 	return check_condition_result;
7201 }
7202 
7203 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7204 				   struct scsi_cmnd *scp)
7205 {
7206 	u8 sdeb_i;
7207 	struct scsi_device *sdp = scp->device;
7208 	const struct opcode_info_t *oip;
7209 	const struct opcode_info_t *r_oip;
7210 	struct sdebug_dev_info *devip;
7211 	u8 *cmd = scp->cmnd;
7212 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7213 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7214 	int k, na;
7215 	int errsts = 0;
7216 	u64 lun_index = sdp->lun & 0x3FFF;
7217 	u32 flags;
7218 	u16 sa;
7219 	u8 opcode = cmd[0];
7220 	bool has_wlun_rl;
7221 	bool inject_now;
7222 
7223 	scsi_set_resid(scp, 0);
7224 	if (sdebug_statistics) {
7225 		atomic_inc(&sdebug_cmnd_count);
7226 		inject_now = inject_on_this_cmd();
7227 	} else {
7228 		inject_now = false;
7229 	}
7230 	if (unlikely(sdebug_verbose &&
7231 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7232 		char b[120];
7233 		int n, len, sb;
7234 
7235 		len = scp->cmd_len;
7236 		sb = (int)sizeof(b);
7237 		if (len > 32)
7238 			strcpy(b, "too long, over 32 bytes");
7239 		else {
7240 			for (k = 0, n = 0; k < len && n < sb; ++k)
7241 				n += scnprintf(b + n, sb - n, "%02x ",
7242 					       (u32)cmd[k]);
7243 		}
7244 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7245 			    blk_mq_unique_tag(scp->request), b);
7246 	}
7247 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7248 		return SCSI_MLQUEUE_HOST_BUSY;
7249 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7250 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7251 		goto err_out;
7252 
7253 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7254 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7255 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7256 	if (unlikely(!devip)) {
7257 		devip = find_build_dev_info(sdp);
7258 		if (NULL == devip)
7259 			goto err_out;
7260 	}
7261 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7262 		atomic_set(&sdeb_inject_pending, 1);
7263 
7264 	na = oip->num_attached;
7265 	r_pfp = oip->pfp;
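	/*
	 * E.g. SERVICE ACTION IN(16) keeps its service action in the low
	 * 5 bits of CDB byte 1 (F_SA_LOW) while variable-length CDBs such
	 * as READ(32) carry it in bytes 8 and 9 (F_SA_HIGH).
	 */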
7266 	if (na) {	/* multiple commands with this opcode */
7267 		r_oip = oip;
7268 		if (FF_SA & r_oip->flags) {
7269 			if (F_SA_LOW & oip->flags)
7270 				sa = 0x1f & cmd[1];
7271 			else
7272 				sa = get_unaligned_be16(cmd + 8);
7273 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7274 				if (opcode == oip->opcode && sa == oip->sa)
7275 					break;
7276 			}
7277 		} else {   /* since there is no service action, only check opcode */
7278 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7279 				if (opcode == oip->opcode)
7280 					break;
7281 			}
7282 		}
7283 		if (k > na) {
7284 			if (F_SA_LOW & r_oip->flags)
7285 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7286 			else if (F_SA_HIGH & r_oip->flags)
7287 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7288 			else
7289 				mk_sense_invalid_opcode(scp);
7290 			goto check_cond;
7291 		}
7292 	}	/* else (when na==0) we assume the oip is a match */
7293 	flags = oip->flags;
7294 	if (unlikely(F_INV_OP & flags)) {
7295 		mk_sense_invalid_opcode(scp);
7296 		goto check_cond;
7297 	}
7298 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7299 		if (sdebug_verbose)
7300 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7301 				    my_name, opcode);
7302 		mk_sense_invalid_opcode(scp);
7303 		goto check_cond;
7304 	}
7305 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7306 		u8 rem;
7307 		int j;
7308 
7309 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7310 			rem = ~oip->len_mask[k] & cmd[k];
7311 			if (rem) {
7312 				for (j = 7; j >= 0; --j, rem <<= 1) {
7313 					if (0x80 & rem)
7314 						break;
7315 				}
7316 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7317 				goto check_cond;
7318 			}
7319 		}
7320 	}
7321 	if (unlikely(!(F_SKIP_UA & flags) &&
7322 		     find_first_bit(devip->uas_bm,
7323 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7324 		errsts = make_ua(scp, devip);
7325 		if (errsts)
7326 			goto check_cond;
7327 	}
7328 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7329 		     atomic_read(&devip->stopped))) {
7330 		errsts = resp_not_ready(scp, devip);
7331 		if (errsts)
7332 			goto fini;
7333 	}
7334 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7335 		goto fini;
7336 	if (unlikely(sdebug_every_nth)) {
7337 		if (fake_timeout(scp))
7338 			return 0;	/* ignore command: make trouble */
7339 	}
7340 	if (likely(oip->pfp))
7341 		pfp = oip->pfp;	/* calls a resp_* function */
7342 	else
7343 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7344 
7345 fini:
7346 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7347 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7348 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7349 					    sdebug_ndelay > 10000)) {
7350 		/*
7351 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7352 		 * for Start Stop Unit (SSU) we want at least a 1 second delay;
7353 		 * if sdebug_jdelay > 1 we want a long delay of that many seconds.
7354 		 * For Synchronize Cache we want 1/20 of the SSU delay.
7355 		 */
7356 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7357 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7358 
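		/*
		 * E.g. with HZ=1000 and sdebug_jdelay=1: SSU gets 1000
		 * jiffies (1 second) while Synchronize Cache (denom=20)
		 * gets 50 jiffies (50 ms).
		 */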
7359 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7360 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7361 	} else
7362 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7363 				     sdebug_ndelay);
7364 check_cond:
7365 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7366 err_out:
7367 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7368 }
7369 
7370 static struct scsi_host_template sdebug_driver_template = {
7371 	.show_info =		scsi_debug_show_info,
7372 	.write_info =		scsi_debug_write_info,
7373 	.proc_name =		sdebug_proc_name,
7374 	.name =			"SCSI DEBUG",
7375 	.info =			scsi_debug_info,
7376 	.slave_alloc =		scsi_debug_slave_alloc,
7377 	.slave_configure =	scsi_debug_slave_configure,
7378 	.slave_destroy =	scsi_debug_slave_destroy,
7379 	.ioctl =		scsi_debug_ioctl,
7380 	.queuecommand =		scsi_debug_queuecommand,
7381 	.change_queue_depth =	sdebug_change_qdepth,
7382 	.eh_abort_handler =	scsi_debug_abort,
7383 	.eh_device_reset_handler = scsi_debug_device_reset,
7384 	.eh_target_reset_handler = scsi_debug_target_reset,
7385 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7386 	.eh_host_reset_handler = scsi_debug_host_reset,
7387 	.can_queue =		SDEBUG_CANQUEUE,
7388 	.this_id =		7,
7389 	.sg_tablesize =		SG_MAX_SEGMENTS,
7390 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7391 	.max_sectors =		-1U,
7392 	.max_segment_size =	-1U,
7393 	.module =		THIS_MODULE,
7394 	.track_queue_depth =	1,
7395 };
7396 
7397 static int sdebug_driver_probe(struct device *dev)
7398 {
7399 	int error = 0;
7400 	struct sdebug_host_info *sdbg_host;
7401 	struct Scsi_Host *hpnt;
7402 	int hprot;
7403 
7404 	sdbg_host = to_sdebug_host(dev);
7405 
7406 	sdebug_driver_template.can_queue = sdebug_max_queue;
7407 	if (!sdebug_clustering)
7408 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7409 
7410 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7411 	if (NULL == hpnt) {
7412 		pr_err("scsi_host_alloc failed\n");
7413 		error = -ENODEV;
7414 		return error;
7415 	}
7416 	if (submit_queues > nr_cpu_ids) {
7417 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7418 			my_name, submit_queues, nr_cpu_ids);
7419 		submit_queues = nr_cpu_ids;
7420 	}
7421 	/*
7422 	 * Decide whether to tell scsi subsystem that we want mq. The
7423 	 * following should give the same answer for each host.
7424 	 */
7425 	hpnt->nr_hw_queues = submit_queues;
7426 	if (sdebug_host_max_queue)
7427 		hpnt->host_tagset = 1;
7428 
7429 	sdbg_host->shost = hpnt;
7430 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7431 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7432 		hpnt->max_id = sdebug_num_tgts + 1;
7433 	else
7434 		hpnt->max_id = sdebug_num_tgts;
7435 	/* = sdebug_max_luns; */
7436 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7437 
7438 	hprot = 0;
7439 
7440 	switch (sdebug_dif) {
7441 
7442 	case T10_PI_TYPE1_PROTECTION:
7443 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7444 		if (sdebug_dix)
7445 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7446 		break;
7447 
7448 	case T10_PI_TYPE2_PROTECTION:
7449 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7450 		if (sdebug_dix)
7451 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7452 		break;
7453 
7454 	case T10_PI_TYPE3_PROTECTION:
7455 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7456 		if (sdebug_dix)
7457 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7458 		break;
7459 
7460 	default:
7461 		if (sdebug_dix)
7462 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7463 		break;
7464 	}
7465 
7466 	scsi_host_set_prot(hpnt, hprot);
7467 
7468 	if (have_dif_prot || sdebug_dix)
7469 		pr_info("host protection%s%s%s%s%s%s%s\n",
7470 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7471 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7472 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7473 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7474 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7475 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7476 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7477 
7478 	if (sdebug_guard == 1)
7479 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7480 	else
7481 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7482 
7483 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7484 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7485 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7486 		sdebug_statistics = true;
7487 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7488 	if (error) {
7489 		pr_err("scsi_add_host failed\n");
7490 		error = -ENODEV;
7491 		scsi_host_put(hpnt);
7492 	} else {
7493 		scsi_scan_host(hpnt);
7494 	}
7495 
7496 	return error;
7497 }
7498 
7499 static int sdebug_driver_remove(struct device *dev)
7500 {
7501 	struct sdebug_host_info *sdbg_host;
7502 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7503 
7504 	sdbg_host = to_sdebug_host(dev);
7505 
7506 	if (!sdbg_host) {
7507 		pr_err("Unable to locate host info\n");
7508 		return -ENODEV;
7509 	}
7510 
7511 	scsi_remove_host(sdbg_host->shost);
7512 
7513 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7514 				 dev_list) {
7515 		list_del(&sdbg_devinfo->dev_list);
7516 		kfree(sdbg_devinfo->zstate);
7517 		kfree(sdbg_devinfo);
7518 	}
7519 
7520 	scsi_host_put(sdbg_host->shost);
7521 	return 0;
7522 }
7523 
7524 static int pseudo_lld_bus_match(struct device *dev,
7525 				struct device_driver *dev_driver)
7526 {
7527 	return 1;
7528 }
7529 
7530 static struct bus_type pseudo_lld_bus = {
7531 	.name = "pseudo",
7532 	.match = pseudo_lld_bus_match,
7533 	.probe = sdebug_driver_probe,
7534 	.remove = sdebug_driver_remove,
7535 	.drv_groups = sdebug_drv_groups,
7536 };
7537