1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/mutex.h>
37 #include <linux/interrupt.h>
38 #include <linux/atomic.h>
39 #include <linux/hrtimer.h>
40 #include <linux/uuid.h>
41 #include <linux/t10-pi.h>
42 #include <linux/msdos_partition.h>
43 #include <linux/random.h>
44 #include <linux/xarray.h>
45 #include <linux/prefetch.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20200710";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define WRITE_PROTECTED 0x27
80 #define UA_RESET_ASC 0x29
81 #define UA_CHANGED_ASC 0x2a
82 #define TARGET_CHANGED_ASC 0x3f
83 #define LUNS_CHANGED_ASCQ 0x0e
84 #define INSUFF_RES_ASC 0x55
85 #define INSUFF_RES_ASCQ 0x3
86 #define POWER_ON_RESET_ASCQ 0x0
87 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
88 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
89 #define CAPACITY_CHANGED_ASCQ 0x9
90 #define SAVING_PARAMS_UNSUP 0x39
91 #define TRANSPORT_PROBLEM 0x4b
92 #define THRESHOLD_EXCEEDED 0x5d
93 #define LOW_POWER_COND_ON 0x5e
94 #define MISCOMPARE_VERIFY_ASC 0x1d
95 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
96 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
97 #define WRITE_ERROR_ASC 0xc
98 #define UNALIGNED_WRITE_ASCQ 0x4
99 #define WRITE_BOUNDARY_ASCQ 0x5
100 #define READ_INVDATA_ASCQ 0x6
101 #define READ_BOUNDARY_ASCQ 0x7
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
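/* Illustrative usage (editor's sketch, not part of the driver): with the
 * defaults above, a bare "modprobe scsi_debug" creates one 8 MiB ram-backed
 * disk. A hedged example of overriding them at load time:
 *
 *	modprobe scsi_debug dev_size_mb=64 num_tgts=2 max_luns=4
 *
 * The parameter names match the module parameter declarations later in
 * this file.
 */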
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_BUS_RESET 1
201 #define SDEBUG_UA_MODE_CHANGED 2
202 #define SDEBUG_UA_CAPACITY_CHANGED 3
203 #define SDEBUG_UA_LUNS_CHANGED 4
204 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
205 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
206 #define SDEBUG_NUM_UAS 7
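/* Worked example of the priority rule above: if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (4) are pending in a device's uas_bm, make_ua()
 * below uses find_first_bit() and therefore reports the power-on/reset UA
 * first; the LUNS changed UA is reported on a subsequent command.
 */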
207 
208 /* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
209  * error is simulated at this sector on read commands: */
210 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
211 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
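/* Illustrative usage (assumed invocation, not part of the driver): loading
 * with "modprobe scsi_debug opts=2" sets SDEBUG_OPT_MEDIUM_ERR, so reads
 * covering LBA 0x1234 (4660) report MEDIUM ERROR/UNRECOVERED_READ_ERR for
 * OPT_MEDIUM_ERR_NUM consecutive sectors.
 */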
212 
213 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
214  * (for response) per submit queue at one time. Can be reduced by max_queue
215  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
216  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
217  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
218  * but cannot exceed SDEBUG_CANQUEUE.
219  */
220 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is a long (BITS_PER_LONG bits) */
221 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
222 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
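/* Illustrative sysfs usage per the comment above (the <h:c:t:l> tuple is a
 * placeholder for a real address such as 2:0:0:0):
 *
 *	echo 16 > /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 *
 * Values above SDEBUG_CANQUEUE (3 * BITS_PER_LONG, i.e. 192 on 64 bit
 * builds) are capped by this driver.
 */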
223 
224 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
225 #define F_D_IN			1	/* Data-in command (e.g. READ) */
226 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
230 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
231 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
232 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
234 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
235 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
236 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
237 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
238 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
239 
240 /* Useful combinations of the above flags */
241 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
242 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
243 #define FF_SA (F_SA_HIGH | F_SA_LOW)
244 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
245 
246 #define SDEBUG_MAX_PARTS 4
247 
248 #define SDEBUG_MAX_CMD_LEN 32
249 
250 #define SDEB_XA_NOT_IN_USE XA_MARK_1
251 
252 /* Zone types (zbcr05 table 25) */
253 enum sdebug_z_type {
254 	ZBC_ZONE_TYPE_CNV	= 0x1,
255 	ZBC_ZONE_TYPE_SWR	= 0x2,
256 	ZBC_ZONE_TYPE_SWP	= 0x3,
257 };
258 
259 /* enumeration names taken from table 26, zbcr05 */
260 enum sdebug_z_cond {
261 	ZBC_NOT_WRITE_POINTER	= 0x0,
262 	ZC1_EMPTY		= 0x1,
263 	ZC2_IMPLICIT_OPEN	= 0x2,
264 	ZC3_EXPLICIT_OPEN	= 0x3,
265 	ZC4_CLOSED		= 0x4,
266 	ZC6_READ_ONLY		= 0xd,
267 	ZC5_FULL		= 0xe,
268 	ZC7_OFFLINE		= 0xf,
269 };
270 
271 struct sdeb_zone_state {	/* ZBC: per zone state */
272 	enum sdebug_z_type z_type;
273 	enum sdebug_z_cond z_cond;
274 	bool z_non_seq_resource;
275 	unsigned int z_size;
276 	sector_t z_start;
277 	sector_t z_wp;
278 };
279 
280 struct sdebug_dev_info {
281 	struct list_head dev_list;
282 	unsigned int channel;
283 	unsigned int target;
284 	u64 lun;
285 	uuid_t lu_name;
286 	struct sdebug_host_info *sdbg_host;
287 	unsigned long uas_bm[1];
288 	atomic_t num_in_q;
289 	atomic_t stopped;	/* 1: by SSU, 2: device start */
290 	bool used;
291 
292 	/* For ZBC devices */
293 	enum blk_zoned_model zmodel;
294 	unsigned int zsize;
295 	unsigned int zsize_shift;
296 	unsigned int nr_zones;
297 	unsigned int nr_conv_zones;
298 	unsigned int nr_imp_open;
299 	unsigned int nr_exp_open;
300 	unsigned int nr_closed;
301 	unsigned int max_open;
302 	ktime_t create_ts;	/* time since bootup at which this device was created */
303 	struct sdeb_zone_state *zstate;
304 };
305 
306 struct sdebug_host_info {
307 	struct list_head host_list;
308 	int si_idx;	/* sdeb_store_info (per host) xarray index */
309 	struct Scsi_Host *shost;
310 	struct device dev;
311 	struct list_head dev_info_list;
312 };
313 
314 /* There is an xarray of pointers to this struct's objects, one per host */
315 struct sdeb_store_info {
316 	rwlock_t macc_lck;	/* for atomic media access on this store */
317 	u8 *storep;		/* user data storage (ram) */
318 	struct t10_pi_tuple *dif_storep; /* protection info */
319 	void *map_storep;	/* provisioning map */
320 };
321 
322 #define to_sdebug_host(d)	\
323 	container_of(d, struct sdebug_host_info, dev)
324 
325 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
326 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
327 
328 struct sdebug_defer {
329 	struct hrtimer hrt;
330 	struct execute_work ew;
331 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
332 	int sqa_idx;	/* index of sdebug_queue array */
333 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
334 	int hc_idx;	/* hostwide tag index */
335 	int issuing_cpu;
336 	bool init_hrt;
337 	bool init_wq;
338 	bool init_poll;
339 	bool aborted;	/* true when blk_abort_request() already called */
340 	enum sdeb_defer_type defer_t;
341 };
342 
343 struct sdebug_queued_cmd {
344 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
345 	 * instance indicates this slot is in use.
346 	 */
347 	struct sdebug_defer *sd_dp;
348 	struct scsi_cmnd *a_cmnd;
349 };
350 
351 struct sdebug_queue {
352 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
353 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
354 	spinlock_t qc_lock;
355 	atomic_t blocked;	/* to temporarily stop more being queued */
356 };
357 
358 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
359 static atomic_t sdebug_completions;  /* count of deferred completions */
360 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
361 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
362 static atomic_t sdeb_inject_pending;
363 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
364 
365 struct opcode_info_t {
366 	u8 num_attached;	/* number of elements in arrp; 0 if this */
367 				/* is a leaf; 0xff terminates the table */
368 	u8 opcode;		/* if num_attached > 0, preferred */
369 	u16 sa;			/* service action */
370 	u32 flags;		/* OR-ed set of SDEB_F_* */
371 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
372 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
373 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
374 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
375 };
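/* Hedged sketch of how len_mask is meant to be read (the real check lives
 * in the command dispatch code of this driver): len_mask[0] holds the cdb
 * length, and a set bit in len_mask[k] marks a cdb[k] bit this driver
 * accepts as non-zero. In strict mode anything outside the mask is an
 * invalid field, roughly:
 *
 *	for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
 *		if (cdb[k] & ~oip->len_mask[k])
 *			<reject with INVALID FIELD IN CDB sense>;
 */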
376 
377 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
378 enum sdeb_opcode_index {
379 	SDEB_I_INVALID_OPCODE =	0,
380 	SDEB_I_INQUIRY = 1,
381 	SDEB_I_REPORT_LUNS = 2,
382 	SDEB_I_REQUEST_SENSE = 3,
383 	SDEB_I_TEST_UNIT_READY = 4,
384 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
385 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
386 	SDEB_I_LOG_SENSE = 7,
387 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
388 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
389 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
390 	SDEB_I_START_STOP = 11,
391 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
392 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
393 	SDEB_I_MAINT_IN = 14,
394 	SDEB_I_MAINT_OUT = 15,
395 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
396 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
397 	SDEB_I_RESERVE = 18,		/* 6, 10 */
398 	SDEB_I_RELEASE = 19,		/* 6, 10 */
399 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
400 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
401 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
402 	SDEB_I_SEND_DIAG = 23,
403 	SDEB_I_UNMAP = 24,
404 	SDEB_I_WRITE_BUFFER = 25,
405 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
406 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
407 	SDEB_I_COMP_WRITE = 28,
408 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
409 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
410 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
411 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
412 };
413 
414 
415 static const unsigned char opcode_ind_arr[256] = {
416 /* 0x0; 0x0->0x1f: 6 byte cdbs */
417 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
418 	    0, 0, 0, 0,
419 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
420 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
421 	    SDEB_I_RELEASE,
422 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
423 	    SDEB_I_ALLOW_REMOVAL, 0,
424 /* 0x20; 0x20->0x3f: 10 byte cdbs */
425 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
426 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
427 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
428 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
429 /* 0x40; 0x40->0x5f: 10 byte cdbs */
430 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
431 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
432 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
433 	    SDEB_I_RELEASE,
434 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
435 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
436 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
438 	0, SDEB_I_VARIABLE_LEN,
439 /* 0x80; 0x80->0x9f: 16 byte cdbs */
440 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
441 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
442 	0, 0, 0, SDEB_I_VERIFY,
443 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
444 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
445 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
446 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
447 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
448 	     SDEB_I_MAINT_OUT, 0, 0, 0,
449 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
450 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0,
452 	0, 0, 0, 0, 0, 0, 0, 0,
453 /* 0xc0; 0xc0->0xff: vendor specific */
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 };
459 
460 /*
461  * The following "response" functions return the SCSI mid-level's 4 byte
462  * status/message/host/driver tuple packed into an int. To handle commands
463  * with an IMMED bit set (requesting early completion), they can OR their
464  * return value with SDEG_RES_IMMED_MASK.
465  */
466 #define SDEG_RES_IMMED_MASK 0x40000000
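/* Minimal sketch (illustrative, not used by the driver) of the convention
 * described above: a response function that honours an IMMED bit ORs the
 * mask into its result so the completion is not delayed.
 */
static inline int sdeb_immed_result_sketch(int result, bool immed_bit_set)
{
	return immed_bit_set ? (result | SDEG_RES_IMMED_MASK) : result;
}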
467 
468 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 
498 static int sdebug_do_add_host(bool mk_new_store);
499 static int sdebug_add_host_helper(int per_host_idx);
500 static void sdebug_do_remove_host(bool the_end);
501 static int sdebug_add_store(void);
502 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
503 static void sdebug_erase_all_stores(bool apart_from_first);
504 
505 /*
506  * The following are overflow arrays for cdbs that "hit" the same index in
507  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
508  * should be placed in opcode_info_arr[], the others should be placed here.
509  */
510 static const struct opcode_info_t msense_iarr[] = {
511 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
512 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
513 };
514 
515 static const struct opcode_info_t mselect_iarr[] = {
516 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
517 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t read_iarr[] = {
521 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
522 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
523 	     0, 0, 0, 0} },
524 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
525 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
527 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
528 	     0xc7, 0, 0, 0, 0} },
529 };
530 
531 static const struct opcode_info_t write_iarr[] = {
532 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
533 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
534 		   0, 0, 0, 0, 0, 0} },
535 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
536 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
537 		   0, 0, 0} },
538 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
539 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
540 		   0xbf, 0xc7, 0, 0, 0, 0} },
541 };
542 
543 static const struct opcode_info_t verify_iarr[] = {
544 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
545 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
546 		   0, 0, 0, 0, 0, 0} },
547 };
548 
549 static const struct opcode_info_t sa_in_16_iarr[] = {
550 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
551 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
552 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
553 };
554 
555 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
556 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
557 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
558 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
559 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
560 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
561 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
562 };
563 
564 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
565 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
566 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
567 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
568 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
569 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
570 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
571 };
572 
573 static const struct opcode_info_t write_same_iarr[] = {
574 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
575 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
576 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
577 };
578 
579 static const struct opcode_info_t reserve_iarr[] = {
580 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
581 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
582 };
583 
584 static const struct opcode_info_t release_iarr[] = {
585 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
586 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t sync_cache_iarr[] = {
590 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
591 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
592 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
593 };
594 
595 static const struct opcode_info_t pre_fetch_iarr[] = {
596 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
597 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
598 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
599 };
600 
601 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
602 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
603 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
604 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
605 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
606 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
607 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
608 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
609 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
610 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
611 };
612 
613 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
614 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
615 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
616 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
617 };
618 
619 
620 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
621  * plus the terminating elements for logic that scans this table such as
622  * REPORT SUPPORTED OPERATION CODES. */
623 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
624 /* 0 */
625 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
626 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
627 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
628 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
630 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
631 	     0, 0} },					/* REPORT LUNS */
632 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
633 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
635 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 /* 5 */
637 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
638 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
639 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
640 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
641 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
642 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
643 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
644 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
645 	     0, 0, 0} },
646 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
647 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
648 	     0, 0} },
649 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
650 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
651 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
652 /* 10 */
653 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
654 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
655 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
656 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
658 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
659 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
660 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
661 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
662 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
663 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
664 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
665 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
666 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
667 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
668 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
669 				0xff, 0, 0xc7, 0, 0, 0, 0} },
670 /* 15 */
671 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
672 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
673 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
674 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
675 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
676 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
677 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
678 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
679 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
680 	     0xff, 0xff} },
681 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
682 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
683 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 	     0} },
685 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
686 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
687 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
688 	     0} },
689 /* 20 */
690 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
691 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
693 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
695 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
697 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
699 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 /* 25 */
701 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
702 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
703 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
704 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
705 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
706 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
707 		 0, 0, 0, 0, 0} },
708 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
709 	    resp_sync_cache, sync_cache_iarr,
710 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
711 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
712 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
713 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
714 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
715 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
716 	    resp_pre_fetch, pre_fetch_iarr,
717 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
718 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
719 
720 /* 30 */
721 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
722 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
723 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
725 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
726 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
727 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
728 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
729 /* sentinel */
730 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
731 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
732 };
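/* Hedged sketch (illustrative, not the driver's actual dispatch path) of
 * how the two tables above cooperate: opcode_ind_arr maps cdb[0] to an
 * SDEB_I_* index, opcode_info_arr supplies the preferred entry, and the
 * num_attached entries in arrp cover same-index overflow cdbs. E.g.
 * cdb[0] == 0x28 (READ(10)) yields SDEB_I_READ, whose preferred entry is
 * READ(16) and whose read_iarr holds the READ(10) match.
 */
static inline const struct opcode_info_t *
sdeb_opcode_lookup_sketch(const u8 *cdb)
{
	const struct opcode_info_t *oip =
		&opcode_info_arr[opcode_ind_arr[cdb[0]]];
	const struct opcode_info_t *r_oip;

	if (oip->opcode == cdb[0])
		return oip;
	for (r_oip = oip->arrp;
	     r_oip && r_oip != oip->arrp + oip->num_attached; ++r_oip)
		if (r_oip->opcode == cdb[0])
			return r_oip;
	return oip;	/* fall back; the real code also matches service actions */
}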
733 
734 static atomic_t sdebug_num_hosts;
735 static DEFINE_MUTEX(add_host_mutex);
736 
737 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
738 static int sdebug_ato = DEF_ATO;
739 static int sdebug_cdb_len = DEF_CDB_LEN;
740 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
741 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
742 static int sdebug_dif = DEF_DIF;
743 static int sdebug_dix = DEF_DIX;
744 static int sdebug_dsense = DEF_D_SENSE;
745 static int sdebug_every_nth = DEF_EVERY_NTH;
746 static int sdebug_fake_rw = DEF_FAKE_RW;
747 static unsigned int sdebug_guard = DEF_GUARD;
748 static int sdebug_host_max_queue;	/* per host */
749 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
750 static int sdebug_max_luns = DEF_MAX_LUNS;
751 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
752 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
753 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
754 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
755 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
756 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
757 static int sdebug_no_uld;
758 static int sdebug_num_parts = DEF_NUM_PARTS;
759 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
760 static int sdebug_opt_blks = DEF_OPT_BLKS;
761 static int sdebug_opts = DEF_OPTS;
762 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
763 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
764 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
765 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
766 static int sdebug_sector_size = DEF_SECTOR_SIZE;
767 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
768 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
769 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
770 static unsigned int sdebug_lbpu = DEF_LBPU;
771 static unsigned int sdebug_lbpws = DEF_LBPWS;
772 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
773 static unsigned int sdebug_lbprz = DEF_LBPRZ;
774 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
775 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
776 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
777 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
778 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
779 static int sdebug_uuid_ctl = DEF_UUID_CTL;
780 static bool sdebug_random = DEF_RANDOM;
781 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
782 static bool sdebug_removable = DEF_REMOVABLE;
783 static bool sdebug_deflect_incoming;
784 static bool sdebug_clustering;
785 static bool sdebug_host_lock = DEF_HOST_LOCK;
786 static bool sdebug_strict = DEF_STRICT;
787 static bool sdebug_any_injecting_opt;
788 static bool sdebug_verbose;
789 static bool have_dif_prot;
790 static bool write_since_sync;
791 static bool sdebug_statistics = DEF_STATISTICS;
792 static bool sdebug_wp;
793 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
794 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
795 static char *sdeb_zbc_model_s;
796 
797 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
798 			  SAM_LUN_AM_FLAT = 0x1,
799 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
800 			  SAM_LUN_AM_EXTENDED = 0x3};
801 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
802 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
803 
804 static unsigned int sdebug_store_sectors;
805 static sector_t sdebug_capacity;	/* in sectors */
806 
807 /* old BIOS stuff; the kernel may drop these, but some mode sense pages
808    may still need them */
809 static int sdebug_heads;		/* heads per disk */
810 static int sdebug_cylinders_per;	/* cylinders per surface */
811 static int sdebug_sectors_per;		/* sectors per cylinder */
812 
813 static LIST_HEAD(sdebug_host_list);
814 static DEFINE_SPINLOCK(sdebug_host_list_lock);
815 
816 static struct xarray per_store_arr;
817 static struct xarray *per_store_ap = &per_store_arr;
818 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
819 static int sdeb_most_recent_idx = -1;
820 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
821 
822 static unsigned long map_size;
823 static int num_aborts;
824 static int num_dev_resets;
825 static int num_target_resets;
826 static int num_bus_resets;
827 static int num_host_resets;
828 static int dix_writes;
829 static int dix_reads;
830 static int dif_errors;
831 
832 /* ZBC global data */
833 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
834 static int sdeb_zbc_zone_size_mb;
835 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
836 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
837 
838 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
839 static int poll_queues; /* io_uring iopoll interface */
840 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
841 
842 static DEFINE_RWLOCK(atomic_rw);
843 static DEFINE_RWLOCK(atomic_rw2);
844 
845 static rwlock_t *ramdisk_lck_a[2];
846 
847 static char sdebug_proc_name[] = MY_NAME;
848 static const char *my_name = MY_NAME;
849 
850 static struct bus_type pseudo_lld_bus;
851 
852 static struct device_driver sdebug_driverfs_driver = {
853 	.name 		= sdebug_proc_name,
854 	.bus		= &pseudo_lld_bus,
855 };
856 
857 static const int check_condition_result =
858 	SAM_STAT_CHECK_CONDITION;
859 
860 static const int illegal_condition_result =
861 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
862 
863 static const int device_qfull_result =
864 	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
865 
866 static const int condition_met_result = SAM_STAT_CONDITION_MET;
867 
868 
869 /* Only do the extra work involved in logical block provisioning if one or
870  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
871  * real reads and writes (i.e. not skipping them for speed).
872  */
873 static inline bool scsi_debug_lbp(void)
874 {
875 	return 0 == sdebug_fake_rw &&
876 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
877 }
878 
879 static void *lba2fake_store(struct sdeb_store_info *sip,
880 			    unsigned long long lba)
881 {
882 	struct sdeb_store_info *lsip = sip;
883 
884 	lba = do_div(lba, sdebug_store_sectors);
885 	if (!sip || !sip->storep) {
886 		WARN_ON_ONCE(true);
887 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
888 	}
889 	return lsip->storep + lba * sdebug_sector_size;
890 }
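/* Worked example of the modulo wrap above: with dev_size_mb=8 and a 512
 * byte sector size the store holds 16384 sectors, so a virtual_gb-enlarged
 * access to LBA 20000 lands on store sector 20000 % 16384 = 3616. Reads
 * beyond the real store therefore return whatever was last written to the
 * wrapped location.
 */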
891 
892 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
893 				      sector_t sector)
894 {
895 	sector = sector_div(sector, sdebug_store_sectors);
896 
897 	return sip->dif_storep + sector;
898 }
899 
900 static void sdebug_max_tgts_luns(void)
901 {
902 	struct sdebug_host_info *sdbg_host;
903 	struct Scsi_Host *hpnt;
904 
905 	spin_lock(&sdebug_host_list_lock);
906 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
907 		hpnt = sdbg_host->shost;
908 		if ((hpnt->this_id >= 0) &&
909 		    (sdebug_num_tgts > hpnt->this_id))
910 			hpnt->max_id = sdebug_num_tgts + 1;
911 		else
912 			hpnt->max_id = sdebug_num_tgts;
913 		/* sdebug_max_luns; */
914 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
915 	}
916 	spin_unlock(&sdebug_host_list_lock);
917 }
918 
919 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
920 
921 /* Set in_bit to -1 to indicate no bit position of invalid field */
922 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
923 				 enum sdeb_cmd_data c_d,
924 				 int in_byte, int in_bit)
925 {
926 	unsigned char *sbuff;
927 	u8 sks[4];
928 	int sl, asc;
929 
930 	sbuff = scp->sense_buffer;
931 	if (!sbuff) {
932 		sdev_printk(KERN_ERR, scp->device,
933 			    "%s: sense_buffer is NULL\n", __func__);
934 		return;
935 	}
936 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
937 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
938 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
939 	memset(sks, 0, sizeof(sks));
940 	sks[0] = 0x80;
941 	if (c_d)
942 		sks[0] |= 0x40;
943 	if (in_bit >= 0) {
944 		sks[0] |= 0x8;
945 		sks[0] |= 0x7 & in_bit;
946 	}
947 	put_unaligned_be16(in_byte, sks + 1);
948 	if (sdebug_dsense) {
949 		sl = sbuff[7] + 8;
950 		sbuff[7] = sl;
951 		sbuff[sl] = 0x2;
952 		sbuff[sl + 1] = 0x6;
953 		memcpy(sbuff + sl + 4, sks, 3);
954 	} else
955 		memcpy(sbuff + 15, sks, 3);
956 	if (sdebug_verbose)
957 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
958 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
959 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
960 }
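/* Worked example of the sense-key-specific bytes built above (fixed format
 * sense, i.e. sdebug_dsense=0): flagging cdb byte 2, bit 3 yields
 * sks[] = { 0x80 | 0x40 | 0x8 | 0x3, 0x00, 0x02 } = { 0xcb, 0x00, 0x02 },
 * which lands at bytes 15..17 of the sense buffer per SPC.
 */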
961 
962 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
963 {
964 	if (!scp->sense_buffer) {
965 		sdev_printk(KERN_ERR, scp->device,
966 			    "%s: sense_buffer is NULL\n", __func__);
967 		return;
968 	}
969 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
970 
971 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
972 
973 	if (sdebug_verbose)
974 		sdev_printk(KERN_INFO, scp->device,
975 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
976 			    my_name, key, asc, asq);
977 }
978 
979 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
980 {
981 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
982 }
983 
984 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
985 			    void __user *arg)
986 {
987 	if (sdebug_verbose) {
988 		if (0x1261 == cmd)
989 			sdev_printk(KERN_INFO, dev,
990 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
991 		else if (0x5331 == cmd)
992 			sdev_printk(KERN_INFO, dev,
993 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
994 				    __func__);
995 		else
996 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
997 				    __func__, cmd);
998 	}
999 	return -EINVAL;
1000 	/* return -ENOTTY; // correct return but upsets fdisk */
1001 }
1002 
1003 static void config_cdb_len(struct scsi_device *sdev)
1004 {
1005 	switch (sdebug_cdb_len) {
1006 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1007 		sdev->use_10_for_rw = false;
1008 		sdev->use_16_for_rw = false;
1009 		sdev->use_10_for_ms = false;
1010 		break;
1011 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1012 		sdev->use_10_for_rw = true;
1013 		sdev->use_16_for_rw = false;
1014 		sdev->use_10_for_ms = false;
1015 		break;
1016 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1017 		sdev->use_10_for_rw = true;
1018 		sdev->use_16_for_rw = false;
1019 		sdev->use_10_for_ms = true;
1020 		break;
1021 	case 16:
1022 		sdev->use_10_for_rw = false;
1023 		sdev->use_16_for_rw = true;
1024 		sdev->use_10_for_ms = true;
1025 		break;
1026 	case 32: /* No knobs to suggest this so same as 16 for now */
1027 		sdev->use_10_for_rw = false;
1028 		sdev->use_16_for_rw = true;
1029 		sdev->use_10_for_ms = true;
1030 		break;
1031 	default:
1032 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1033 			sdebug_cdb_len);
1034 		sdev->use_10_for_rw = true;
1035 		sdev->use_16_for_rw = false;
1036 		sdev->use_10_for_ms = false;
1037 		sdebug_cdb_len = 10;
1038 		break;
1039 	}
1040 }
1041 
1042 static void all_config_cdb_len(void)
1043 {
1044 	struct sdebug_host_info *sdbg_host;
1045 	struct Scsi_Host *shost;
1046 	struct scsi_device *sdev;
1047 
1048 	spin_lock(&sdebug_host_list_lock);
1049 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1050 		shost = sdbg_host->shost;
1051 		shost_for_each_device(sdev, shost) {
1052 			config_cdb_len(sdev);
1053 		}
1054 	}
1055 	spin_unlock(&sdebug_host_list_lock);
1056 }
1057 
1058 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1059 {
1060 	struct sdebug_host_info *sdhp;
1061 	struct sdebug_dev_info *dp;
1062 
1063 	spin_lock(&sdebug_host_list_lock);
1064 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1065 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1066 			if ((devip->sdbg_host == dp->sdbg_host) &&
1067 			    (devip->target == dp->target))
1068 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1069 		}
1070 	}
1071 	spin_unlock(&sdebug_host_list_lock);
1072 }
1073 
1074 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1075 {
1076 	int k;
1077 
1078 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1079 	if (k != SDEBUG_NUM_UAS) {
1080 		const char *cp = NULL;
1081 
1082 		switch (k) {
1083 		case SDEBUG_UA_POR:
1084 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1085 					POWER_ON_RESET_ASCQ);
1086 			if (sdebug_verbose)
1087 				cp = "power on reset";
1088 			break;
1089 		case SDEBUG_UA_BUS_RESET:
1090 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1091 					BUS_RESET_ASCQ);
1092 			if (sdebug_verbose)
1093 				cp = "bus reset";
1094 			break;
1095 		case SDEBUG_UA_MODE_CHANGED:
1096 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1097 					MODE_CHANGED_ASCQ);
1098 			if (sdebug_verbose)
1099 				cp = "mode parameters changed";
1100 			break;
1101 		case SDEBUG_UA_CAPACITY_CHANGED:
1102 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1103 					CAPACITY_CHANGED_ASCQ);
1104 			if (sdebug_verbose)
1105 				cp = "capacity data changed";
1106 			break;
1107 		case SDEBUG_UA_MICROCODE_CHANGED:
1108 			mk_sense_buffer(scp, UNIT_ATTENTION,
1109 					TARGET_CHANGED_ASC,
1110 					MICROCODE_CHANGED_ASCQ);
1111 			if (sdebug_verbose)
1112 				cp = "microcode has been changed";
1113 			break;
1114 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1115 			mk_sense_buffer(scp, UNIT_ATTENTION,
1116 					TARGET_CHANGED_ASC,
1117 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1118 			if (sdebug_verbose)
1119 				cp = "microcode has been changed without reset";
1120 			break;
1121 		case SDEBUG_UA_LUNS_CHANGED:
1122 			/*
1123 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1124 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1125 			 * on the target, until a REPORT LUNS command is
1126 			 * received.  SPC-4 behavior is to report it only once.
1127 			 * NOTE:  sdebug_scsi_level does not use the same
1128 			 * values as struct scsi_device->scsi_level.
1129 			 */
1130 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1131 				clear_luns_changed_on_target(devip);
1132 			mk_sense_buffer(scp, UNIT_ATTENTION,
1133 					TARGET_CHANGED_ASC,
1134 					LUNS_CHANGED_ASCQ);
1135 			if (sdebug_verbose)
1136 				cp = "reported luns data has changed";
1137 			break;
1138 		default:
1139 			pr_warn("unexpected unit attention code=%d\n", k);
1140 			if (sdebug_verbose)
1141 				cp = "unknown";
1142 			break;
1143 		}
1144 		clear_bit(k, devip->uas_bm);
1145 		if (sdebug_verbose)
1146 			sdev_printk(KERN_INFO, scp->device,
1147 				   "%s reports: Unit attention: %s\n",
1148 				   my_name, cp);
1149 		return check_condition_result;
1150 	}
1151 	return 0;
1152 }
1153 
1154 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1155 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1156 				int arr_len)
1157 {
1158 	int act_len;
1159 	struct scsi_data_buffer *sdb = &scp->sdb;
1160 
1161 	if (!sdb->length)
1162 		return 0;
1163 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1164 		return DID_ERROR << 16;
1165 
1166 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1167 				      arr, arr_len);
1168 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1169 
1170 	return 0;
1171 }
1172 
1173 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1174  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1175  * calls, not required to write in ascending offset order. Assumes resid
1176  * set to scsi_bufflen() prior to any calls.
1177  */
1178 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1179 				  int arr_len, unsigned int off_dst)
1180 {
1181 	unsigned int act_len, n;
1182 	struct scsi_data_buffer *sdb = &scp->sdb;
1183 	off_t skip = off_dst;
1184 
1185 	if (sdb->length <= off_dst)
1186 		return 0;
1187 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1188 		return DID_ERROR << 16;
1189 
1190 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1191 				       arr, arr_len, skip);
1192 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1193 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1194 		 scsi_get_resid(scp));
1195 	n = scsi_bufflen(scp) - (off_dst + act_len);
1196 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1197 	return 0;
1198 }
1199 
1200 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1201  * 'arr' or -1 if error.
1202  */
1203 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1204 			       int arr_len)
1205 {
1206 	if (!scsi_bufflen(scp))
1207 		return 0;
1208 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1209 		return -1;
1210 
1211 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1212 }
1213 
1214 
1215 static char sdebug_inq_vendor_id[9] = "Linux   ";
1216 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1217 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1218 /* Use some locally assigned NAAs for SAS addresses. */
1219 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1220 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1221 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1222 
1223 /* Device identification VPD page. Returns number of bytes placed in arr */
1224 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1225 			  int target_dev_id, int dev_id_num,
1226 			  const char *dev_id_str, int dev_id_str_len,
1227 			  const uuid_t *lu_name)
1228 {
1229 	int num, port_a;
1230 	char b[32];
1231 
1232 	port_a = target_dev_id + 1;
1233 	/* T10 vendor identifier field format (faked) */
1234 	arr[0] = 0x2;	/* ASCII */
1235 	arr[1] = 0x1;
1236 	arr[2] = 0x0;
1237 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1238 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1239 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1240 	num = 8 + 16 + dev_id_str_len;
1241 	arr[3] = num;
1242 	num += 4;
1243 	if (dev_id_num >= 0) {
1244 		if (sdebug_uuid_ctl) {
1245 			/* Locally assigned UUID */
1246 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1247 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1248 			arr[num++] = 0x0;
1249 			arr[num++] = 0x12;
1250 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1251 			arr[num++] = 0x0;
1252 			memcpy(arr + num, lu_name, 16);
1253 			num += 16;
1254 		} else {
1255 			/* NAA-3, Logical unit identifier (binary) */
1256 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1257 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1258 			arr[num++] = 0x0;
1259 			arr[num++] = 0x8;
1260 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1261 			num += 8;
1262 		}
1263 		/* Target relative port number */
1264 		arr[num++] = 0x61;	/* proto=sas, binary */
1265 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1266 		arr[num++] = 0x0;	/* reserved */
1267 		arr[num++] = 0x4;	/* length */
1268 		arr[num++] = 0x0;	/* reserved */
1269 		arr[num++] = 0x0;	/* reserved */
1270 		arr[num++] = 0x0;
1271 		arr[num++] = 0x1;	/* relative port A */
1272 	}
1273 	/* NAA-3, Target port identifier */
1274 	arr[num++] = 0x61;	/* proto=sas, binary */
1275 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1276 	arr[num++] = 0x0;
1277 	arr[num++] = 0x8;
1278 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1279 	num += 8;
1280 	/* NAA-3, Target port group identifier */
1281 	arr[num++] = 0x61;	/* proto=sas, binary */
1282 	arr[num++] = 0x95;	/* piv=1, target port group id */
1283 	arr[num++] = 0x0;
1284 	arr[num++] = 0x4;
1285 	arr[num++] = 0;
1286 	arr[num++] = 0;
1287 	put_unaligned_be16(port_group_id, arr + num);
1288 	num += 2;
1289 	/* NAA-3, Target device identifier */
1290 	arr[num++] = 0x61;	/* proto=sas, binary */
1291 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1292 	arr[num++] = 0x0;
1293 	arr[num++] = 0x8;
1294 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1295 	num += 8;
1296 	/* SCSI name string: Target device identifier */
1297 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1298 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1299 	arr[num++] = 0x0;
1300 	arr[num++] = 24;
1301 	memcpy(arr + num, "naa.32222220", 12);
1302 	num += 12;
1303 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1304 	memcpy(arr + num, b, 8);
1305 	num += 8;
1306 	memset(arr + num, 0, 4);
1307 	num += 4;
1308 	return num;
1309 }
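/* Illustrative host-side check (assumed tooling, not part of the driver):
 * the descriptors built above can be decoded with sg3_utils, e.g.
 *
 *	sg_vpd --page=di /dev/sdX
 *
 * which should list the T10 vendor id, NAA-3 and SCSI name string
 * designators in the order emitted here.
 */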
1310 
1311 static unsigned char vpd84_data[] = {
1312 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1313     0x22,0x22,0x22,0x0,0xbb,0x1,
1314     0x22,0x22,0x22,0x0,0xbb,0x2,
1315 };
1316 
1317 /*  Software interface identification VPD page */
1318 static int inquiry_vpd_84(unsigned char *arr)
1319 {
1320 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1321 	return sizeof(vpd84_data);
1322 }
1323 
1324 /* Management network addresses VPD page */
1325 static int inquiry_vpd_85(unsigned char *arr)
1326 {
1327 	int num = 0;
1328 	const char *na1 = "https://www.kernel.org/config";
1329 	const char *na2 = "http://www.kernel.org/log";
1330 	int plen, olen;
1331 
1332 	arr[num++] = 0x1;	/* lu, storage config */
1333 	arr[num++] = 0x0;	/* reserved */
1334 	arr[num++] = 0x0;
1335 	olen = strlen(na1);
1336 	plen = olen + 1;
1337 	if (plen % 4)
1338 		plen = ((plen / 4) + 1) * 4;
1339 	arr[num++] = plen;	/* length, null terminated, padded */
1340 	memcpy(arr + num, na1, olen);
1341 	memset(arr + num + olen, 0, plen - olen);
1342 	num += plen;
1343 
1344 	arr[num++] = 0x4;	/* lu, logging */
1345 	arr[num++] = 0x0;	/* reserved */
1346 	arr[num++] = 0x0;
1347 	olen = strlen(na2);
1348 	plen = olen + 1;
1349 	if (plen % 4)
1350 		plen = ((plen / 4) + 1) * 4;
1351 	arr[num++] = plen;	/* length, null terminated, padded */
1352 	memcpy(arr + num, na2, olen);
1353 	memset(arr + num + olen, 0, plen - olen);
1354 	num += plen;
1355 
1356 	return num;
1357 }
1358 
1359 /* SCSI ports VPD page */
1360 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1361 {
1362 	int num = 0;
1363 	int port_a, port_b;
1364 
1365 	port_a = target_dev_id + 1;
1366 	port_b = port_a + 1;
1367 	arr[num++] = 0x0;	/* reserved */
1368 	arr[num++] = 0x0;	/* reserved */
1369 	arr[num++] = 0x0;
1370 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1371 	memset(arr + num, 0, 6);
1372 	num += 6;
1373 	arr[num++] = 0x0;
1374 	arr[num++] = 12;	/* length tp descriptor */
1375 	/* naa-5 target port identifier (A) */
1376 	arr[num++] = 0x61;	/* proto=sas, binary */
1377 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x8;	/* length */
1380 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1381 	num += 8;
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x0;	/* reserved */
1384 	arr[num++] = 0x0;
1385 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1386 	memset(arr + num, 0, 6);
1387 	num += 6;
1388 	arr[num++] = 0x0;
1389 	arr[num++] = 12;	/* length tp descriptor */
1390 	/* naa-5 target port identifier (B) */
1391 	arr[num++] = 0x61;	/* proto=sas, binary */
1392 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x8;	/* length */
1395 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1396 	num += 8;
1397 
1398 	return num;
1399 }
1400 
1401 
1402 static unsigned char vpd89_data[] = {
1403 /* from 4th byte */ 0,0,0,0,
1404 'l','i','n','u','x',' ',' ',' ',
1405 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1406 '1','2','3','4',
1407 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1408 0xec,0,0,0,
1409 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1410 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1412 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1413 0x53,0x41,
1414 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1415 0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1417 0x10,0x80,
1418 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1419 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1420 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1422 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1423 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1424 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1429 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1430 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1431 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1444 };
1445 
1446 /* ATA Information VPD page */
1447 static int inquiry_vpd_89(unsigned char *arr)
1448 {
1449 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1450 	return sizeof(vpd89_data);
1451 }
1452 
1453 
1454 static unsigned char vpdb0_data[] = {
1455 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1456 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1458 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1459 };
1460 
1461 /* Block limits VPD page (SBC-3) */
1462 static int inquiry_vpd_b0(unsigned char *arr)
1463 {
1464 	unsigned int gran;
1465 
1466 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1467 
1468 	/* Optimal transfer length granularity */
1469 	if (sdebug_opt_xferlen_exp != 0 &&
1470 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1471 		gran = 1 << sdebug_opt_xferlen_exp;
1472 	else
1473 		gran = 1 << sdebug_physblk_exp;
1474 	put_unaligned_be16(gran, arr + 2);
1475 
1476 	/* Maximum Transfer Length */
1477 	if (sdebug_store_sectors > 0x400)
1478 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1479 
1480 	/* Optimal Transfer Length */
1481 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1482 
1483 	if (sdebug_lbpu) {
1484 		/* Maximum Unmap LBA Count */
1485 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1486 
1487 		/* Maximum Unmap Block Descriptor Count */
1488 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1489 	}
1490 
1491 	/* Unmap Granularity Alignment */
1492 	if (sdebug_unmap_alignment) {
1493 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1494 		arr[28] |= 0x80; /* UGAVALID */
1495 	}
1496 
1497 	/* Optimal Unmap Granularity */
1498 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1499 
1500 	/* Maximum WRITE SAME Length */
1501 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1502 
1503 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1506 }
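
/*
 * Illustrative sketch only, not called by this driver: decode the Block
 * Limits fields at the offsets that inquiry_vpd_b0() writes above. The
 * struct and function names are hypothetical; the offsets follow the
 * SBC-3 layout used above (granularity at byte 2, maximum transfer
 * length at byte 4, optimal transfer length at byte 8, all relative to
 * the page's 4th byte as noted for vpdb0_data).
 */
struct sdeb_blk_limits_ex {
	u16 opt_xfer_gran;	/* OPTIMAL TRANSFER LENGTH GRANULARITY */
	u32 max_xfer_len;	/* MAXIMUM TRANSFER LENGTH; 0 => no limit */
	u32 opt_xfer_len;	/* OPTIMAL TRANSFER LENGTH */
};

static inline void sdeb_decode_vpd_b0_ex(const unsigned char *arr,
					 struct sdeb_blk_limits_ex *bl)
{
	bl->opt_xfer_gran = get_unaligned_be16(arr + 2);
	bl->max_xfer_len = get_unaligned_be32(arr + 4);
	bl->opt_xfer_len = get_unaligned_be32(arr + 8);
}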
1507 
1508 /* Block device characteristics VPD page (SBC-3) */
1509 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1510 {
1511 	memset(arr, 0, 0x3c);
1512 	arr[0] = 0;
1513 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1514 	arr[2] = 0;
1515 	arr[3] = 5;	/* less than 1.8" */
1516 	if (devip->zmodel == BLK_ZONED_HA)
1517 		arr[4] = 1 << 4;	/* zoned field = 01b */
1518 
1519 	return 0x3c;
1520 }
1521 
1522 /* Logical block provisioning VPD page (SBC-4) */
1523 static int inquiry_vpd_b2(unsigned char *arr)
1524 {
1525 	memset(arr, 0, 0x4);
1526 	arr[0] = 0;			/* threshold exponent */
1527 	if (sdebug_lbpu)
1528 		arr[1] = 1 << 7;
1529 	if (sdebug_lbpws)
1530 		arr[1] |= 1 << 6;
1531 	if (sdebug_lbpws10)
1532 		arr[1] |= 1 << 5;
1533 	if (sdebug_lbprz && scsi_debug_lbp())
1534 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1535 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1536 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1537 	/* threshold_percentage=0 */
1538 	return 0x4;
1539 }
1540 
1541 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1542 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1543 {
1544 	memset(arr, 0, 0x3c);
1545 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1546 	/*
1547 	 * Set Optimal number of open sequential write preferred zones and
1548 	 * Optimal number of non-sequentially written sequential write
1549 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1550 	 * fields set to zero, apart from Max. number of open swrz_s field.
1551 	 */
1552 	put_unaligned_be32(0xffffffff, &arr[4]);
1553 	put_unaligned_be32(0xffffffff, &arr[8]);
1554 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1555 		put_unaligned_be32(devip->max_open, &arr[12]);
1556 	else
1557 		put_unaligned_be32(0xffffffff, &arr[12]);
1558 	return 0x3c;
1559 }
1560 
1561 #define SDEBUG_LONG_INQ_SZ 96
1562 #define SDEBUG_MAX_INQ_ARR_SZ 584
1563 
1564 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1565 {
1566 	unsigned char pq_pdt;
1567 	unsigned char *arr;
1568 	unsigned char *cmd = scp->cmnd;
1569 	u32 alloc_len, n;
1570 	int ret;
1571 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1572 
1573 	alloc_len = get_unaligned_be16(cmd + 3);
1574 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1575 	if (!arr)
1576 		return DID_REQUEUE << 16;
1577 	is_disk = (sdebug_ptype == TYPE_DISK);
1578 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1579 	is_disk_zbc = (is_disk || is_zbc);
1580 	have_wlun = scsi_is_wlun(scp->device->lun);
1581 	if (have_wlun)
1582 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1583 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1584 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1585 	else
1586 		pq_pdt = (sdebug_ptype & 0x1f);
1587 	arr[0] = pq_pdt;
1588 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1589 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1590 		kfree(arr);
1591 		return check_condition_result;
1592 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1593 		int lu_id_num, port_group_id, target_dev_id;
1594 		u32 len;
1595 		char lu_id_str[6];
1596 		int host_no = devip->sdbg_host->shost->host_no;
1597 
1598 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1599 		    (devip->channel & 0x7f);
1600 		if (sdebug_vpd_use_hostno == 0)
1601 			host_no = 0;
1602 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1603 			    (devip->target * 1000) + devip->lun);
1604 		target_dev_id = ((host_no + 1) * 2000) +
1605 				 (devip->target * 1000) - 3;
1606 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1607 		if (0 == cmd[2]) { /* supported vital product data pages */
1608 			arr[1] = cmd[2];	/*sanity */
1609 			n = 4;
1610 			arr[n++] = 0x0;   /* this page */
1611 			arr[n++] = 0x80;  /* unit serial number */
1612 			arr[n++] = 0x83;  /* device identification */
1613 			arr[n++] = 0x84;  /* software interface ident. */
1614 			arr[n++] = 0x85;  /* management network addresses */
1615 			arr[n++] = 0x86;  /* extended inquiry */
1616 			arr[n++] = 0x87;  /* mode page policy */
1617 			arr[n++] = 0x88;  /* SCSI ports */
1618 			if (is_disk_zbc) {	  /* SBC or ZBC */
1619 				arr[n++] = 0x89;  /* ATA information */
1620 				arr[n++] = 0xb0;  /* Block limits */
1621 				arr[n++] = 0xb1;  /* Block characteristics */
1622 				if (is_disk)
1623 					arr[n++] = 0xb2;  /* LB Provisioning */
1624 				if (is_zbc)
1625 					arr[n++] = 0xb6;  /* ZB dev. char. */
1626 			}
1627 			arr[3] = n - 4;	  /* number of supported VPD pages */
1628 		} else if (0x80 == cmd[2]) { /* unit serial number */
1629 			arr[1] = cmd[2];	/*sanity */
1630 			arr[3] = len;
1631 			memcpy(&arr[4], lu_id_str, len);
1632 		} else if (0x83 == cmd[2]) { /* device identification */
1633 			arr[1] = cmd[2];	/*sanity */
1634 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1635 						target_dev_id, lu_id_num,
1636 						lu_id_str, len,
1637 						&devip->lu_name);
1638 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1639 			arr[1] = cmd[2];	/*sanity */
1640 			arr[3] = inquiry_vpd_84(&arr[4]);
1641 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = inquiry_vpd_85(&arr[4]);
1644 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = 0x3c;	/* number of following entries */
1647 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1648 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1649 			else if (have_dif_prot)
1650 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1651 			else
1652 				arr[4] = 0x0;   /* no protection stuff */
1653 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1654 		} else if (0x87 == cmd[2]) { /* mode page policy */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = 0x8;	/* number of following entries */
1657 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1658 			arr[6] = 0x80;	/* mlus, shared */
1659 			arr[8] = 0x18;	 /* protocol specific lu */
1660 			arr[10] = 0x82;	 /* mlus, per initiator port */
1661 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1662 			arr[1] = cmd[2];	/*sanity */
1663 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1664 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			n = inquiry_vpd_89(&arr[4]);
1667 			put_unaligned_be16(n, arr + 2);
1668 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1669 			arr[1] = cmd[2];        /*sanity */
1670 			arr[3] = inquiry_vpd_b0(&arr[4]);
1671 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1672 			arr[1] = cmd[2];        /*sanity */
1673 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1674 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1675 			arr[1] = cmd[2];        /*sanity */
1676 			arr[3] = inquiry_vpd_b2(&arr[4]);
1677 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1678 			arr[1] = cmd[2];        /*sanity */
1679 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1680 		} else {
1681 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1682 			kfree(arr);
1683 			return check_condition_result;
1684 		}
1685 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1686 		ret = fill_from_dev_buffer(scp, arr,
1687 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1688 		kfree(arr);
1689 		return ret;
1690 	}
1691 	/* drops through here for a standard inquiry */
1692 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1693 	arr[2] = sdebug_scsi_level;
1694 	arr[3] = 2;    /* response_data_format==2 */
1695 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1696 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1697 	if (sdebug_vpd_use_hostno == 0)
1698 		arr[5] |= 0x10; /* claim: implicit TPGS */
1699 	arr[6] = 0x10; /* claim: MultiP */
1700 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1701 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1702 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1703 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1704 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1705 	/* Use Vendor Specific area to place driver date in ASCII hex */
1706 	memcpy(&arr[36], sdebug_version_date, 8);
1707 	/* version descriptors (2 bytes each) follow */
1708 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1709 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1710 	n = 62;
1711 	if (is_disk) {		/* SBC-4 no version claimed */
1712 		put_unaligned_be16(0x600, arr + n);
1713 		n += 2;
1714 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1715 		put_unaligned_be16(0x525, arr + n);
1716 		n += 2;
1717 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1718 		put_unaligned_be16(0x624, arr + n);
1719 		n += 2;
1720 	}
1721 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1722 	ret = fill_from_dev_buffer(scp, arr,
1723 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1724 	kfree(arr);
1725 	return ret;
1726 }
1727 
1728 /* See resp_iec_m_pg() for how this data is manipulated */
1729 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1730 				   0, 0, 0x0, 0x0};
1731 
1732 static int resp_requests(struct scsi_cmnd *scp,
1733 			 struct sdebug_dev_info *devip)
1734 {
1735 	unsigned char *cmd = scp->cmnd;
1736 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1737 	bool dsense = !!(cmd[1] & 1);
1738 	u32 alloc_len = cmd[4];
1739 	u32 len = 18;
1740 	int stopped_state = atomic_read(&devip->stopped);
1741 
1742 	memset(arr, 0, sizeof(arr));
1743 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1744 		if (dsense) {
1745 			arr[0] = 0x72;
1746 			arr[1] = NOT_READY;
1747 			arr[2] = LOGICAL_UNIT_NOT_READY;
1748 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1749 			len = 8;
1750 		} else {
1751 			arr[0] = 0x70;
1752 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1753 			arr[7] = 0xa;			/* 18 byte sense buffer */
1754 			arr[12] = LOGICAL_UNIT_NOT_READY;
1755 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1756 		}
1757 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1758 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1759 		if (dsense) {
1760 			arr[0] = 0x72;
1761 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1762 			arr[2] = THRESHOLD_EXCEEDED;
1763 			arr[3] = 0xff;		/* Failure prediction(false) */
1764 			len = 8;
1765 		} else {
1766 			arr[0] = 0x70;
1767 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1768 			arr[7] = 0xa;	/* 18 byte sense buffer */
1769 			arr[12] = THRESHOLD_EXCEEDED;
1770 			arr[13] = 0xff;		/* Failure prediction(false) */
1771 		}
1772 	} else {	/* nothing to report */
1773 		if (dsense) {
1774 			len = 8;
1775 			memset(arr, 0, len);
1776 			arr[0] = 0x72;
1777 		} else {
1778 			memset(arr, 0, len);
1779 			arr[0] = 0x70;
1780 			arr[7] = 0xa;
1781 		}
1782 	}
1783 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1784 }
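
/*
 * Illustrative sketch only, not called by this driver: extract the sense
 * key, ASC and ASCQ from a buffer built by resp_requests() above. Per
 * SPC, descriptor format (response codes 0x72/0x73) keeps them in bytes
 * 1..3; fixed format (0x70/0x71) in bytes 2, 12 and 13. The function
 * name is hypothetical.
 */
static inline void sdeb_sense_kaq_ex(const unsigned char *sb, u8 *key,
				     u8 *asc, u8 *ascq)
{
	u8 rc = sb[0] & 0x7f;	/* response code, VALID bit masked off */

	if (rc == 0x72 || rc == 0x73) {		/* descriptor format */
		*key = sb[1] & 0xf;
		*asc = sb[2];
		*ascq = sb[3];
	} else {				/* assume fixed format */
		*key = sb[2] & 0xf;
		*asc = sb[12];
		*ascq = sb[13];
	}
}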
1785 
1786 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1787 {
1788 	unsigned char *cmd = scp->cmnd;
1789 	int power_cond, want_stop, stopped_state;
1790 	bool changing;
1791 
1792 	power_cond = (cmd[4] & 0xf0) >> 4;
1793 	if (power_cond) {
1794 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1795 		return check_condition_result;
1796 	}
1797 	want_stop = !(cmd[4] & 1);
1798 	stopped_state = atomic_read(&devip->stopped);
1799 	if (stopped_state == 2) {
1800 		ktime_t now_ts = ktime_get_boottime();
1801 
1802 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1803 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1804 
1805 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1806 				/* tur_ms_to_ready timer expired */
1807 				atomic_set(&devip->stopped, 0);
1808 				stopped_state = 0;
1809 			}
1810 		}
1811 		if (stopped_state == 2) {
1812 			if (want_stop) {
1813 				stopped_state = 1;	/* dummy up success */
1814 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1815 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1816 				return check_condition_result;
1817 			}
1818 		}
1819 	}
1820 	changing = (stopped_state != want_stop);
1821 	if (changing)
1822 		atomic_xchg(&devip->stopped, want_stop);
1823 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1824 		return SDEG_RES_IMMED_MASK;
1825 	else
1826 		return 0;
1827 }
1828 
1829 static sector_t get_sdebug_capacity(void)
1830 {
1831 	static const unsigned int gibibyte = 1073741824;
1832 
1833 	if (sdebug_virtual_gb > 0)
1834 		return (sector_t)sdebug_virtual_gb *
1835 			(gibibyte / sdebug_sector_size);
1836 	else
1837 		return sdebug_store_sectors;
1838 }
1839 
1840 #define SDEBUG_READCAP_ARR_SZ 8
1841 static int resp_readcap(struct scsi_cmnd *scp,
1842 			struct sdebug_dev_info *devip)
1843 {
1844 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1845 	unsigned int capac;
1846 
1847 	/* following just in case virtual_gb changed */
1848 	sdebug_capacity = get_sdebug_capacity();
1849 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1850 	if (sdebug_capacity < 0xffffffff) {
1851 		capac = (unsigned int)sdebug_capacity - 1;
1852 		put_unaligned_be32(capac, arr + 0);
1853 	} else
1854 		put_unaligned_be32(0xffffffff, arr + 0);
1855 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1856 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1857 }
1858 
1859 #define SDEBUG_READCAP16_ARR_SZ 32
1860 static int resp_readcap16(struct scsi_cmnd *scp,
1861 			  struct sdebug_dev_info *devip)
1862 {
1863 	unsigned char *cmd = scp->cmnd;
1864 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1865 	u32 alloc_len;
1866 
1867 	alloc_len = get_unaligned_be32(cmd + 10);
1868 	/* following just in case virtual_gb changed */
1869 	sdebug_capacity = get_sdebug_capacity();
1870 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1871 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1872 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1873 	arr[13] = sdebug_physblk_exp & 0xf;
1874 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1875 
1876 	if (scsi_debug_lbp()) {
1877 		arr[14] |= 0x80; /* LBPME */
1878 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1879 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1880 		 * in the wider field maps to 0 in this field.
1881 		 */
1882 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1883 			arr[14] |= 0x40;
1884 	}
1885 
1886 	arr[15] = sdebug_lowest_aligned & 0xff;
1887 
1888 	if (have_dif_prot) {
1889 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1890 		arr[12] |= 1; /* PROT_EN */
1891 	}
1892 
1893 	return fill_from_dev_buffer(scp, arr,
1894 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1895 }
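
/*
 * Illustrative sketch only: the LBPRZ narrowing described in the comment
 * above. The 3-bit LBPRZ field of the Logical Block Provisioning VPD
 * page collapses to the single READ CAPACITY(16) LBPRZ bit; only the low
 * bit survives, so lbprz=2 maps to 0. The name is hypothetical.
 */
static inline u8 sdeb_lbprz_rc16_bit_ex(u8 vpd_lbprz)
{
	return vpd_lbprz & 0x1;	/* 1 iff unmapped blocks read as zeros */
}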
1896 
1897 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1898 
1899 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1900 			      struct sdebug_dev_info *devip)
1901 {
1902 	unsigned char *cmd = scp->cmnd;
1903 	unsigned char *arr;
1904 	int host_no = devip->sdbg_host->shost->host_no;
1905 	int port_group_a, port_group_b, port_a, port_b;
1906 	u32 alen, n, rlen;
1907 	int ret;
1908 
1909 	alen = get_unaligned_be32(cmd + 6);
1910 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1911 	if (!arr)
1912 		return DID_REQUEUE << 16;
1913 	/*
1914 	 * VPD page 0x88 (SCSI Ports) reports two ports: one real and one
1915 	 * fake port with no device connected. So we create two port
1916 	 * groups with one port each and set the group with port B to
1917 	 * unavailable.
1918 	 */
1919 	port_a = 0x1; /* relative port A */
1920 	port_b = 0x2; /* relative port B */
1921 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1922 			(devip->channel & 0x7f);
1923 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1924 			(devip->channel & 0x7f) + 0x80;
1925 
1926 	/*
1927 	 * The asymmetric access state is cycled according to the host_id.
1928 	 */
1929 	n = 4;
1930 	if (sdebug_vpd_use_hostno == 0) {
1931 		arr[n++] = host_no % 3; /* Asymm access state */
1932 		arr[n++] = 0x0F; /* claim: all states are supported */
1933 	} else {
1934 		arr[n++] = 0x0; /* Active/Optimized path */
1935 		arr[n++] = 0x01; /* only support active/optimized paths */
1936 	}
1937 	put_unaligned_be16(port_group_a, arr + n);
1938 	n += 2;
1939 	arr[n++] = 0;    /* Reserved */
1940 	arr[n++] = 0;    /* Status code */
1941 	arr[n++] = 0;    /* Vendor unique */
1942 	arr[n++] = 0x1;  /* One port per group */
1943 	arr[n++] = 0;    /* Reserved */
1944 	arr[n++] = 0;    /* Reserved */
1945 	put_unaligned_be16(port_a, arr + n);
1946 	n += 2;
1947 	arr[n++] = 3;    /* Port unavailable */
1948 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1949 	put_unaligned_be16(port_group_b, arr + n);
1950 	n += 2;
1951 	arr[n++] = 0;    /* Reserved */
1952 	arr[n++] = 0;    /* Status code */
1953 	arr[n++] = 0;    /* Vendor unique */
1954 	arr[n++] = 0x1;  /* One port per group */
1955 	arr[n++] = 0;    /* Reserved */
1956 	arr[n++] = 0;    /* Reserved */
1957 	put_unaligned_be16(port_b, arr + n);
1958 	n += 2;
1959 
1960 	rlen = n - 4;
1961 	put_unaligned_be32(rlen, arr + 0);
1962 
1963 	/*
1964 	 * Return the smallest of:
1965 	 * - the allocation length from the cdb
1966 	 * - the constructed response length
1967 	 * - the maximum array size
1968 	 */
1969 	rlen = min(alen, n);
1970 	ret = fill_from_dev_buffer(scp, arr,
1971 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1972 	kfree(arr);
1973 	return ret;
1974 }
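
/*
 * Illustrative sketch only: the RETURN DATA LENGTH field written above
 * ("rlen = n - 4") excludes the 4 byte header, so an initiator sizing a
 * retry allocation would apply the inverse. The name is hypothetical.
 */
static inline u32 sdeb_tgtpgs_total_len_ex(const unsigned char *rsp)
{
	return get_unaligned_be32(rsp) + 4;	/* header + descriptors */
}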
1975 
1976 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1977 			     struct sdebug_dev_info *devip)
1978 {
1979 	bool rctd;
1980 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1981 	u16 req_sa, u;
1982 	u32 alloc_len, a_len;
1983 	int k, offset, len, errsts, count, bump, na;
1984 	const struct opcode_info_t *oip;
1985 	const struct opcode_info_t *r_oip;
1986 	u8 *arr;
1987 	u8 *cmd = scp->cmnd;
1988 
1989 	rctd = !!(cmd[2] & 0x80);
1990 	reporting_opts = cmd[2] & 0x7;
1991 	req_opcode = cmd[3];
1992 	req_sa = get_unaligned_be16(cmd + 4);
1993 	alloc_len = get_unaligned_be32(cmd + 6);
1994 	if (alloc_len < 4 || alloc_len > 0xffff) {
1995 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1996 		return check_condition_result;
1997 	}
1998 	if (alloc_len > 8192)
1999 		a_len = 8192;
2000 	else
2001 		a_len = alloc_len;
2002 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2003 	if (NULL == arr) {
2004 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2005 				INSUFF_RES_ASCQ);
2006 		return check_condition_result;
2007 	}
2008 	switch (reporting_opts) {
2009 	case 0:	/* all commands */
2010 		/* count number of commands */
2011 		for (count = 0, oip = opcode_info_arr;
2012 		     oip->num_attached != 0xff; ++oip) {
2013 			if (F_INV_OP & oip->flags)
2014 				continue;
2015 			count += (oip->num_attached + 1);
2016 		}
2017 		bump = rctd ? 20 : 8;
2018 		put_unaligned_be32(count * bump, arr);
2019 		for (offset = 4, oip = opcode_info_arr;
2020 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2021 			if (F_INV_OP & oip->flags)
2022 				continue;
2023 			na = oip->num_attached;
2024 			arr[offset] = oip->opcode;
2025 			put_unaligned_be16(oip->sa, arr + offset + 2);
2026 			if (rctd)
2027 				arr[offset + 5] |= 0x2;
2028 			if (FF_SA & oip->flags)
2029 				arr[offset + 5] |= 0x1;
2030 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2031 			if (rctd)
2032 				put_unaligned_be16(0xa, arr + offset + 8);
2033 			r_oip = oip;
2034 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2035 				if (F_INV_OP & oip->flags)
2036 					continue;
2037 				offset += bump;
2038 				arr[offset] = oip->opcode;
2039 				put_unaligned_be16(oip->sa, arr + offset + 2);
2040 				if (rctd)
2041 					arr[offset + 5] |= 0x2;
2042 				if (FF_SA & oip->flags)
2043 					arr[offset + 5] |= 0x1;
2044 				put_unaligned_be16(oip->len_mask[0],
2045 						   arr + offset + 6);
2046 				if (rctd)
2047 					put_unaligned_be16(0xa,
2048 							   arr + offset + 8);
2049 			}
2050 			oip = r_oip;
2051 			offset += bump;
2052 		}
2053 		break;
2054 	case 1:	/* one command: opcode only */
2055 	case 2:	/* one command: opcode plus service action */
2056 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2057 		sdeb_i = opcode_ind_arr[req_opcode];
2058 		oip = &opcode_info_arr[sdeb_i];
2059 		if (F_INV_OP & oip->flags) {
2060 			supp = 1;
2061 			offset = 4;
2062 		} else {
2063 			if (1 == reporting_opts) {
2064 				if (FF_SA & oip->flags) {
2065 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2066 							     2, 2);
2067 					kfree(arr);
2068 					return check_condition_result;
2069 				}
2070 				req_sa = 0;
2071 			} else if (2 == reporting_opts &&
2072 				   0 == (FF_SA & oip->flags)) {
2073 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2074 				kfree(arr);
2075 				return check_condition_result;
2076 			}
2077 			if (0 == (FF_SA & oip->flags) &&
2078 			    req_opcode == oip->opcode)
2079 				supp = 3;
2080 			else if (0 == (FF_SA & oip->flags)) {
2081 				na = oip->num_attached;
2082 				for (k = 0, oip = oip->arrp; k < na;
2083 				     ++k, ++oip) {
2084 					if (req_opcode == oip->opcode)
2085 						break;
2086 				}
2087 				supp = (k >= na) ? 1 : 3;
2088 			} else if (req_sa != oip->sa) {
2089 				na = oip->num_attached;
2090 				for (k = 0, oip = oip->arrp; k < na;
2091 				     ++k, ++oip) {
2092 					if (req_sa == oip->sa)
2093 						break;
2094 				}
2095 				supp = (k >= na) ? 1 : 3;
2096 			} else
2097 				supp = 3;
2098 			if (3 == supp) {
2099 				u = oip->len_mask[0];
2100 				put_unaligned_be16(u, arr + 2);
2101 				arr[4] = oip->opcode;
2102 				for (k = 1; k < u; ++k)
2103 					arr[4 + k] = (k < 16) ?
2104 						 oip->len_mask[k] : 0xff;
2105 				offset = 4 + u;
2106 			} else
2107 				offset = 4;
2108 		}
2109 		arr[1] = (rctd ? 0x80 : 0) | supp;
2110 		if (rctd) {
2111 			put_unaligned_be16(0xa, arr + offset);
2112 			offset += 12;
2113 		}
2114 		break;
2115 	default:
2116 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2117 		kfree(arr);
2118 		return check_condition_result;
2119 	}
2120 	offset = (offset < a_len) ? offset : a_len;
2121 	len = (offset < alloc_len) ? offset : alloc_len;
2122 	errsts = fill_from_dev_buffer(scp, arr, len);
2123 	kfree(arr);
2124 	return errsts;
2125 }
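
/*
 * Illustrative sketch only: the descriptor stride used in the
 * "all commands" case above. With RCTD set, each 8 byte command
 * descriptor grows to 20 bytes to hold the 12 byte command timeouts
 * descriptor ("bump = rctd ? 20 : 8"). The name is hypothetical.
 */
static inline int sdeb_rsoc_desc_off_ex(int k, bool rctd)
{
	return 4 + k * (rctd ? 20 : 8);	/* 4 byte header, then descriptors */
}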
2126 
2127 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2128 			  struct sdebug_dev_info *devip)
2129 {
2130 	bool repd;
2131 	u32 alloc_len, len;
2132 	u8 arr[16];
2133 	u8 *cmd = scp->cmnd;
2134 
2135 	memset(arr, 0, sizeof(arr));
2136 	repd = !!(cmd[2] & 0x80);
2137 	alloc_len = get_unaligned_be32(cmd + 6);
2138 	if (alloc_len < 4) {
2139 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2140 		return check_condition_result;
2141 	}
2142 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2143 	arr[1] = 0x1;		/* ITNRS */
2144 	if (repd) {
2145 		arr[3] = 0xc;
2146 		len = 16;
2147 	} else
2148 		len = 4;
2149 
2150 	len = (len < alloc_len) ? len : alloc_len;
2151 	return fill_from_dev_buffer(scp, arr, len);
2152 }
2153 
2154 /* <<Following mode page info copied from ST318451LW>> */
2155 
2156 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2157 {	/* Read-Write Error Recovery page for mode_sense */
2158 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2159 					5, 0, 0xff, 0xff};
2160 
2161 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2162 	if (1 == pcontrol)
2163 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2164 	return sizeof(err_recov_pg);
2165 }
2166 
2167 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2168 { 	/* Disconnect-Reconnect page for mode_sense */
2169 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2170 					 0, 0, 0, 0, 0, 0, 0, 0};
2171 
2172 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2173 	if (1 == pcontrol)
2174 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2175 	return sizeof(disconnect_pg);
2176 }
2177 
2178 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2179 {       /* Format device page for mode_sense */
2180 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2181 				     0, 0, 0, 0, 0, 0, 0, 0,
2182 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2183 
2184 	memcpy(p, format_pg, sizeof(format_pg));
2185 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2186 	put_unaligned_be16(sdebug_sector_size, p + 12);
2187 	if (sdebug_removable)
2188 		p[20] |= 0x20; /* should agree with INQUIRY */
2189 	if (1 == pcontrol)
2190 		memset(p + 2, 0, sizeof(format_pg) - 2);
2191 	return sizeof(format_pg);
2192 }
2193 
2194 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2195 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2196 				     0, 0, 0, 0};
2197 
2198 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2199 { 	/* Caching page for mode_sense */
2200 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2201 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2202 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2203 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2204 
2205 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2206 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2207 	memcpy(p, caching_pg, sizeof(caching_pg));
2208 	if (1 == pcontrol)
2209 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2210 	else if (2 == pcontrol)
2211 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2212 	return sizeof(caching_pg);
2213 }
2214 
2215 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2216 				    0, 0, 0x2, 0x4b};
2217 
2218 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2219 { 	/* Control mode page for mode_sense */
2220 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2221 					0, 0, 0, 0};
2222 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2223 				     0, 0, 0x2, 0x4b};
2224 
2225 	if (sdebug_dsense)
2226 		ctrl_m_pg[2] |= 0x4;
2227 	else
2228 		ctrl_m_pg[2] &= ~0x4;
2229 
2230 	if (sdebug_ato)
2231 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2232 
2233 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2234 	if (1 == pcontrol)
2235 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2236 	else if (2 == pcontrol)
2237 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2238 	return sizeof(ctrl_m_pg);
2239 }
2240 
2241 
2242 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2243 {	/* Informational Exceptions control mode page for mode_sense */
2244 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2245 				       0, 0, 0x0, 0x0};
2246 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2247 				      0, 0, 0x0, 0x0};
2248 
2249 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2250 	if (1 == pcontrol)
2251 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2252 	else if (2 == pcontrol)
2253 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2254 	return sizeof(iec_m_pg);
2255 }
2256 
2257 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2258 {	/* SAS SSP mode page - short format for mode_sense */
2259 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2260 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2261 
2262 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2263 	if (1 == pcontrol)
2264 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2265 	return sizeof(sas_sf_m_pg);
2266 }
2267 
2268 
2269 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2270 			      int target_dev_id)
2271 {	/* SAS phy control and discover mode page for mode_sense */
2272 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2273 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2274 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2275 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2276 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2277 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2278 		    0, 0, 0, 0, 0, 0, 0, 0,
2279 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2280 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2281 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2282 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2283 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2284 		    0, 0, 0, 0, 0, 0, 0, 0,
2285 		};
2286 	int port_a, port_b;
2287 
2288 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2289 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2290 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2291 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2292 	port_a = target_dev_id + 1;
2293 	port_b = port_a + 1;
2294 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2295 	put_unaligned_be32(port_a, p + 20);
2296 	put_unaligned_be32(port_b, p + 48 + 20);
2297 	if (1 == pcontrol)
2298 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2299 	return sizeof(sas_pcd_m_pg);
2300 }
2301 
2302 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2303 {	/* SAS SSP shared protocol specific port mode subpage */
2304 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2305 		    0, 0, 0, 0, 0, 0, 0, 0,
2306 		};
2307 
2308 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2309 	if (1 == pcontrol)
2310 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2311 	return sizeof(sas_sha_m_pg);
2312 }
2313 
2314 #define SDEBUG_MAX_MSENSE_SZ 256
2315 
2316 static int resp_mode_sense(struct scsi_cmnd *scp,
2317 			   struct sdebug_dev_info *devip)
2318 {
2319 	int pcontrol, pcode, subpcode, bd_len;
2320 	unsigned char dev_spec;
2321 	u32 alloc_len, offset, len;
2322 	int target_dev_id;
2323 	int target = scp->device->id;
2324 	unsigned char *ap;
2325 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2326 	unsigned char *cmd = scp->cmnd;
2327 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2328 
2329 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2330 	pcontrol = (cmd[2] & 0xc0) >> 6;
2331 	pcode = cmd[2] & 0x3f;
2332 	subpcode = cmd[3];
2333 	msense_6 = (MODE_SENSE == cmd[0]);
2334 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2335 	is_disk = (sdebug_ptype == TYPE_DISK);
2336 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2337 	if ((is_disk || is_zbc) && !dbd)
2338 		bd_len = llbaa ? 16 : 8;
2339 	else
2340 		bd_len = 0;
2341 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2342 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2343 	if (0x3 == pcontrol) {  /* Saving values not supported */
2344 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2345 		return check_condition_result;
2346 	}
2347 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2348 			(devip->target * 1000) - 3;
2349 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2350 	if (is_disk || is_zbc) {
2351 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2352 		if (sdebug_wp)
2353 			dev_spec |= 0x80;
2354 	} else
2355 		dev_spec = 0x0;
2356 	if (msense_6) {
2357 		arr[2] = dev_spec;
2358 		arr[3] = bd_len;
2359 		offset = 4;
2360 	} else {
2361 		arr[3] = dev_spec;
2362 		if (16 == bd_len)
2363 			arr[4] = 0x1;	/* set LONGLBA bit */
2364 		arr[7] = bd_len;	/* assume 255 or less */
2365 		offset = 8;
2366 	}
2367 	ap = arr + offset;
2368 	if ((bd_len > 0) && (!sdebug_capacity))
2369 		sdebug_capacity = get_sdebug_capacity();
2370 
2371 	if (8 == bd_len) {
2372 		if (sdebug_capacity > 0xfffffffe)
2373 			put_unaligned_be32(0xffffffff, ap + 0);
2374 		else
2375 			put_unaligned_be32(sdebug_capacity, ap + 0);
2376 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2377 		offset += bd_len;
2378 		ap = arr + offset;
2379 	} else if (16 == bd_len) {
2380 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2381 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2382 		offset += bd_len;
2383 		ap = arr + offset;
2384 	}
2385 
2386 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2387 		/* TODO: Control Extension page */
2388 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2389 		return check_condition_result;
2390 	}
2391 	bad_pcode = false;
2392 
2393 	switch (pcode) {
2394 	case 0x1:	/* Read-Write error recovery page, direct access */
2395 		len = resp_err_recov_pg(ap, pcontrol, target);
2396 		offset += len;
2397 		break;
2398 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2399 		len = resp_disconnect_pg(ap, pcontrol, target);
2400 		offset += len;
2401 		break;
2402 	case 0x3:       /* Format device page, direct access */
2403 		if (is_disk) {
2404 			len = resp_format_pg(ap, pcontrol, target);
2405 			offset += len;
2406 		} else
2407 			bad_pcode = true;
2408 		break;
2409 	case 0x8:	/* Caching page, direct access */
2410 		if (is_disk || is_zbc) {
2411 			len = resp_caching_pg(ap, pcontrol, target);
2412 			offset += len;
2413 		} else
2414 			bad_pcode = true;
2415 		break;
2416 	case 0xa:	/* Control Mode page, all devices */
2417 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2418 		offset += len;
2419 		break;
2420 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2421 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2422 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2423 			return check_condition_result;
2424 		}
2425 		len = 0;
2426 		if ((0x0 == subpcode) || (0xff == subpcode))
2427 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2428 		if ((0x1 == subpcode) || (0xff == subpcode))
2429 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2430 						  target_dev_id);
2431 		if ((0x2 == subpcode) || (0xff == subpcode))
2432 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2433 		offset += len;
2434 		break;
2435 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2436 		len = resp_iec_m_pg(ap, pcontrol, target);
2437 		offset += len;
2438 		break;
2439 	case 0x3f:	/* Read all Mode pages */
2440 		if ((0 == subpcode) || (0xff == subpcode)) {
2441 			len = resp_err_recov_pg(ap, pcontrol, target);
2442 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2443 			if (is_disk) {
2444 				len += resp_format_pg(ap + len, pcontrol,
2445 						      target);
2446 				len += resp_caching_pg(ap + len, pcontrol,
2447 						       target);
2448 			} else if (is_zbc) {
2449 				len += resp_caching_pg(ap + len, pcontrol,
2450 						       target);
2451 			}
2452 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2453 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2454 			if (0xff == subpcode) {
2455 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2456 						  target, target_dev_id);
2457 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2458 			}
2459 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2460 			offset += len;
2461 		} else {
2462 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2463 			return check_condition_result;
2464 		}
2465 		break;
2466 	default:
2467 		bad_pcode = true;
2468 		break;
2469 	}
2470 	if (bad_pcode) {
2471 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2472 		return check_condition_result;
2473 	}
2474 	if (msense_6)
2475 		arr[0] = offset - 1;
2476 	else
2477 		put_unaligned_be16((offset - 2), arr + 0);
2478 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2479 }
2480 
2481 #define SDEBUG_MAX_MSELECT_SZ 512
2482 
2483 static int resp_mode_select(struct scsi_cmnd *scp,
2484 			    struct sdebug_dev_info *devip)
2485 {
2486 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2487 	int param_len, res, mpage;
2488 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2489 	unsigned char *cmd = scp->cmnd;
2490 	int mselect6 = (MODE_SELECT == cmd[0]);
2491 
2492 	memset(arr, 0, sizeof(arr));
2493 	pf = cmd[1] & 0x10;
2494 	sp = cmd[1] & 0x1;
2495 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2496 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2497 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2498 		return check_condition_result;
2499 	}
2500 	res = fetch_to_dev_buffer(scp, arr, param_len);
2501 	if (-1 == res)
2502 		return DID_ERROR << 16;
2503 	else if (sdebug_verbose && (res < param_len))
2504 		sdev_printk(KERN_INFO, scp->device,
2505 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2506 			    __func__, param_len, res);
2507 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2508 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2509 	off = bd_len + (mselect6 ? 4 : 8);
2510 	if (md_len > 2 || off >= res) {
2511 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2512 		return check_condition_result;
2513 	}
2514 	mpage = arr[off] & 0x3f;
2515 	ps = !!(arr[off] & 0x80);
2516 	if (ps) {
2517 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2518 		return check_condition_result;
2519 	}
2520 	spf = !!(arr[off] & 0x40);
2521 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2522 		       (arr[off + 1] + 2);
2523 	if ((pg_len + off) > param_len) {
2524 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2525 				PARAMETER_LIST_LENGTH_ERR, 0);
2526 		return check_condition_result;
2527 	}
2528 	switch (mpage) {
2529 	case 0x8:      /* Caching Mode page */
2530 		if (caching_pg[1] == arr[off + 1]) {
2531 			memcpy(caching_pg + 2, arr + off + 2,
2532 			       sizeof(caching_pg) - 2);
2533 			goto set_mode_changed_ua;
2534 		}
2535 		break;
2536 	case 0xa:      /* Control Mode page */
2537 		if (ctrl_m_pg[1] == arr[off + 1]) {
2538 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2539 			       sizeof(ctrl_m_pg) - 2);
2540 			if (ctrl_m_pg[4] & 0x8)
2541 				sdebug_wp = true;
2542 			else
2543 				sdebug_wp = false;
2544 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2545 			goto set_mode_changed_ua;
2546 		}
2547 		break;
2548 	case 0x1c:      /* Informational Exceptions Mode page */
2549 		if (iec_m_pg[1] == arr[off + 1]) {
2550 			memcpy(iec_m_pg + 2, arr + off + 2,
2551 			       sizeof(iec_m_pg) - 2);
2552 			goto set_mode_changed_ua;
2553 		}
2554 		break;
2555 	default:
2556 		break;
2557 	}
2558 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2559 	return check_condition_result;
2560 set_mode_changed_ua:
2561 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2562 	return 0;
2563 }
2564 
2565 static int resp_temp_l_pg(unsigned char *arr)
2566 {
2567 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2568 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2569 		};
2570 
2571 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2572 	return sizeof(temp_l_pg);
2573 }
2574 
2575 static int resp_ie_l_pg(unsigned char *arr)
2576 {
2577 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2578 		};
2579 
2580 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2581 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2582 		arr[4] = THRESHOLD_EXCEEDED;
2583 		arr[5] = 0xff;
2584 	}
2585 	return sizeof(ie_l_pg);
2586 }
2587 
2588 #define SDEBUG_MAX_LSENSE_SZ 512
2589 
2590 static int resp_log_sense(struct scsi_cmnd *scp,
2591 			  struct sdebug_dev_info *devip)
2592 {
2593 	int ppc, sp, pcode, subpcode;
2594 	u32 alloc_len, len, n;
2595 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2596 	unsigned char *cmd = scp->cmnd;
2597 
2598 	memset(arr, 0, sizeof(arr));
2599 	ppc = cmd[1] & 0x2;
2600 	sp = cmd[1] & 0x1;
2601 	if (ppc || sp) {
2602 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2603 		return check_condition_result;
2604 	}
2605 	pcode = cmd[2] & 0x3f;
2606 	subpcode = cmd[3] & 0xff;
2607 	alloc_len = get_unaligned_be16(cmd + 7);
2608 	arr[0] = pcode;
2609 	if (0 == subpcode) {
2610 		switch (pcode) {
2611 		case 0x0:	/* Supported log pages log page */
2612 			n = 4;
2613 			arr[n++] = 0x0;		/* this page */
2614 			arr[n++] = 0xd;		/* Temperature */
2615 			arr[n++] = 0x2f;	/* Informational exceptions */
2616 			arr[3] = n - 4;
2617 			break;
2618 		case 0xd:	/* Temperature log page */
2619 			arr[3] = resp_temp_l_pg(arr + 4);
2620 			break;
2621 		case 0x2f:	/* Informational exceptions log page */
2622 			arr[3] = resp_ie_l_pg(arr + 4);
2623 			break;
2624 		default:
2625 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2626 			return check_condition_result;
2627 		}
2628 	} else if (0xff == subpcode) {
2629 		arr[0] |= 0x40;
2630 		arr[1] = subpcode;
2631 		switch (pcode) {
2632 		case 0x0:	/* Supported log pages and subpages log page */
2633 			n = 4;
2634 			arr[n++] = 0x0;
2635 			arr[n++] = 0x0;		/* 0,0 page */
2636 			arr[n++] = 0x0;
2637 			arr[n++] = 0xff;	/* this page */
2638 			arr[n++] = 0xd;
2639 			arr[n++] = 0x0;		/* Temperature */
2640 			arr[n++] = 0x2f;
2641 			arr[n++] = 0x0;	/* Informational exceptions */
2642 			arr[3] = n - 4;
2643 			break;
2644 		case 0xd:	/* Temperature subpages */
2645 			n = 4;
2646 			arr[n++] = 0xd;
2647 			arr[n++] = 0x0;		/* Temperature */
2648 			arr[3] = n - 4;
2649 			break;
2650 		case 0x2f:	/* Informational exceptions subpages */
2651 			n = 4;
2652 			arr[n++] = 0x2f;
2653 			arr[n++] = 0x0;		/* Informational exceptions */
2654 			arr[3] = n - 4;
2655 			break;
2656 		default:
2657 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2658 			return check_condition_result;
2659 		}
2660 	} else {
2661 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2662 		return check_condition_result;
2663 	}
2664 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2665 	return fill_from_dev_buffer(scp, arr,
2666 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2667 }
2668 
2669 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2670 {
2671 	return devip->nr_zones != 0;
2672 }
2673 
2674 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2675 					unsigned long long lba)
2676 {
2677 	return &devip->zstate[lba >> devip->zsize_shift];
2678 }
2679 
2680 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2681 {
2682 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2683 }
2684 
2685 static void zbc_close_zone(struct sdebug_dev_info *devip,
2686 			   struct sdeb_zone_state *zsp)
2687 {
2688 	enum sdebug_z_cond zc;
2689 
2690 	if (zbc_zone_is_conv(zsp))
2691 		return;
2692 
2693 	zc = zsp->z_cond;
2694 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2695 		return;
2696 
2697 	if (zc == ZC2_IMPLICIT_OPEN)
2698 		devip->nr_imp_open--;
2699 	else
2700 		devip->nr_exp_open--;
2701 
2702 	if (zsp->z_wp == zsp->z_start) {
2703 		zsp->z_cond = ZC1_EMPTY;
2704 	} else {
2705 		zsp->z_cond = ZC4_CLOSED;
2706 		devip->nr_closed++;
2707 	}
2708 }
2709 
2710 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2711 {
2712 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2713 	unsigned int i;
2714 
2715 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2716 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2717 			zbc_close_zone(devip, zsp);
2718 			return;
2719 		}
2720 	}
2721 }
2722 
2723 static void zbc_open_zone(struct sdebug_dev_info *devip,
2724 			  struct sdeb_zone_state *zsp, bool explicit)
2725 {
2726 	enum sdebug_z_cond zc;
2727 
2728 	if (zbc_zone_is_conv(zsp))
2729 		return;
2730 
2731 	zc = zsp->z_cond;
2732 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2733 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2734 		return;
2735 
2736 	/* Close an implicit open zone if necessary */
2737 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2738 		zbc_close_zone(devip, zsp);
2739 	else if (devip->max_open &&
2740 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2741 		zbc_close_imp_open_zone(devip);
2742 
2743 	if (zsp->z_cond == ZC4_CLOSED)
2744 		devip->nr_closed--;
2745 	if (explicit) {
2746 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2747 		devip->nr_exp_open++;
2748 	} else {
2749 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2750 		devip->nr_imp_open++;
2751 	}
2752 }
2753 
2754 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2755 		       unsigned long long lba, unsigned int num)
2756 {
2757 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2758 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2759 
2760 	if (zbc_zone_is_conv(zsp))
2761 		return;
2762 
2763 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2764 		zsp->z_wp += num;
2765 		if (zsp->z_wp >= zend)
2766 			zsp->z_cond = ZC5_FULL;
2767 		return;
2768 	}
2769 
2770 	while (num) {
2771 		if (lba != zsp->z_wp)
2772 			zsp->z_non_seq_resource = true;
2773 
2774 		end = lba + num;
2775 		if (end >= zend) {
2776 			n = zend - lba;
2777 			zsp->z_wp = zend;
2778 		} else if (end > zsp->z_wp) {
2779 			n = num;
2780 			zsp->z_wp = end;
2781 		} else {
2782 			n = num;
2783 		}
2784 		if (zsp->z_wp >= zend)
2785 			zsp->z_cond = ZC5_FULL;
2786 
2787 		num -= n;
2788 		lba += n;
2789 		if (num) {
2790 			zsp++;
2791 			zend = zsp->z_start + zsp->z_size;
2792 		}
2793 	}
2794 }
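
/*
 * Illustrative sketch only: the room left in the zone holding lba, i.e.
 * how far zbc_inc_wp() above can advance before crossing into the next
 * zone (sequential write preferred zones may spill over; sequential
 * write required zones may not). The name is hypothetical.
 */
static inline unsigned long long sdeb_zone_blks_left_ex(
			struct sdebug_dev_info *devip, unsigned long long lba)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);

	return zsp->z_start + zsp->z_size - lba;
}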
2795 
2796 static int check_zbc_access_params(struct scsi_cmnd *scp,
2797 			unsigned long long lba, unsigned int num, bool write)
2798 {
2799 	struct scsi_device *sdp = scp->device;
2800 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2801 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2802 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2803 
2804 	if (!write) {
2805 		if (devip->zmodel == BLK_ZONED_HA)
2806 			return 0;
2807 		/* For host-managed, reads cannot cross zone type boundaries */
2808 		if (zsp_end != zsp &&
2809 		    zbc_zone_is_conv(zsp) &&
2810 		    !zbc_zone_is_conv(zsp_end)) {
2811 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2812 					LBA_OUT_OF_RANGE,
2813 					READ_INVDATA_ASCQ);
2814 			return check_condition_result;
2815 		}
2816 		return 0;
2817 	}
2818 
2819 	/* No restrictions for writes within conventional zones */
2820 	if (zbc_zone_is_conv(zsp)) {
2821 		if (!zbc_zone_is_conv(zsp_end)) {
2822 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2823 					LBA_OUT_OF_RANGE,
2824 					WRITE_BOUNDARY_ASCQ);
2825 			return check_condition_result;
2826 		}
2827 		return 0;
2828 	}
2829 
2830 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2831 		/* Writes cannot cross sequential zone boundaries */
2832 		if (zsp_end != zsp) {
2833 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2834 					LBA_OUT_OF_RANGE,
2835 					WRITE_BOUNDARY_ASCQ);
2836 			return check_condition_result;
2837 		}
2838 		/* Cannot write full zones */
2839 		if (zsp->z_cond == ZC5_FULL) {
2840 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2841 					INVALID_FIELD_IN_CDB, 0);
2842 			return check_condition_result;
2843 		}
2844 		/* Writes must be aligned to the zone WP */
2845 		if (lba != zsp->z_wp) {
2846 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2847 					LBA_OUT_OF_RANGE,
2848 					UNALIGNED_WRITE_ASCQ);
2849 			return check_condition_result;
2850 		}
2851 	}
2852 
2853 	/* Handle implicit open of closed and empty zones */
2854 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2855 		if (devip->max_open &&
2856 		    devip->nr_exp_open >= devip->max_open) {
2857 			mk_sense_buffer(scp, DATA_PROTECT,
2858 					INSUFF_RES_ASC,
2859 					INSUFF_ZONE_ASCQ);
2860 			return check_condition_result;
2861 		}
2862 		zbc_open_zone(devip, zsp, false);
2863 	}
2864 
2865 	return 0;
2866 }
2867 
2868 static inline int check_device_access_params
2869 			(struct scsi_cmnd *scp, unsigned long long lba,
2870 			 unsigned int num, bool write)
2871 {
2872 	struct scsi_device *sdp = scp->device;
2873 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2874 
2875 	if (lba + num > sdebug_capacity) {
2876 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877 		return check_condition_result;
2878 	}
2879 	/* transfer length excessive (tie in to block limits VPD page) */
2880 	if (num > sdebug_store_sectors) {
2881 		/* needs work to find which cdb byte 'num' comes from */
2882 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883 		return check_condition_result;
2884 	}
2885 	if (write && unlikely(sdebug_wp)) {
2886 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2887 		return check_condition_result;
2888 	}
2889 	if (sdebug_dev_is_zoned(devip))
2890 		return check_zbc_access_params(scp, lba, num, write);
2891 
2892 	return 0;
2893 }
2894 
2895 /*
2896  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2897  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2898  * that access any of the "stores" in struct sdeb_store_info should call this
2899  * function with bug_if_fake_rw set to true.
2900  */
2901 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2902 						bool bug_if_fake_rw)
2903 {
2904 	if (sdebug_fake_rw) {
2905 		BUG_ON(bug_if_fake_rw);	/* See note above */
2906 		return NULL;
2907 	}
2908 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2909 }
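
/*
 * Illustrative sketch only: the calling pattern the note above asks for,
 * mirroring resp_read_dt0() below. A response function that touches a
 * store passes bug_if_fake_rw=true and falls back to the global fake
 * lock when no store is attached. The name is hypothetical.
 */
static inline rwlock_t *sdeb_store_lock_ex(struct sdebug_dev_info *devip)
{
	struct sdeb_store_info *sip = devip2sip(devip, true);

	return sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
}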
2910 
2911 /* Returns number of bytes copied or -1 if error. */
2912 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2913 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2914 {
2915 	int ret;
2916 	u64 block, rest = 0;
2917 	enum dma_data_direction dir;
2918 	struct scsi_data_buffer *sdb = &scp->sdb;
2919 	u8 *fsp;
2920 
2921 	if (do_write) {
2922 		dir = DMA_TO_DEVICE;
2923 		write_since_sync = true;
2924 	} else {
2925 		dir = DMA_FROM_DEVICE;
2926 	}
2927 
2928 	if (!sdb->length || !sip)
2929 		return 0;
2930 	if (scp->sc_data_direction != dir)
2931 		return -1;
2932 	fsp = sip->storep;
2933 
2934 	block = do_div(lba, sdebug_store_sectors);
2935 	if (block + num > sdebug_store_sectors)
2936 		rest = block + num - sdebug_store_sectors;
2937 
2938 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2939 		   fsp + (block * sdebug_sector_size),
2940 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2941 	if (ret != (num - rest) * sdebug_sector_size)
2942 		return ret;
2943 
2944 	if (rest) {
2945 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2946 			    fsp, rest * sdebug_sector_size,
2947 			    sg_skip + ((num - rest) * sdebug_sector_size),
2948 			    do_write);
2949 	}
2950 
2951 	return ret;
2952 }
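
/*
 * Illustrative worked example for the wrap-around split above: with
 * sdebug_store_sectors == 1000, a request with lba == 998 and num == 5
 * yields block == 998 and rest == 3, so (num - rest) == 2 sectors come
 * from the end of the store and the remaining 3 from its start. The
 * helper restates that arithmetic; the name is hypothetical.
 */
static inline u64 sdeb_wrap_rest_ex(u64 block, u32 num, u64 store_sectors)
{
	return (block + num > store_sectors) ? block + num - store_sectors : 0;
}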
2953 
2954 /* Returns number of bytes copied or -1 if error. */
2955 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2956 {
2957 	struct scsi_data_buffer *sdb = &scp->sdb;
2958 
2959 	if (!sdb->length)
2960 		return 0;
2961 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2962 		return -1;
2963 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2964 			      num * sdebug_sector_size, 0, true);
2965 }
2966 
2967 /* If the store at lba compares equal to the first num blocks of arr, copy
2968  * the top half of arr (the second num blocks) into the store at lba and
2969  * return true (no copy if compare_only). If the comparison fails, return false. */
2970 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2971 			      const u8 *arr, bool compare_only)
2972 {
2973 	bool res;
2974 	u64 block, rest = 0;
2975 	u32 store_blks = sdebug_store_sectors;
2976 	u32 lb_size = sdebug_sector_size;
2977 	u8 *fsp = sip->storep;
2978 
2979 	block = do_div(lba, store_blks);
2980 	if (block + num > store_blks)
2981 		rest = block + num - store_blks;
2982 
2983 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2984 	if (!res)
2985 		return res;
2986 	if (rest)
2987 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
2988 			      rest * lb_size);
2989 	if (!res)
2990 		return res;
2991 	if (compare_only)
2992 		return true;
2993 	arr += num * lb_size;
2994 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2995 	if (rest)
2996 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2997 	return res;
2998 }
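
/*
 * Illustrative sketch only: COMPARE AND WRITE style use of the helper
 * above. Here arr holds 2 * num blocks: the verify image first, then
 * the write image, matching the "top half" wording in the comment. The
 * name is hypothetical.
 */
static inline bool sdeb_caw_ex(struct sdeb_store_info *sip, u64 lba,
			       u32 num, const u8 *arr)
{
	return comp_write_worker(sip, lba, num, arr, false);
}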
2999 
3000 static __be16 dif_compute_csum(const void *buf, int len)
3001 {
3002 	__be16 csum;
3003 
3004 	if (sdebug_guard)
3005 		csum = (__force __be16)ip_compute_csum(buf, len);
3006 	else
3007 		csum = cpu_to_be16(crc_t10dif(buf, len));
3008 
3009 	return csum;
3010 }
3011 
3012 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3013 		      sector_t sector, u32 ei_lba)
3014 {
3015 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3016 
3017 	if (sdt->guard_tag != csum) {
3018 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3019 			(unsigned long)sector,
3020 			be16_to_cpu(sdt->guard_tag),
3021 			be16_to_cpu(csum));
3022 		return 0x01;
3023 	}
3024 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3025 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3026 		pr_err("REF check failed on sector %lu\n",
3027 			(unsigned long)sector);
3028 		return 0x03;
3029 	}
3030 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3031 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3032 		pr_err("REF check failed on sector %lu\n",
3033 			(unsigned long)sector);
3034 		return 0x03;
3035 	}
3036 	return 0;
3037 }
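
/*
 * Illustrative sketch only: build a Type 1 protection tuple that
 * dif_verify() above would accept, using the guard algorithm selected
 * by sdebug_guard. The name is hypothetical.
 */
static inline void sdeb_fill_pi_tuple_ex(struct t10_pi_tuple *sdt,
					 const void *data, sector_t sector)
{
	sdt->guard_tag = dif_compute_csum(data, sdebug_sector_size);
	sdt->app_tag = cpu_to_be16(0);
	sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
}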
3038 
3039 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3040 			  unsigned int sectors, bool read)
3041 {
3042 	size_t resid;
3043 	void *paddr;
3044 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3045 						scp->device->hostdata, true);
3046 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3047 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3048 	struct sg_mapping_iter miter;
3049 
3050 	/* Bytes of protection data to copy into sgl */
3051 	resid = sectors * sizeof(*dif_storep);
3052 
3053 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3054 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3055 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3056 
3057 	while (sg_miter_next(&miter) && resid > 0) {
3058 		size_t len = min_t(size_t, miter.length, resid);
3059 		void *start = dif_store(sip, sector);
3060 		size_t rest = 0;
3061 
3062 		if (dif_store_end < start + len)
3063 			rest = start + len - dif_store_end;
3064 
3065 		paddr = miter.addr;
3066 
3067 		if (read)
3068 			memcpy(paddr, start, len - rest);
3069 		else
3070 			memcpy(start, paddr, len - rest);
3071 
3072 		if (rest) {
3073 			if (read)
3074 				memcpy(paddr + len - rest, dif_storep, rest);
3075 			else
3076 				memcpy(dif_storep, paddr + len - rest, rest);
3077 		}
3078 
3079 		sector += len / sizeof(*dif_storep);
3080 		resid -= len;
3081 	}
3082 	sg_miter_stop(&miter);
3083 }
3084 
3085 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3086 			    unsigned int sectors, u32 ei_lba)
3087 {
3088 	int ret = 0;
3089 	unsigned int i;
3090 	sector_t sector;
3091 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3092 						scp->device->hostdata, true);
3093 	struct t10_pi_tuple *sdt;
3094 
3095 	for (i = 0; i < sectors; i++, ei_lba++) {
3096 		sector = start_sec + i;
3097 		sdt = dif_store(sip, sector);
3098 
3099 		if (sdt->app_tag == cpu_to_be16(0xffff))
3100 			continue;
3101 
3102 		/*
3103 		 * Because scsi_debug acts as both initiator and
3104 		 * target we proceed to verify the PI even if
3105 		 * RDPROTECT=3. This is done so the "initiator" knows
3106 		 * which type of error to return. Otherwise we would
3107 		 * have to iterate over the PI twice.
3108 		 */
3109 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3110 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3111 					 sector, ei_lba);
3112 			if (ret) {
3113 				dif_errors++;
3114 				break;
3115 			}
3116 		}
3117 	}
3118 
3119 	dif_copy_prot(scp, start_sec, sectors, true);
3120 	dix_reads++;
3121 
3122 	return ret;
3123 }
3124 
3125 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3126 {
3127 	bool check_prot;
3128 	u32 num;
3129 	u32 ei_lba;
3130 	int ret;
3131 	u64 lba;
3132 	struct sdeb_store_info *sip = devip2sip(devip, true);
3133 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3134 	u8 *cmd = scp->cmnd;
3135 
3136 	switch (cmd[0]) {
3137 	case READ_16:
3138 		ei_lba = 0;
3139 		lba = get_unaligned_be64(cmd + 2);
3140 		num = get_unaligned_be32(cmd + 10);
3141 		check_prot = true;
3142 		break;
3143 	case READ_10:
3144 		ei_lba = 0;
3145 		lba = get_unaligned_be32(cmd + 2);
3146 		num = get_unaligned_be16(cmd + 7);
3147 		check_prot = true;
3148 		break;
3149 	case READ_6:
3150 		ei_lba = 0;
3151 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3152 		      (u32)(cmd[1] & 0x1f) << 16;
3153 		num = (0 == cmd[4]) ? 256 : cmd[4];
3154 		check_prot = true;
3155 		break;
3156 	case READ_12:
3157 		ei_lba = 0;
3158 		lba = get_unaligned_be32(cmd + 2);
3159 		num = get_unaligned_be32(cmd + 6);
3160 		check_prot = true;
3161 		break;
3162 	case XDWRITEREAD_10:
3163 		ei_lba = 0;
3164 		lba = get_unaligned_be32(cmd + 2);
3165 		num = get_unaligned_be16(cmd + 7);
3166 		check_prot = false;
3167 		break;
3168 	default:	/* assume READ(32) */
3169 		lba = get_unaligned_be64(cmd + 12);
3170 		ei_lba = get_unaligned_be32(cmd + 20);
3171 		num = get_unaligned_be32(cmd + 28);
3172 		check_prot = false;
3173 		break;
3174 	}
3175 	if (unlikely(have_dif_prot && check_prot)) {
3176 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3177 		    (cmd[1] & 0xe0)) {
3178 			mk_sense_invalid_opcode(scp);
3179 			return check_condition_result;
3180 		}
3181 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3182 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3183 		    (cmd[1] & 0xe0) == 0)
3184 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3185 				    "to DIF device\n");
3186 	}
3187 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3188 		     atomic_read(&sdeb_inject_pending))) {
3189 		num /= 2;
3190 		atomic_set(&sdeb_inject_pending, 0);
3191 	}
3192 
3193 	ret = check_device_access_params(scp, lba, num, false);
3194 	if (ret)
3195 		return ret;
3196 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3197 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3198 		     ((lba + num) > sdebug_medium_error_start))) {
3199 		/* claim unrecoverable read error */
3200 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3201 		/* set info field and valid bit for fixed descriptor */
3202 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3203 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3204 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3205 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3206 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3207 		}
3208 		scsi_set_resid(scp, scsi_bufflen(scp));
3209 		return check_condition_result;
3210 	}
3211 
3212 	read_lock(macc_lckp);
3213 
3214 	/* DIX + T10 DIF */
3215 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3216 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3217 		case 1: /* Guard tag error */
3218 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3219 				read_unlock(macc_lckp);
3220 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3221 				return check_condition_result;
3222 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3223 				read_unlock(macc_lckp);
3224 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3225 				return illegal_condition_result;
3226 			}
3227 			break;
3228 		case 3: /* Reference tag error */
3229 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3230 				read_unlock(macc_lckp);
3231 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3232 				return check_condition_result;
3233 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3234 				read_unlock(macc_lckp);
3235 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3236 				return illegal_condition_result;
3237 			}
3238 			break;
3239 		}
3240 	}
3241 
3242 	ret = do_device_access(sip, scp, 0, lba, num, false);
3243 	read_unlock(macc_lckp);
3244 	if (unlikely(ret == -1))
3245 		return DID_ERROR << 16;
3246 
3247 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3248 
3249 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3250 		     atomic_read(&sdeb_inject_pending))) {
3251 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3252 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3253 			atomic_set(&sdeb_inject_pending, 0);
3254 			return check_condition_result;
3255 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3256 			/* Logical block guard check failed */
3257 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3258 			atomic_set(&sdeb_inject_pending, 0);
3259 			return illegal_condition_result;
3260 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3261 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3262 			atomic_set(&sdeb_inject_pending, 0);
3263 			return illegal_condition_result;
3264 		}
3265 	}
3266 	return 0;
3267 }
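
/*
 * Editorial sketch (not part of the driver): the READ(16) CDB fields that
 * resp_read_dt0() decodes above, seen from the builder's side. Offsets
 * follow SBC: byte 0 opcode, bytes 2..9 LBA, bytes 10..13 transfer length,
 * all big-endian. The helper name is hypothetical.
 */
static __maybe_unused void build_read16_sketch(u8 *cdb, u64 lba, u32 num)
{
	memset(cdb, 0, 16);
	cdb[0] = READ_16;			/* 0x88 */
	put_unaligned_be64(lba, cdb + 2);	/* mirrors get_unaligned_be64(cmd + 2) */
	put_unaligned_be32(num, cdb + 10);	/* mirrors get_unaligned_be32(cmd + 10) */
}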
3268 
3269 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3270 			     unsigned int sectors, u32 ei_lba)
3271 {
3272 	int ret;
3273 	struct t10_pi_tuple *sdt;
3274 	void *daddr;
3275 	sector_t sector = start_sec;
3276 	int ppage_offset;
3277 	int dpage_offset;
3278 	struct sg_mapping_iter diter;
3279 	struct sg_mapping_iter piter;
3280 
3281 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3282 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3283 
3284 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3285 			scsi_prot_sg_count(SCpnt),
3286 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3287 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3288 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3289 
3290 	/* For each protection page */
3291 	while (sg_miter_next(&piter)) {
3292 		dpage_offset = 0;
3293 		if (WARN_ON(!sg_miter_next(&diter))) {
3294 			ret = 0x01;
3295 			goto out;
3296 		}
3297 
3298 		for (ppage_offset = 0; ppage_offset < piter.length;
3299 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3300 			/* If we're at the end of the current
3301 			 * data page, advance to the next one
3302 			 */
3303 			if (dpage_offset >= diter.length) {
3304 				if (WARN_ON(!sg_miter_next(&diter))) {
3305 					ret = 0x01;
3306 					goto out;
3307 				}
3308 				dpage_offset = 0;
3309 			}
3310 
3311 			sdt = piter.addr + ppage_offset;
3312 			daddr = diter.addr + dpage_offset;
3313 
3314 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3315 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3316 				if (ret)
3317 					goto out;
3318 			}
3319 
3320 			sector++;
3321 			ei_lba++;
3322 			dpage_offset += sdebug_sector_size;
3323 		}
3324 		diter.consumed = dpage_offset;
3325 		sg_miter_stop(&diter);
3326 	}
3327 	sg_miter_stop(&piter);
3328 
3329 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3330 	dix_writes++;
3331 
3332 	return 0;
3333 
3334 out:
3335 	dif_errors++;
3336 	sg_miter_stop(&diter);
3337 	sg_miter_stop(&piter);
3338 	return ret;
3339 }
3340 
3341 static unsigned long lba_to_map_index(sector_t lba)
3342 {
3343 	if (sdebug_unmap_alignment)
3344 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3345 	sector_div(lba, sdebug_unmap_granularity);
3346 	return lba;
3347 }
3348 
3349 static sector_t map_index_to_lba(unsigned long index)
3350 {
3351 	sector_t lba = index * sdebug_unmap_granularity;
3352 
3353 	if (sdebug_unmap_alignment)
3354 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3355 	return lba;
3356 }
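
/*
 * Worked example for the two helpers above (editorial; the values are
 * hypothetical): with sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=2, lba_to_map_index() first adds 8 - 2 = 6, so
 * LBAs 0..1 fall in map index 0 (a short leading chunk) and LBAs 2..9 in
 * index 1; map_index_to_lba(1) = 1 * 8 - 6 = 2, the start of that chunk.
 */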
3357 
3358 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3359 			      unsigned int *num)
3360 {
3361 	sector_t end;
3362 	unsigned int mapped;
3363 	unsigned long index;
3364 	unsigned long next;
3365 
3366 	index = lba_to_map_index(lba);
3367 	mapped = test_bit(index, sip->map_storep);
3368 
3369 	if (mapped)
3370 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3371 	else
3372 		next = find_next_bit(sip->map_storep, map_size, index);
3373 
3374 	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3375 	*num = end - lba;
3376 	return mapped;
3377 }
3378 
3379 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3380 		       unsigned int len)
3381 {
3382 	sector_t end = lba + len;
3383 
3384 	while (lba < end) {
3385 		unsigned long index = lba_to_map_index(lba);
3386 
3387 		if (index < map_size)
3388 			set_bit(index, sip->map_storep);
3389 
3390 		lba = map_index_to_lba(index + 1);
3391 	}
3392 }
3393 
3394 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3395 			 unsigned int len)
3396 {
3397 	sector_t end = lba + len;
3398 	u8 *fsp = sip->storep;
3399 
3400 	while (lba < end) {
3401 		unsigned long index = lba_to_map_index(lba);
3402 
3403 		if (lba == map_index_to_lba(index) &&
3404 		    lba + sdebug_unmap_granularity <= end &&
3405 		    index < map_size) {
3406 			clear_bit(index, sip->map_storep);
3407 			if (sdebug_lbprz) {  /* LBPRZ=1: read zeros; LBPRZ=2: read 0xffs */
3408 				memset(fsp + lba * sdebug_sector_size,
3409 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3410 				       sdebug_sector_size *
3411 				       sdebug_unmap_granularity);
3412 			}
3413 			if (sip->dif_storep) {
3414 				memset(sip->dif_storep + lba, 0xff,
3415 				       sizeof(*sip->dif_storep) *
3416 				       sdebug_unmap_granularity);
3417 			}
3418 		}
3419 		lba = map_index_to_lba(index + 1);
3420 	}
3421 }
3422 
3423 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3424 {
3425 	bool check_prot;
3426 	u32 num;
3427 	u32 ei_lba;
3428 	int ret;
3429 	u64 lba;
3430 	struct sdeb_store_info *sip = devip2sip(devip, true);
3431 	rwlock_t *macc_lckp = &sip->macc_lck;
3432 	u8 *cmd = scp->cmnd;
3433 
3434 	switch (cmd[0]) {
3435 	case WRITE_16:
3436 		ei_lba = 0;
3437 		lba = get_unaligned_be64(cmd + 2);
3438 		num = get_unaligned_be32(cmd + 10);
3439 		check_prot = true;
3440 		break;
3441 	case WRITE_10:
3442 		ei_lba = 0;
3443 		lba = get_unaligned_be32(cmd + 2);
3444 		num = get_unaligned_be16(cmd + 7);
3445 		check_prot = true;
3446 		break;
3447 	case WRITE_6:
3448 		ei_lba = 0;
3449 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3450 		      (u32)(cmd[1] & 0x1f) << 16;
3451 		num = (0 == cmd[4]) ? 256 : cmd[4];
3452 		check_prot = true;
3453 		break;
3454 	case WRITE_12:
3455 		ei_lba = 0;
3456 		lba = get_unaligned_be32(cmd + 2);
3457 		num = get_unaligned_be32(cmd + 6);
3458 		check_prot = true;
3459 		break;
3460 	case XDWRITEREAD_10:
3461 		ei_lba = 0;
3462 		lba = get_unaligned_be32(cmd + 2);
3463 		num = get_unaligned_be16(cmd + 7);
3464 		check_prot = false;
3465 		break;
3466 	default:	/* assume WRITE(32) */
3467 		lba = get_unaligned_be64(cmd + 12);
3468 		ei_lba = get_unaligned_be32(cmd + 20);
3469 		num = get_unaligned_be32(cmd + 28);
3470 		check_prot = false;
3471 		break;
3472 	}
3473 	if (unlikely(have_dif_prot && check_prot)) {
3474 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3475 		    (cmd[1] & 0xe0)) {
3476 			mk_sense_invalid_opcode(scp);
3477 			return check_condition_result;
3478 		}
3479 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3480 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3481 		    (cmd[1] & 0xe0) == 0)
3482 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3483 				    "to DIF device\n");
3484 	}
3485 
3486 	write_lock(macc_lckp);
3487 	ret = check_device_access_params(scp, lba, num, true);
3488 	if (ret) {
3489 		write_unlock(macc_lckp);
3490 		return ret;
3491 	}
3492 
3493 	/* DIX + T10 DIF */
3494 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3495 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3496 		case 1: /* Guard tag error */
3497 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3498 				write_unlock(macc_lckp);
3499 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3500 				return illegal_condition_result;
3501 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3502 				write_unlock(macc_lckp);
3503 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3504 				return check_condition_result;
3505 			}
3506 			break;
3507 		case 3: /* Reference tag error */
3508 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3509 				write_unlock(macc_lckp);
3510 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3511 				return illegal_condition_result;
3512 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3513 				write_unlock(macc_lckp);
3514 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3515 				return check_condition_result;
3516 			}
3517 			break;
3518 		}
3519 	}
3520 
3521 	ret = do_device_access(sip, scp, 0, lba, num, true);
3522 	if (unlikely(scsi_debug_lbp()))
3523 		map_region(sip, lba, num);
3524 	/* If ZBC zone then bump its write pointer */
3525 	if (sdebug_dev_is_zoned(devip))
3526 		zbc_inc_wp(devip, lba, num);
3527 	write_unlock(macc_lckp);
3528 	if (unlikely(-1 == ret))
3529 		return DID_ERROR << 16;
3530 	else if (unlikely(sdebug_verbose &&
3531 			  (ret < (num * sdebug_sector_size))))
3532 		sdev_printk(KERN_INFO, scp->device,
3533 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3534 			    my_name, num * sdebug_sector_size, ret);
3535 
3536 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3537 		     atomic_read(&sdeb_inject_pending))) {
3538 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3539 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3540 			atomic_set(&sdeb_inject_pending, 0);
3541 			return check_condition_result;
3542 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3543 			/* Logical block guard check failed */
3544 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3545 			atomic_set(&sdeb_inject_pending, 0);
3546 			return illegal_condition_result;
3547 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3548 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3549 			atomic_set(&sdeb_inject_pending, 0);
3550 			return illegal_condition_result;
3551 		}
3552 	}
3553 	return 0;
3554 }
3555 
3556 /*
3557  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3558  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3559  */
3560 static int resp_write_scat(struct scsi_cmnd *scp,
3561 			   struct sdebug_dev_info *devip)
3562 {
3563 	u8 *cmd = scp->cmnd;
3564 	u8 *lrdp = NULL;
3565 	u8 *up;
3566 	struct sdeb_store_info *sip = devip2sip(devip, true);
3567 	rwlock_t *macc_lckp = &sip->macc_lck;
3568 	u8 wrprotect;
3569 	u16 lbdof, num_lrd, k;
3570 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3571 	u32 lb_size = sdebug_sector_size;
3572 	u32 ei_lba;
3573 	u64 lba;
3574 	int ret, res;
3575 	bool is_16;
3576 	static const u32 lrd_size = 32; /* + parameter list header size */
3577 
3578 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3579 		is_16 = false;
3580 		wrprotect = (cmd[10] >> 5) & 0x7;
3581 		lbdof = get_unaligned_be16(cmd + 12);
3582 		num_lrd = get_unaligned_be16(cmd + 16);
3583 		bt_len = get_unaligned_be32(cmd + 28);
3584 	} else {        /* that leaves WRITE SCATTERED(16) */
3585 		is_16 = true;
3586 		wrprotect = (cmd[2] >> 5) & 0x7;
3587 		lbdof = get_unaligned_be16(cmd + 4);
3588 		num_lrd = get_unaligned_be16(cmd + 8);
3589 		bt_len = get_unaligned_be32(cmd + 10);
3590 		if (unlikely(have_dif_prot)) {
3591 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3592 			    wrprotect) {
3593 				mk_sense_invalid_opcode(scp);
3594 				return illegal_condition_result;
3595 			}
3596 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3597 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3598 			     wrprotect == 0)
3599 				sdev_printk(KERN_ERR, scp->device,
3600 					    "Unprotected WR to DIF device\n");
3601 		}
3602 	}
3603 	if ((num_lrd == 0) || (bt_len == 0))
3604 		return 0;       /* T10 says these do-nothings are not errors */
3605 	if (lbdof == 0) {
3606 		if (sdebug_verbose)
3607 			sdev_printk(KERN_INFO, scp->device,
3608 				"%s: %s: LB Data Offset field bad\n",
3609 				my_name, __func__);
3610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3611 		return illegal_condition_result;
3612 	}
3613 	lbdof_blen = lbdof * lb_size;
3614 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3615 		if (sdebug_verbose)
3616 			sdev_printk(KERN_INFO, scp->device,
3617 				"%s: %s: LBA range descriptors don't fit\n",
3618 				my_name, __func__);
3619 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3620 		return illegal_condition_result;
3621 	}
3622 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3623 	if (lrdp == NULL)
3624 		return SCSI_MLQUEUE_HOST_BUSY;
3625 	if (sdebug_verbose)
3626 		sdev_printk(KERN_INFO, scp->device,
3627 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3628 			my_name, __func__, lbdof_blen);
3629 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3630 	if (res == -1) {
3631 		ret = DID_ERROR << 16;
3632 		goto err_out;
3633 	}
3634 
3635 	write_lock(macc_lckp);
3636 	sg_off = lbdof_blen;
3637 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
3638 	cum_lb = 0;
3639 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3640 		lba = get_unaligned_be64(up + 0);
3641 		num = get_unaligned_be32(up + 8);
3642 		if (sdebug_verbose)
3643 			sdev_printk(KERN_INFO, scp->device,
3644 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3645 				my_name, __func__, k, lba, num, sg_off);
3646 		if (num == 0)
3647 			continue;
3648 		ret = check_device_access_params(scp, lba, num, true);
3649 		if (ret)
3650 			goto err_out_unlock;
3651 		num_by = num * lb_size;
3652 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3653 
3654 		if ((cum_lb + num) > bt_len) {
3655 			if (sdebug_verbose)
3656 				sdev_printk(KERN_INFO, scp->device,
3657 				    "%s: %s: sum of blocks > data provided\n",
3658 				    my_name, __func__);
3659 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3660 					0);
3661 			ret = illegal_condition_result;
3662 			goto err_out_unlock;
3663 		}
3664 
3665 		/* DIX + T10 DIF */
3666 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3667 			int prot_ret = prot_verify_write(scp, lba, num,
3668 							 ei_lba);
3669 
3670 			if (prot_ret) {
3671 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3672 						prot_ret);
3673 				ret = illegal_condition_result;
3674 				goto err_out_unlock;
3675 			}
3676 		}
3677 
3678 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3679 		/* If ZBC zone then bump its write pointer */
3680 		if (sdebug_dev_is_zoned(devip))
3681 			zbc_inc_wp(devip, lba, num);
3682 		if (unlikely(scsi_debug_lbp()))
3683 			map_region(sip, lba, num);
3684 		if (unlikely(-1 == ret)) {
3685 			ret = DID_ERROR << 16;
3686 			goto err_out_unlock;
3687 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3688 			sdev_printk(KERN_INFO, scp->device,
3689 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3690 			    my_name, num_by, ret);
3691 
3692 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3693 			     atomic_read(&sdeb_inject_pending))) {
3694 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3695 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3696 				atomic_set(&sdeb_inject_pending, 0);
3697 				ret = check_condition_result;
3698 				goto err_out_unlock;
3699 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3700 				/* Logical block guard check failed */
3701 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3702 				atomic_set(&sdeb_inject_pending, 0);
3703 				ret = illegal_condition_result;
3704 				goto err_out_unlock;
3705 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3706 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3707 				atomic_set(&sdeb_inject_pending, 0);
3708 				ret = illegal_condition_result;
3709 				goto err_out_unlock;
3710 			}
3711 		}
3712 		sg_off += num_by;
3713 		cum_lb += num;
3714 	}
3715 	ret = 0;
3716 err_out_unlock:
3717 	write_unlock(macc_lckp);
3718 err_out:
3719 	kfree(lrdp);
3720 	return ret;
3721 }
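
/*
 * Editorial sketch (not part of the driver): one 32-byte LBA range
 * descriptor as consumed by resp_write_scat() above. The parameter data
 * begins with a 32-byte header, so descriptor k sits at byte offset
 * 32 * (k + 1); the helper name is hypothetical.
 */
static __maybe_unused void fill_lrd_sketch(u8 *lrd, u64 lba, u32 num)
{
	memset(lrd, 0, 32);
	put_unaligned_be64(lba, lrd + 0);	/* mirrors get_unaligned_be64(up + 0) */
	put_unaligned_be32(num, lrd + 8);	/* mirrors get_unaligned_be32(up + 8) */
	/* bytes 12..15 hold the expected initial ref tag for WRITE SCATTERED(32) */
}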
3722 
3723 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3724 			   u32 ei_lba, bool unmap, bool ndob)
3725 {
3726 	struct scsi_device *sdp = scp->device;
3727 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3728 	unsigned long long i;
3729 	u64 block, lbaa;
3730 	u32 lb_size = sdebug_sector_size;
3731 	int ret;
3732 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3733 						scp->device->hostdata, true);
3734 	rwlock_t *macc_lckp = &sip->macc_lck;
3735 	u8 *fs1p;
3736 	u8 *fsp;
3737 
3738 	write_lock(macc_lckp);
3739 
3740 	ret = check_device_access_params(scp, lba, num, true);
3741 	if (ret) {
3742 		write_unlock(macc_lckp);
3743 		return ret;
3744 	}
3745 
3746 	if (unmap && scsi_debug_lbp()) {
3747 		unmap_region(sip, lba, num);
3748 		goto out;
3749 	}
3750 	lbaa = lba;
3751 	block = do_div(lbaa, sdebug_store_sectors);
3752 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3753 	fsp = sip->storep;
3754 	fs1p = fsp + (block * lb_size);
3755 	if (ndob) {
3756 		memset(fs1p, 0, lb_size);
3757 		ret = 0;
3758 	} else
3759 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3760 
3761 	if (-1 == ret) {
3762 		write_unlock(macc_lckp);
3763 		return DID_ERROR << 16;
3764 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3765 		sdev_printk(KERN_INFO, scp->device,
3766 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3767 			    my_name, "write same", lb_size, ret);
3768 
3769 	/* Copy first sector to remaining blocks */
3770 	for (i = 1 ; i < num ; i++) {
3771 		lbaa = lba + i;
3772 		block = do_div(lbaa, sdebug_store_sectors);
3773 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3774 	}
3775 	if (scsi_debug_lbp())
3776 		map_region(sip, lba, num);
3777 	/* If ZBC zone then bump its write pointer */
3778 	if (sdebug_dev_is_zoned(devip))
3779 		zbc_inc_wp(devip, lba, num);
3780 out:
3781 	write_unlock(macc_lckp);
3782 
3783 	return 0;
3784 }
3785 
3786 static int resp_write_same_10(struct scsi_cmnd *scp,
3787 			      struct sdebug_dev_info *devip)
3788 {
3789 	u8 *cmd = scp->cmnd;
3790 	u32 lba;
3791 	u16 num;
3792 	u32 ei_lba = 0;
3793 	bool unmap = false;
3794 
3795 	if (cmd[1] & 0x8) {
3796 		if (sdebug_lbpws10 == 0) {
3797 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3798 			return check_condition_result;
3799 		} else
3800 			unmap = true;
3801 	}
3802 	lba = get_unaligned_be32(cmd + 2);
3803 	num = get_unaligned_be16(cmd + 7);
3804 	if (num > sdebug_write_same_length) {
3805 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3806 		return check_condition_result;
3807 	}
3808 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3809 }
3810 
3811 static int resp_write_same_16(struct scsi_cmnd *scp,
3812 			      struct sdebug_dev_info *devip)
3813 {
3814 	u8 *cmd = scp->cmnd;
3815 	u64 lba;
3816 	u32 num;
3817 	u32 ei_lba = 0;
3818 	bool unmap = false;
3819 	bool ndob = false;
3820 
3821 	if (cmd[1] & 0x8) {	/* UNMAP */
3822 		if (sdebug_lbpws == 0) {
3823 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3824 			return check_condition_result;
3825 		} else
3826 			unmap = true;
3827 	}
3828 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3829 		ndob = true;
3830 	lba = get_unaligned_be64(cmd + 2);
3831 	num = get_unaligned_be32(cmd + 10);
3832 	if (num > sdebug_write_same_length) {
3833 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3834 		return check_condition_result;
3835 	}
3836 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3837 }
3838 
3839 /* Note the mode field is in the same position as the (lower) service action
3840  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3841  * each mode of this command should be reported separately; left for future work. */
3842 static int resp_write_buffer(struct scsi_cmnd *scp,
3843 			     struct sdebug_dev_info *devip)
3844 {
3845 	u8 *cmd = scp->cmnd;
3846 	struct scsi_device *sdp = scp->device;
3847 	struct sdebug_dev_info *dp;
3848 	u8 mode;
3849 
3850 	mode = cmd[1] & 0x1f;
3851 	switch (mode) {
3852 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3853 		/* set UAs on this device only */
3854 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3855 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3856 		break;
3857 	case 0x5:	/* download MC, save and ACT */
3858 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3859 		break;
3860 	case 0x6:	/* download MC with offsets and ACT */
3861 		/* set UAs on most devices (LUs) in this target */
3862 		list_for_each_entry(dp,
3863 				    &devip->sdbg_host->dev_info_list,
3864 				    dev_list)
3865 			if (dp->target == sdp->id) {
3866 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3867 				if (devip != dp)
3868 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3869 						dp->uas_bm);
3870 			}
3871 		break;
3872 	case 0x7:	/* download MC with offsets, save, and ACT */
3873 		/* set UA on all devices (LUs) in this target */
3874 		list_for_each_entry(dp,
3875 				    &devip->sdbg_host->dev_info_list,
3876 				    dev_list)
3877 			if (dp->target == sdp->id)
3878 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3879 					dp->uas_bm);
3880 		break;
3881 	default:
3882 		/* do nothing for this command for other mode values */
3883 		break;
3884 	}
3885 	return 0;
3886 }
3887 
3888 static int resp_comp_write(struct scsi_cmnd *scp,
3889 			   struct sdebug_dev_info *devip)
3890 {
3891 	u8 *cmd = scp->cmnd;
3892 	u8 *arr;
3893 	struct sdeb_store_info *sip = devip2sip(devip, true);
3894 	rwlock_t *macc_lckp = &sip->macc_lck;
3895 	u64 lba;
3896 	u32 dnum;
3897 	u32 lb_size = sdebug_sector_size;
3898 	u8 num;
3899 	int ret;
3900 	int retval = 0;
3901 
3902 	lba = get_unaligned_be64(cmd + 2);
3903 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3904 	if (0 == num)
3905 		return 0;	/* degenerate case, not an error */
3906 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3907 	    (cmd[1] & 0xe0)) {
3908 		mk_sense_invalid_opcode(scp);
3909 		return check_condition_result;
3910 	}
3911 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3912 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3913 	    (cmd[1] & 0xe0) == 0)
3914 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3915 			    "to DIF device\n");
3916 	ret = check_device_access_params(scp, lba, num, false);
3917 	if (ret)
3918 		return ret;
3919 	dnum = 2 * num;
3920 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3921 	if (NULL == arr) {
3922 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3923 				INSUFF_RES_ASCQ);
3924 		return check_condition_result;
3925 	}
3926 
3927 	write_lock(macc_lckp);
3928 
3929 	ret = do_dout_fetch(scp, dnum, arr);
3930 	if (ret == -1) {
3931 		retval = DID_ERROR << 16;
3932 		goto cleanup;
3933 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3934 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3935 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3936 			    dnum * lb_size, ret);
3937 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3938 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3939 		retval = check_condition_result;
3940 		goto cleanup;
3941 	}
3942 	if (scsi_debug_lbp())
3943 		map_region(sip, lba, num);
3944 cleanup:
3945 	write_unlock(macc_lckp);
3946 	kfree(arr);
3947 	return retval;
3948 }
3949 
3950 struct unmap_block_desc {
3951 	__be64	lba;
3952 	__be32	blocks;
3953 	__be32	__reserved;
3954 };
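
/*
 * Editorial sketch (not part of the driver): an UNMAP parameter list with a
 * single block descriptor, sized to satisfy the checks in resp_unmap()
 * below: bytes 0..1 hold the total length minus 2 and bytes 2..3 hold
 * 16 * (number of descriptors). The helper name is hypothetical.
 */
static __maybe_unused void build_unmap_sketch(u8 *buf, u64 lba, u32 blocks)
{
	struct unmap_block_desc *d = (struct unmap_block_desc *)(buf + 8);

	memset(buf, 0, 24);
	put_unaligned_be16(24 - 2, buf + 0);	/* UNMAP data length */
	put_unaligned_be16(16, buf + 2);	/* block descriptor data length */
	d->lba = cpu_to_be64(lba);
	d->blocks = cpu_to_be32(blocks);
}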
3955 
3956 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3957 {
3958 	unsigned char *buf;
3959 	struct unmap_block_desc *desc;
3960 	struct sdeb_store_info *sip = devip2sip(devip, true);
3961 	rwlock_t *macc_lckp = &sip->macc_lck;
3962 	unsigned int i, payload_len, descriptors;
3963 	int ret;
3964 
3965 	if (!scsi_debug_lbp())
3966 		return 0;	/* fib and say it's done */
3967 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3968 	BUG_ON(scsi_bufflen(scp) != payload_len);
3969 
3970 	descriptors = (payload_len - 8) / 16;
3971 	if (descriptors > sdebug_unmap_max_desc) {
3972 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3973 		return check_condition_result;
3974 	}
3975 
3976 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3977 	if (!buf) {
3978 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3979 				INSUFF_RES_ASCQ);
3980 		return check_condition_result;
3981 	}
3982 
3983 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3984 
3985 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3986 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3987 
3988 	desc = (void *)&buf[8];
3989 
3990 	write_lock(macc_lckp);
3991 
3992 	for (i = 0 ; i < descriptors ; i++) {
3993 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3994 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3995 
3996 		ret = check_device_access_params(scp, lba, num, true);
3997 		if (ret)
3998 			goto out;
3999 
4000 		unmap_region(sip, lba, num);
4001 	}
4002 
4003 	ret = 0;
4004 
4005 out:
4006 	write_unlock(macc_lckp);
4007 	kfree(buf);
4008 
4009 	return ret;
4010 }
4011 
4012 #define SDEBUG_GET_LBA_STATUS_LEN 32
4013 
4014 static int resp_get_lba_status(struct scsi_cmnd *scp,
4015 			       struct sdebug_dev_info *devip)
4016 {
4017 	u8 *cmd = scp->cmnd;
4018 	u64 lba;
4019 	u32 alloc_len, mapped, num;
4020 	int ret;
4021 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4022 
4023 	lba = get_unaligned_be64(cmd + 2);
4024 	alloc_len = get_unaligned_be32(cmd + 10);
4025 
4026 	if (alloc_len < 24)
4027 		return 0;
4028 
4029 	ret = check_device_access_params(scp, lba, 1, false);
4030 	if (ret)
4031 		return ret;
4032 
4033 	if (scsi_debug_lbp()) {
4034 		struct sdeb_store_info *sip = devip2sip(devip, true);
4035 
4036 		mapped = map_state(sip, lba, &num);
4037 	} else {
4038 		mapped = 1;
4039 		/* following just in case virtual_gb changed */
4040 		sdebug_capacity = get_sdebug_capacity();
4041 		if (sdebug_capacity - lba <= 0xffffffff)
4042 			num = sdebug_capacity - lba;
4043 		else
4044 			num = 0xffffffff;
4045 	}
4046 
4047 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4048 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4049 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4050 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4051 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4052 
4053 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4054 }
4055 
4056 static int resp_sync_cache(struct scsi_cmnd *scp,
4057 			   struct sdebug_dev_info *devip)
4058 {
4059 	int res = 0;
4060 	u64 lba;
4061 	u32 num_blocks;
4062 	u8 *cmd = scp->cmnd;
4063 
4064 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4065 		lba = get_unaligned_be32(cmd + 2);
4066 		num_blocks = get_unaligned_be16(cmd + 7);
4067 	} else {				/* SYNCHRONIZE_CACHE(16) */
4068 		lba = get_unaligned_be64(cmd + 2);
4069 		num_blocks = get_unaligned_be32(cmd + 10);
4070 	}
4071 	if (lba + num_blocks > sdebug_capacity) {
4072 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4073 		return check_condition_result;
4074 	}
4075 	if (!write_since_sync || (cmd[1] & 0x2))
4076 		res = SDEG_RES_IMMED_MASK;
4077 	else		/* delay if write_since_sync and IMMED clear */
4078 		write_since_sync = false;
4079 	return res;
4080 }
4081 
4082 /*
4083  * Assuming LBA+num_blocks is not out-of-range, this function returns
4084  * CONDITION MET if the specified blocks will fit (or already sit) in the
4085  * cache, and GOOD status otherwise. We model a disk with a big cache, so
4086  * CONDITION MET is always yielded; as a side effect the backing range of
4087  * main memory is prefetched into the CPU cache(s).
4088  */
4089 static int resp_pre_fetch(struct scsi_cmnd *scp,
4090 			  struct sdebug_dev_info *devip)
4091 {
4092 	int res = 0;
4093 	u64 lba;
4094 	u64 block, rest = 0;
4095 	u32 nblks;
4096 	u8 *cmd = scp->cmnd;
4097 	struct sdeb_store_info *sip = devip2sip(devip, true);
4098 	rwlock_t *macc_lckp = &sip->macc_lck;
4099 	u8 *fsp = sip->storep;
4100 
4101 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4102 		lba = get_unaligned_be32(cmd + 2);
4103 		nblks = get_unaligned_be16(cmd + 7);
4104 	} else {			/* PRE-FETCH(16) */
4105 		lba = get_unaligned_be64(cmd + 2);
4106 		nblks = get_unaligned_be32(cmd + 10);
4107 	}
4108 	if (lba + nblks > sdebug_capacity) {
4109 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4110 		return check_condition_result;
4111 	}
4112 	if (!fsp)
4113 		goto fini;
4114 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4115 	block = do_div(lba, sdebug_store_sectors);
4116 	if (block + nblks > sdebug_store_sectors)
4117 		rest = block + nblks - sdebug_store_sectors;
4118 
4119 	/* Try to bring the PRE-FETCH range into CPU's cache */
4120 	read_lock(macc_lckp);
4121 	prefetch_range(fsp + (sdebug_sector_size * block),
4122 		       (nblks - rest) * sdebug_sector_size);
4123 	if (rest)
4124 		prefetch_range(fsp, rest * sdebug_sector_size);
4125 	read_unlock(macc_lckp);
4126 fini:
4127 	if (cmd[1] & 0x2)
4128 		res = SDEG_RES_IMMED_MASK;
4129 	return res | condition_met_result;
4130 }
4131 
4132 #define RL_BUCKET_ELEMS 8
4133 
4134 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4135  * (W-LUN), the normal Linux scanning logic does not associate it with a
4136  * device (e.g. /dev/sg7). The following magic will make that association:
4137  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4138  * where <n> is a host number. If there are multiple targets in a host then
4139  * the above will associate a W-LUN to each target. To only get a W-LUN
4140  * for target 2, then use "echo '- 2 49409' > scan" .
4141  */
4142 static int resp_report_luns(struct scsi_cmnd *scp,
4143 			    struct sdebug_dev_info *devip)
4144 {
4145 	unsigned char *cmd = scp->cmnd;
4146 	unsigned int alloc_len;
4147 	unsigned char select_report;
4148 	u64 lun;
4149 	struct scsi_lun *lun_p;
4150 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4151 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4152 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4153 	unsigned int tlun_cnt;	/* total LUN count */
4154 	unsigned int rlen;	/* response length (in bytes) */
4155 	int k, j, n, res;
4156 	unsigned int off_rsp = 0;
4157 	const int sz_lun = sizeof(struct scsi_lun);
4158 
4159 	clear_luns_changed_on_target(devip);
4160 
4161 	select_report = cmd[2];
4162 	alloc_len = get_unaligned_be32(cmd + 6);
4163 
4164 	if (alloc_len < 4) {
4165 		pr_err("alloc len too small %d\n", alloc_len);
4166 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4167 		return check_condition_result;
4168 	}
4169 
4170 	switch (select_report) {
4171 	case 0:		/* all LUNs apart from W-LUNs */
4172 		lun_cnt = sdebug_max_luns;
4173 		wlun_cnt = 0;
4174 		break;
4175 	case 1:		/* only W-LUNs */
4176 		lun_cnt = 0;
4177 		wlun_cnt = 1;
4178 		break;
4179 	case 2:		/* all LUNs */
4180 		lun_cnt = sdebug_max_luns;
4181 		wlun_cnt = 1;
4182 		break;
4183 	case 0x10:	/* only administrative LUs */
4184 	case 0x11:	/* see SPC-5 */
4185 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4186 	default:
4187 		pr_debug("select report invalid %d\n", select_report);
4188 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4189 		return check_condition_result;
4190 	}
4191 
4192 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4193 		--lun_cnt;
4194 
4195 	tlun_cnt = lun_cnt + wlun_cnt;
4196 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4197 	scsi_set_resid(scp, scsi_bufflen(scp));
4198 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4199 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4200 
4201 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4202 	lun = sdebug_no_lun_0 ? 1 : 0;
4203 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4204 		memset(arr, 0, sizeof(arr));
4205 		lun_p = (struct scsi_lun *)&arr[0];
4206 		if (k == 0) {
4207 			put_unaligned_be32(rlen, &arr[0]);
4208 			++lun_p;
4209 			j = 1;
4210 		}
4211 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4212 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4213 				break;
4214 			int_to_scsilun(lun++, lun_p);
4215 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4216 				lun_p->scsi_lun[0] |= 0x40;
4217 		}
4218 		if (j < RL_BUCKET_ELEMS)
4219 			break;
4220 		n = j * sz_lun;
4221 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4222 		if (res)
4223 			return res;
4224 		off_rsp += n;
4225 	}
4226 	if (wlun_cnt) {
4227 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4228 		++j;
4229 	}
4230 	if (j > 0)
4231 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4232 	return res;
4233 }
4234 
4235 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4236 {
4237 	bool is_bytchk3 = false;
4238 	u8 bytchk;
4239 	int ret, j;
4240 	u32 vnum, a_num, off;
4241 	const u32 lb_size = sdebug_sector_size;
4242 	u64 lba;
4243 	u8 *arr;
4244 	u8 *cmd = scp->cmnd;
4245 	struct sdeb_store_info *sip = devip2sip(devip, true);
4246 	rwlock_t *macc_lckp = &sip->macc_lck;
4247 
4248 	bytchk = (cmd[1] >> 1) & 0x3;
4249 	if (bytchk == 0) {
4250 		return 0;	/* always claim internal verify okay */
4251 	} else if (bytchk == 2) {
4252 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4253 		return check_condition_result;
4254 	} else if (bytchk == 3) {
4255 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4256 	}
4257 	switch (cmd[0]) {
4258 	case VERIFY_16:
4259 		lba = get_unaligned_be64(cmd + 2);
4260 		vnum = get_unaligned_be32(cmd + 10);
4261 		break;
4262 	case VERIFY:		/* is VERIFY(10) */
4263 		lba = get_unaligned_be32(cmd + 2);
4264 		vnum = get_unaligned_be16(cmd + 7);
4265 		break;
4266 	default:
4267 		mk_sense_invalid_opcode(scp);
4268 		return check_condition_result;
4269 	}
4270 	if (vnum == 0)
4271 		return 0;	/* not an error */
4272 	a_num = is_bytchk3 ? 1 : vnum;
4273 	/* Treat following check like one for read (i.e. no write) access */
4274 	ret = check_device_access_params(scp, lba, a_num, false);
4275 	if (ret)
4276 		return ret;
4277 
4278 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4279 	if (!arr) {
4280 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4281 				INSUFF_RES_ASCQ);
4282 		return check_condition_result;
4283 	}
4284 	/* Not changing store, so only need read access */
4285 	read_lock(macc_lckp);
4286 
4287 	ret = do_dout_fetch(scp, a_num, arr);
4288 	if (ret == -1) {
4289 		ret = DID_ERROR << 16;
4290 		goto cleanup;
4291 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4292 		sdev_printk(KERN_INFO, scp->device,
4293 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4294 			    my_name, __func__, a_num * lb_size, ret);
4295 	}
4296 	if (is_bytchk3) {
4297 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4298 			memcpy(arr + off, arr, lb_size);
4299 	}
4300 	ret = 0;
4301 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4302 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4303 		ret = check_condition_result;
4304 		goto cleanup;
4305 	}
4306 cleanup:
4307 	read_unlock(macc_lckp);
4308 	kfree(arr);
4309 	return ret;
4310 }
4311 
4312 #define RZONES_DESC_HD 64
4313 
4314 /* Report zones depending on start LBA and reporting options */
4315 static int resp_report_zones(struct scsi_cmnd *scp,
4316 			     struct sdebug_dev_info *devip)
4317 {
4318 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4319 	int ret = 0;
4320 	u32 alloc_len, rep_opts, rep_len;
4321 	bool partial;
4322 	u64 lba, zs_lba;
4323 	u8 *arr = NULL, *desc;
4324 	u8 *cmd = scp->cmnd;
4325 	struct sdeb_zone_state *zsp;
4326 	struct sdeb_store_info *sip = devip2sip(devip, false);
4327 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4328 
4329 	if (!sdebug_dev_is_zoned(devip)) {
4330 		mk_sense_invalid_opcode(scp);
4331 		return check_condition_result;
4332 	}
4333 	zs_lba = get_unaligned_be64(cmd + 2);
4334 	alloc_len = get_unaligned_be32(cmd + 10);
4335 	if (alloc_len == 0)
4336 		return 0;	/* not an error */
4337 	rep_opts = cmd[14] & 0x3f;
4338 	partial = cmd[14] & 0x80;
4339 
4340 	if (zs_lba >= sdebug_capacity) {
4341 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4342 		return check_condition_result;
4343 	}
4344 
4345 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4346 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4347 			    max_zones);
4348 
4349 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4350 	if (!arr) {
4351 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4352 				INSUFF_RES_ASCQ);
4353 		return check_condition_result;
4354 	}
4355 
4356 	read_lock(macc_lckp);
4357 
4358 	desc = arr + 64;
4359 	for (i = 0; i < max_zones; i++) {
4360 		lba = zs_lba + devip->zsize * i;
4361 		if (lba > sdebug_capacity)
4362 			break;
4363 		zsp = zbc_zone(devip, lba);
4364 		switch (rep_opts) {
4365 		case 0x00:
4366 			/* All zones */
4367 			break;
4368 		case 0x01:
4369 			/* Empty zones */
4370 			if (zsp->z_cond != ZC1_EMPTY)
4371 				continue;
4372 			break;
4373 		case 0x02:
4374 			/* Implicit open zones */
4375 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4376 				continue;
4377 			break;
4378 		case 0x03:
4379 			/* Explicit open zones */
4380 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4381 				continue;
4382 			break;
4383 		case 0x04:
4384 			/* Closed zones */
4385 			if (zsp->z_cond != ZC4_CLOSED)
4386 				continue;
4387 			break;
4388 		case 0x05:
4389 			/* Full zones */
4390 			if (zsp->z_cond != ZC5_FULL)
4391 				continue;
4392 			break;
4393 		case 0x06:
4394 		case 0x07:
4395 		case 0x10:
4396 			/*
4397 			 * Read-only, offline and reset-WP-recommended
4398 			 * zones are not emulated: no zones to report.
4399 			 */
4400 			continue;
4401 		case 0x11:
4402 			/* non-seq-resource set */
4403 			if (!zsp->z_non_seq_resource)
4404 				continue;
4405 			break;
4406 		case 0x3f:
4407 			/* Not write pointer (conventional) zones */
4408 			if (!zbc_zone_is_conv(zsp))
4409 				continue;
4410 			break;
4411 		default:
4412 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4413 					INVALID_FIELD_IN_CDB, 0);
4414 			ret = check_condition_result;
4415 			goto fini;
4416 		}
4417 
4418 		if (nrz < rep_max_zones) {
4419 			/* Fill zone descriptor */
4420 			desc[0] = zsp->z_type;
4421 			desc[1] = zsp->z_cond << 4;
4422 			if (zsp->z_non_seq_resource)
4423 				desc[1] |= 1 << 1;
4424 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4425 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4426 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4427 			desc += 64;
4428 		}
4429 
4430 		if (partial && nrz >= rep_max_zones)
4431 			break;
4432 
4433 		nrz++;
4434 	}
4435 
4436 	/* Report header */
4437 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4438 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4439 
4440 	rep_len = (unsigned long)desc - (unsigned long)arr;
4441 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4442 
4443 fini:
4444 	read_unlock(macc_lckp);
4445 	kfree(arr);
4446 	return ret;
4447 }
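
/*
 * Editorial sketch (not part of the driver): decoding one 64-byte zone
 * descriptor as filled in above. Per ZBC, byte 0 carries the zone type,
 * byte 1 bits 7:4 the zone condition, and the big-endian 64-bit fields at
 * offsets 8, 16 and 24 the zone length, start LBA and write pointer.
 */
static __maybe_unused void parse_zone_desc_sketch(const u8 *desc)
{
	u8 z_type = desc[0] & 0xf;
	u8 z_cond = desc[1] >> 4;
	u64 z_len = get_unaligned_be64(desc + 8);
	u64 z_start = get_unaligned_be64(desc + 16);
	u64 z_wp = get_unaligned_be64(desc + 24);

	pr_debug("type=0x%x cond=0x%x len=%llu start=%llu wp=%llu\n",
		 z_type, z_cond, z_len, z_start, z_wp);
}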
4448 
4449 /* Logic transplanted from tcmu-runner, file_zbc.c */
4450 static void zbc_open_all(struct sdebug_dev_info *devip)
4451 {
4452 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4453 	unsigned int i;
4454 
4455 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4456 		if (zsp->z_cond == ZC4_CLOSED)
4457 			zbc_open_zone(devip, &devip->zstate[i], true);
4458 	}
4459 }
4460 
4461 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4462 {
4463 	int res = 0;
4464 	u64 z_id;
4465 	enum sdebug_z_cond zc;
4466 	u8 *cmd = scp->cmnd;
4467 	struct sdeb_zone_state *zsp;
4468 	bool all = cmd[14] & 0x01;
4469 	struct sdeb_store_info *sip = devip2sip(devip, false);
4470 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4471 
4472 	if (!sdebug_dev_is_zoned(devip)) {
4473 		mk_sense_invalid_opcode(scp);
4474 		return check_condition_result;
4475 	}
4476 
4477 	write_lock(macc_lckp);
4478 
4479 	if (all) {
4480 		/* Check if all closed zones can be opened */
4481 		if (devip->max_open &&
4482 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4483 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4484 					INSUFF_ZONE_ASCQ);
4485 			res = check_condition_result;
4486 			goto fini;
4487 		}
4488 		/* Open all closed zones */
4489 		zbc_open_all(devip);
4490 		goto fini;
4491 	}
4492 
4493 	/* Open the specified zone */
4494 	z_id = get_unaligned_be64(cmd + 2);
4495 	if (z_id >= sdebug_capacity) {
4496 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4497 		res = check_condition_result;
4498 		goto fini;
4499 	}
4500 
4501 	zsp = zbc_zone(devip, z_id);
4502 	if (z_id != zsp->z_start) {
4503 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4504 		res = check_condition_result;
4505 		goto fini;
4506 	}
4507 	if (zbc_zone_is_conv(zsp)) {
4508 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4509 		res = check_condition_result;
4510 		goto fini;
4511 	}
4512 
4513 	zc = zsp->z_cond;
4514 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4515 		goto fini;
4516 
4517 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4518 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4519 				INSUFF_ZONE_ASCQ);
4520 		res = check_condition_result;
4521 		goto fini;
4522 	}
4523 
4524 	zbc_open_zone(devip, zsp, true);
4525 fini:
4526 	write_unlock(macc_lckp);
4527 	return res;
4528 }
4529 
4530 static void zbc_close_all(struct sdebug_dev_info *devip)
4531 {
4532 	unsigned int i;
4533 
4534 	for (i = 0; i < devip->nr_zones; i++)
4535 		zbc_close_zone(devip, &devip->zstate[i]);
4536 }
4537 
4538 static int resp_close_zone(struct scsi_cmnd *scp,
4539 			   struct sdebug_dev_info *devip)
4540 {
4541 	int res = 0;
4542 	u64 z_id;
4543 	u8 *cmd = scp->cmnd;
4544 	struct sdeb_zone_state *zsp;
4545 	bool all = cmd[14] & 0x01;
4546 	struct sdeb_store_info *sip = devip2sip(devip, false);
4547 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4548 
4549 	if (!sdebug_dev_is_zoned(devip)) {
4550 		mk_sense_invalid_opcode(scp);
4551 		return check_condition_result;
4552 	}
4553 
4554 	write_lock(macc_lckp);
4555 
4556 	if (all) {
4557 		zbc_close_all(devip);
4558 		goto fini;
4559 	}
4560 
4561 	/* Close specified zone */
4562 	z_id = get_unaligned_be64(cmd + 2);
4563 	if (z_id >= sdebug_capacity) {
4564 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4565 		res = check_condition_result;
4566 		goto fini;
4567 	}
4568 
4569 	zsp = zbc_zone(devip, z_id);
4570 	if (z_id != zsp->z_start) {
4571 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4572 		res = check_condition_result;
4573 		goto fini;
4574 	}
4575 	if (zbc_zone_is_conv(zsp)) {
4576 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4577 		res = check_condition_result;
4578 		goto fini;
4579 	}
4580 
4581 	zbc_close_zone(devip, zsp);
4582 fini:
4583 	write_unlock(macc_lckp);
4584 	return res;
4585 }
4586 
4587 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4588 			    struct sdeb_zone_state *zsp, bool empty)
4589 {
4590 	enum sdebug_z_cond zc = zsp->z_cond;
4591 
4592 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4593 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4594 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4595 			zbc_close_zone(devip, zsp);
4596 		if (zsp->z_cond == ZC4_CLOSED)
4597 			devip->nr_closed--;
4598 		zsp->z_wp = zsp->z_start + zsp->z_size;
4599 		zsp->z_cond = ZC5_FULL;
4600 	}
4601 }
4602 
4603 static void zbc_finish_all(struct sdebug_dev_info *devip)
4604 {
4605 	unsigned int i;
4606 
4607 	for (i = 0; i < devip->nr_zones; i++)
4608 		zbc_finish_zone(devip, &devip->zstate[i], false);
4609 }
4610 
4611 static int resp_finish_zone(struct scsi_cmnd *scp,
4612 			    struct sdebug_dev_info *devip)
4613 {
4614 	struct sdeb_zone_state *zsp;
4615 	int res = 0;
4616 	u64 z_id;
4617 	u8 *cmd = scp->cmnd;
4618 	bool all = cmd[14] & 0x01;
4619 	struct sdeb_store_info *sip = devip2sip(devip, false);
4620 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4621 
4622 	if (!sdebug_dev_is_zoned(devip)) {
4623 		mk_sense_invalid_opcode(scp);
4624 		return check_condition_result;
4625 	}
4626 
4627 	write_lock(macc_lckp);
4628 
4629 	if (all) {
4630 		zbc_finish_all(devip);
4631 		goto fini;
4632 	}
4633 
4634 	/* Finish the specified zone */
4635 	z_id = get_unaligned_be64(cmd + 2);
4636 	if (z_id >= sdebug_capacity) {
4637 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4638 		res = check_condition_result;
4639 		goto fini;
4640 	}
4641 
4642 	zsp = zbc_zone(devip, z_id);
4643 	if (z_id != zsp->z_start) {
4644 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4645 		res = check_condition_result;
4646 		goto fini;
4647 	}
4648 	if (zbc_zone_is_conv(zsp)) {
4649 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4650 		res = check_condition_result;
4651 		goto fini;
4652 	}
4653 
4654 	zbc_finish_zone(devip, zsp, true);
4655 fini:
4656 	write_unlock(macc_lckp);
4657 	return res;
4658 }
4659 
4660 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4661 			 struct sdeb_zone_state *zsp)
4662 {
4663 	enum sdebug_z_cond zc;
4664 	struct sdeb_store_info *sip = devip2sip(devip, false);
4665 
4666 	if (zbc_zone_is_conv(zsp))
4667 		return;
4668 
4669 	zc = zsp->z_cond;
4670 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4671 		zbc_close_zone(devip, zsp);
4672 
4673 	if (zsp->z_cond == ZC4_CLOSED)
4674 		devip->nr_closed--;
4675 
4676 	if (zsp->z_wp > zsp->z_start)
4677 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4678 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4679 
4680 	zsp->z_non_seq_resource = false;
4681 	zsp->z_wp = zsp->z_start;
4682 	zsp->z_cond = ZC1_EMPTY;
4683 }
4684 
4685 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4686 {
4687 	unsigned int i;
4688 
4689 	for (i = 0; i < devip->nr_zones; i++)
4690 		zbc_rwp_zone(devip, &devip->zstate[i]);
4691 }
4692 
4693 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4694 {
4695 	struct sdeb_zone_state *zsp;
4696 	int res = 0;
4697 	u64 z_id;
4698 	u8 *cmd = scp->cmnd;
4699 	bool all = cmd[14] & 0x01;
4700 	struct sdeb_store_info *sip = devip2sip(devip, false);
4701 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4702 
4703 	if (!sdebug_dev_is_zoned(devip)) {
4704 		mk_sense_invalid_opcode(scp);
4705 		return check_condition_result;
4706 	}
4707 
4708 	write_lock(macc_lckp);
4709 
4710 	if (all) {
4711 		zbc_rwp_all(devip);
4712 		goto fini;
4713 	}
4714 
4715 	z_id = get_unaligned_be64(cmd + 2);
4716 	if (z_id >= sdebug_capacity) {
4717 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4718 		res = check_condition_result;
4719 		goto fini;
4720 	}
4721 
4722 	zsp = zbc_zone(devip, z_id);
4723 	if (z_id != zsp->z_start) {
4724 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4725 		res = check_condition_result;
4726 		goto fini;
4727 	}
4728 	if (zbc_zone_is_conv(zsp)) {
4729 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4730 		res = check_condition_result;
4731 		goto fini;
4732 	}
4733 
4734 	zbc_rwp_zone(devip, zsp);
4735 fini:
4736 	write_unlock(macc_lckp);
4737 	return res;
4738 }
4739 
4740 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4741 {
4742 	u16 hwq;
4743 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4744 
4745 	hwq = blk_mq_unique_tag_to_hwq(tag);
4746 
4747 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4748 	if (WARN_ON_ONCE(hwq >= submit_queues))
4749 		hwq = 0;
4750 
4751 	return sdebug_q_arr + hwq;
4752 }
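
/*
 * Editorial note: blk_mq_unique_tag() packs the hardware queue index into
 * the upper 16 bits of the returned value and the per-queue tag into the
 * lower 16 bits, so the blk_mq_unique_tag_to_hwq(tag) call above is simply
 * tag >> 16.
 */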
4753 
4754 static u32 get_tag(struct scsi_cmnd *cmnd)
4755 {
4756 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4757 }
4758 
4759 /* Queued (deferred) command completions converge here. */
4760 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4761 {
4762 	bool aborted = sd_dp->aborted;
4763 	int qc_idx;
4764 	int retiring = 0;
4765 	unsigned long iflags;
4766 	struct sdebug_queue *sqp;
4767 	struct sdebug_queued_cmd *sqcp;
4768 	struct scsi_cmnd *scp;
4769 	struct sdebug_dev_info *devip;
4770 
4771 	if (unlikely(aborted))
4772 		sd_dp->aborted = false;
4773 	qc_idx = sd_dp->qc_idx;
4774 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4775 	if (sdebug_statistics) {
4776 		atomic_inc(&sdebug_completions);
4777 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4778 			atomic_inc(&sdebug_miss_cpus);
4779 	}
4780 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4781 		pr_err("wild qc_idx=%d\n", qc_idx);
4782 		return;
4783 	}
4784 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4785 	sd_dp->defer_t = SDEB_DEFER_NONE;
4786 	sqcp = &sqp->qc_arr[qc_idx];
4787 	scp = sqcp->a_cmnd;
4788 	if (unlikely(scp == NULL)) {
4789 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4790 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4791 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4792 		return;
4793 	}
4794 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4795 	if (likely(devip))
4796 		atomic_dec(&devip->num_in_q);
4797 	else
4798 		pr_err("devip=NULL\n");
4799 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4800 		retiring = 1;
4801 
4802 	sqcp->a_cmnd = NULL;
4803 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4804 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4805 		pr_err("Unexpected completion\n");
4806 		return;
4807 	}
4808 
4809 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4810 		int k, retval;
4811 
4812 		retval = atomic_read(&retired_max_queue);
4813 		if (qc_idx >= retval) {
4814 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4815 			pr_err("index %d too large\n", retval);
4816 			return;
4817 		}
4818 		k = find_last_bit(sqp->in_use_bm, retval);
4819 		if ((k < sdebug_max_queue) || (k == retval))
4820 			atomic_set(&retired_max_queue, 0);
4821 		else
4822 			atomic_set(&retired_max_queue, k + 1);
4823 	}
4824 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4825 	if (unlikely(aborted)) {
4826 		if (sdebug_verbose)
4827 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4828 		return;
4829 	}
4830 	scsi_done(scp); /* callback to mid level */
4831 }
4832 
4833 /* When high resolution timer goes off this function is called. */
4834 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4835 {
4836 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4837 						  hrt);
4838 	sdebug_q_cmd_complete(sd_dp);
4839 	return HRTIMER_NORESTART;
4840 }
4841 
4842 /* When work queue schedules work, it calls this function. */
4843 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4844 {
4845 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4846 						  ew.work);
4847 	sdebug_q_cmd_complete(sd_dp);
4848 }
4849 
4850 static bool got_shared_uuid;
4851 static uuid_t shared_uuid;
4852 
4853 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4854 {
4855 	struct sdeb_zone_state *zsp;
4856 	sector_t capacity = get_sdebug_capacity();
4857 	sector_t zstart = 0;
4858 	unsigned int i;
4859 
4860 	/*
4861 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4862 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4863 	 * use the specified zone size, checking that at least 2 zones can be
4864 	 * created for the device.
4865 	 */
4866 	if (!sdeb_zbc_zone_size_mb) {
4867 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4868 			>> ilog2(sdebug_sector_size);
4869 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4870 			devip->zsize >>= 1;
4871 		if (devip->zsize < 2) {
4872 			pr_err("Device capacity too small\n");
4873 			return -EINVAL;
4874 		}
4875 	} else {
4876 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4877 			pr_err("Zone size is not a power of 2\n");
4878 			return -EINVAL;
4879 		}
4880 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4881 			>> ilog2(sdebug_sector_size);
4882 		if (devip->zsize >= capacity) {
4883 			pr_err("Zone size too large for device capacity\n");
4884 			return -EINVAL;
4885 		}
4886 	}
4887 
4888 	devip->zsize_shift = ilog2(devip->zsize);
4889 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4890 
4891 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4892 		pr_err("Number of conventional zones too large\n");
4893 		return -EINVAL;
4894 	}
4895 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4896 
4897 	if (devip->zmodel == BLK_ZONED_HM) {
4898 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4899 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4900 			devip->max_open = (devip->nr_zones - 1) / 2;
4901 		else
4902 			devip->max_open = sdeb_zbc_max_open;
4903 	}
4904 
4905 	devip->zstate = kcalloc(devip->nr_zones,
4906 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4907 	if (!devip->zstate)
4908 		return -ENOMEM;
4909 
4910 	for (i = 0; i < devip->nr_zones; i++) {
4911 		zsp = &devip->zstate[i];
4912 
4913 		zsp->z_start = zstart;
4914 
4915 		if (i < devip->nr_conv_zones) {
4916 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4917 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4918 			zsp->z_wp = (sector_t)-1;
4919 		} else {
4920 			if (devip->zmodel == BLK_ZONED_HM)
4921 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4922 			else
4923 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4924 			zsp->z_cond = ZC1_EMPTY;
4925 			zsp->z_wp = zsp->z_start;
4926 		}
4927 
4928 		if (zsp->z_start + devip->zsize < capacity)
4929 			zsp->z_size = devip->zsize;
4930 		else
4931 			zsp->z_size = capacity - zsp->z_start;
4932 
4933 		zstart += zsp->z_size;
4934 	}
4935 
4936 	return 0;
4937 }
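/*
 * Illustrative sizing example (editor's note, assuming DEF_ZBC_ZONE_SIZE_MB
 * is 128 and sdebug_sector_size is 512): the initial zone size is
 * (128 * SZ_1M) >> 9 = 262144 sectors. For a 262144 sector (128 MiB)
 * capacity, the loop above halves zsize while capacity < 4 * zsize:
 * 262144 -> 131072 -> 65536, leaving 4 zones of 32 MiB each.
 */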
4938 
4939 static struct sdebug_dev_info *sdebug_device_create(
4940 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4941 {
4942 	struct sdebug_dev_info *devip;
4943 
4944 	devip = kzalloc(sizeof(*devip), flags);
4945 	if (devip) {
4946 		if (sdebug_uuid_ctl == 1)
4947 			uuid_gen(&devip->lu_name);
4948 		else if (sdebug_uuid_ctl == 2) {
4949 			if (got_shared_uuid)
4950 				devip->lu_name = shared_uuid;
4951 			else {
4952 				uuid_gen(&shared_uuid);
4953 				got_shared_uuid = true;
4954 				devip->lu_name = shared_uuid;
4955 			}
4956 		}
4957 		devip->sdbg_host = sdbg_host;
4958 		if (sdeb_zbc_in_use) {
4959 			devip->zmodel = sdeb_zbc_model;
4960 			if (sdebug_device_create_zones(devip)) {
4961 				kfree(devip);
4962 				return NULL;
4963 			}
4964 		} else {
4965 			devip->zmodel = BLK_ZONED_NONE;
4966 		}
4968 		devip->create_ts = ktime_get_boottime();
4969 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4970 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4971 	}
4972 	return devip;
4973 }
4974 
4975 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4976 {
4977 	struct sdebug_host_info *sdbg_host;
4978 	struct sdebug_dev_info *open_devip = NULL;
4979 	struct sdebug_dev_info *devip;
4980 
4981 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4982 	if (!sdbg_host) {
4983 		pr_err("Host info NULL\n");
4984 		return NULL;
4985 	}
4986 
4987 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4988 		if ((devip->used) && (devip->channel == sdev->channel) &&
4989 		    (devip->target == sdev->id) &&
4990 		    (devip->lun == sdev->lun))
4991 			return devip;
4992 		else {
4993 			if ((!devip->used) && (!open_devip))
4994 				open_devip = devip;
4995 		}
4996 	}
4997 	if (!open_devip) { /* try and make a new one */
4998 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4999 		if (!open_devip) {
5000 			pr_err("out of memory at line %d\n", __LINE__);
5001 			return NULL;
5002 		}
5003 	}
5004 
5005 	open_devip->channel = sdev->channel;
5006 	open_devip->target = sdev->id;
5007 	open_devip->lun = sdev->lun;
5008 	open_devip->sdbg_host = sdbg_host;
5009 	atomic_set(&open_devip->num_in_q, 0);
5010 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5011 	open_devip->used = true;
5012 	return open_devip;
5013 }
5014 
5015 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5016 {
5017 	if (sdebug_verbose)
5018 		pr_info("slave_alloc <%u %u %u %llu>\n",
5019 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5020 	return 0;
5021 }
5022 
5023 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5024 {
5025 	struct sdebug_dev_info *devip =
5026 			(struct sdebug_dev_info *)sdp->hostdata;
5027 
5028 	if (sdebug_verbose)
5029 		pr_info("slave_configure <%u %u %u %llu>\n",
5030 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5031 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5032 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5033 	if (smp_load_acquire(&sdebug_deflect_incoming)) {
5034 		pr_info("Exit early due to deflect_incoming\n");
5035 		return 1;
5036 	}
5037 	if (devip == NULL) {
5038 		devip = find_build_dev_info(sdp);
5039 		if (devip == NULL)
5040 			return 1;  /* no resources, will be marked offline */
5041 	}
5042 	sdp->hostdata = devip;
5043 	if (sdebug_no_uld)
5044 		sdp->no_uld_attach = 1;
5045 	config_cdb_len(sdp);
5046 	return 0;
5047 }
5048 
5049 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5050 {
5051 	struct sdebug_dev_info *devip =
5052 		(struct sdebug_dev_info *)sdp->hostdata;
5053 
5054 	if (sdebug_verbose)
5055 		pr_info("slave_destroy <%u %u %u %llu>\n",
5056 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5057 	if (devip) {
5058 		/* make this slot available for re-use */
5059 		devip->used = false;
5060 		sdp->hostdata = NULL;
5061 	}
5062 }
5063 
5064 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5065 			   enum sdeb_defer_type defer_t)
5066 {
5067 	if (!sd_dp)
5068 		return;
5069 	if (defer_t == SDEB_DEFER_HRT)
5070 		hrtimer_cancel(&sd_dp->hrt);
5071 	else if (defer_t == SDEB_DEFER_WQ)
5072 		cancel_work_sync(&sd_dp->ew.work);
5073 }
5074 
5075 /* If @cmnd is found, deletes its timer or work queue and returns true; else
5076    returns false */
5077 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5078 {
5079 	unsigned long iflags;
5080 	int j, k, qmax, r_qmax;
5081 	enum sdeb_defer_type l_defer_t;
5082 	struct sdebug_queue *sqp;
5083 	struct sdebug_queued_cmd *sqcp;
5084 	struct sdebug_dev_info *devip;
5085 	struct sdebug_defer *sd_dp;
5086 
5087 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5088 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5089 		qmax = sdebug_max_queue;
5090 		r_qmax = atomic_read(&retired_max_queue);
5091 		if (r_qmax > qmax)
5092 			qmax = r_qmax;
5093 		for (k = 0; k < qmax; ++k) {
5094 			if (test_bit(k, sqp->in_use_bm)) {
5095 				sqcp = &sqp->qc_arr[k];
5096 				if (cmnd != sqcp->a_cmnd)
5097 					continue;
5098 				/* found */
5099 				devip = (struct sdebug_dev_info *)
5100 						cmnd->device->hostdata;
5101 				if (devip)
5102 					atomic_dec(&devip->num_in_q);
5103 				sqcp->a_cmnd = NULL;
5104 				sd_dp = sqcp->sd_dp;
5105 				if (sd_dp) {
5106 					l_defer_t = sd_dp->defer_t;
5107 					sd_dp->defer_t = SDEB_DEFER_NONE;
5108 				} else
5109 					l_defer_t = SDEB_DEFER_NONE;
5110 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5111 				stop_qc_helper(sd_dp, l_defer_t);
5112 				clear_bit(k, sqp->in_use_bm);
5113 				return true;
5114 			}
5115 		}
5116 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5117 	}
5118 	return false;
5119 }
5120 
5121 /* Deletes (stops) timers or work queues of all queued commands */
5122 static void stop_all_queued(bool done_with_no_conn)
5123 {
5124 	unsigned long iflags;
5125 	int j, k;
5126 	enum sdeb_defer_type l_defer_t;
5127 	struct sdebug_queue *sqp;
5128 	struct sdebug_queued_cmd *sqcp;
5129 	struct sdebug_dev_info *devip;
5130 	struct sdebug_defer *sd_dp;
5131 	struct scsi_cmnd *scp;
5132 
5133 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5134 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5135 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5136 			if (test_bit(k, sqp->in_use_bm)) {
5137 				sqcp = &sqp->qc_arr[k];
5138 				scp = sqcp->a_cmnd;
5139 				if (!scp)
5140 					continue;
5141 				devip = (struct sdebug_dev_info *)
5142 					sqcp->a_cmnd->device->hostdata;
5143 				if (devip)
5144 					atomic_dec(&devip->num_in_q);
5145 				sqcp->a_cmnd = NULL;
5146 				sd_dp = sqcp->sd_dp;
5147 				if (sd_dp) {
5148 					l_defer_t = sd_dp->defer_t;
5149 					sd_dp->defer_t = SDEB_DEFER_NONE;
5150 				} else
5151 					l_defer_t = SDEB_DEFER_NONE;
5152 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5153 				stop_qc_helper(sd_dp, l_defer_t);
5154 				if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
5155 					scp->result = DID_NO_CONNECT << 16;
5156 					scsi_done(scp);
5157 				}
5158 				clear_bit(k, sqp->in_use_bm);
5159 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5160 			}
5161 		}
5162 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5163 	}
5164 }
5165 
5166 /* Free queued command memory on heap */
5167 static void free_all_queued(void)
5168 {
5169 	int j, k;
5170 	struct sdebug_queue *sqp;
5171 	struct sdebug_queued_cmd *sqcp;
5172 
5173 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5174 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5175 			sqcp = &sqp->qc_arr[k];
5176 			kfree(sqcp->sd_dp);
5177 			sqcp->sd_dp = NULL;
5178 		}
5179 	}
5180 }
5181 
5182 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5183 {
5184 	bool ok;
5185 
5186 	++num_aborts;
5187 	if (SCpnt) {
5188 		ok = stop_queued_cmnd(SCpnt);
5189 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5190 			sdev_printk(KERN_INFO, SCpnt->device,
5191 				    "%s: command%s found\n", __func__,
5192 				    ok ? "" : " not");
5193 	}
5194 	return SUCCESS;
5195 }
5196 
5197 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5198 {
5199 	++num_dev_resets;
5200 	if (SCpnt && SCpnt->device) {
5201 		struct scsi_device *sdp = SCpnt->device;
5202 		struct sdebug_dev_info *devip =
5203 				(struct sdebug_dev_info *)sdp->hostdata;
5204 
5205 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5206 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5207 		if (devip)
5208 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5209 	}
5210 	return SUCCESS;
5211 }
5212 
5213 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5214 {
5215 	struct sdebug_host_info *sdbg_host;
5216 	struct sdebug_dev_info *devip;
5217 	struct scsi_device *sdp;
5218 	struct Scsi_Host *hp;
5219 	int k = 0;
5220 
5221 	++num_target_resets;
5222 	if (!SCpnt)
5223 		goto lie;
5224 	sdp = SCpnt->device;
5225 	if (!sdp)
5226 		goto lie;
5227 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5228 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5229 	hp = sdp->host;
5230 	if (!hp)
5231 		goto lie;
5232 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5233 	if (sdbg_host) {
5234 		list_for_each_entry(devip,
5235 				    &sdbg_host->dev_info_list,
5236 				    dev_list)
5237 			if (devip->target == sdp->id) {
5238 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5239 				++k;
5240 			}
5241 	}
5242 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5243 		sdev_printk(KERN_INFO, sdp,
5244 			    "%s: %d device(s) found in target\n", __func__, k);
5245 lie:
5246 	return SUCCESS;
5247 }
5248 
5249 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5250 {
5251 	struct sdebug_host_info *sdbg_host;
5252 	struct sdebug_dev_info *devip;
5253 	struct scsi_device *sdp;
5254 	struct Scsi_Host *hp;
5255 	int k = 0;
5256 
5257 	++num_bus_resets;
5258 	if (!(SCpnt && SCpnt->device))
5259 		goto lie;
5260 	sdp = SCpnt->device;
5261 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5262 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5263 	hp = sdp->host;
5264 	if (hp) {
5265 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5266 		if (sdbg_host) {
5267 			list_for_each_entry(devip,
5268 					    &sdbg_host->dev_info_list,
5269 					    dev_list) {
5270 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5271 				++k;
5272 			}
5273 		}
5274 	}
5275 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5276 		sdev_printk(KERN_INFO, sdp,
5277 			    "%s: %d device(s) found in host\n", __func__, k);
5278 lie:
5279 	return SUCCESS;
5280 }
5281 
5282 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5283 {
5284 	struct sdebug_host_info *sdbg_host;
5285 	struct sdebug_dev_info *devip;
5286 	int k = 0;
5287 
5288 	++num_host_resets;
5289 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5290 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5291 	spin_lock(&sdebug_host_list_lock);
5292 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5293 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5294 				    dev_list) {
5295 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5296 			++k;
5297 		}
5298 	}
5299 	spin_unlock(&sdebug_host_list_lock);
5300 	stop_all_queued(false);
5301 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5302 		sdev_printk(KERN_INFO, SCpnt->device,
5303 			    "%s: %d device(s) found\n", __func__, k);
5304 	return SUCCESS;
5305 }
5306 
5307 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5308 {
5309 	struct msdos_partition *pp;
5310 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5311 	int sectors_per_part, num_sectors, k;
5312 	int heads_by_sects, start_sec, end_sec;
5313 
5314 	/* assume partition table already zeroed */
5315 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5316 		return;
5317 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5318 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5319 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5320 	}
5321 	num_sectors = (int)get_sdebug_capacity();
5322 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5323 			   / sdebug_num_parts;
5324 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5325 	starts[0] = sdebug_sectors_per;
5326 	max_part_secs = sectors_per_part;
5327 	for (k = 1; k < sdebug_num_parts; ++k) {
5328 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5329 			    * heads_by_sects;
5330 		if (starts[k] - starts[k - 1] < max_part_secs)
5331 			max_part_secs = starts[k] - starts[k - 1];
5332 	}
5333 	starts[sdebug_num_parts] = num_sectors;
5334 	starts[sdebug_num_parts + 1] = 0;
5335 
5336 	ramp[510] = 0x55;	/* magic partition markings */
5337 	ramp[511] = 0xAA;
5338 	pp = (struct msdos_partition *)(ramp + 0x1be);
5339 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5340 		start_sec = starts[k];
5341 		end_sec = starts[k] + max_part_secs - 1;
5342 		pp->boot_ind = 0;
5343 
5344 		pp->cyl = start_sec / heads_by_sects;
5345 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5346 			   / sdebug_sectors_per;
5347 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5348 
5349 		pp->end_cyl = end_sec / heads_by_sects;
5350 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5351 			       / sdebug_sectors_per;
5352 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5353 
5354 		pp->start_sect = cpu_to_le32(start_sec);
5355 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5356 		pp->sys_ind = 0x83;	/* plain Linux partition */
5357 	}
5358 }
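/*
 * Worked CHS example (editor's note, hypothetical geometry): with
 * sdebug_heads = 16 and sdebug_sectors_per = 32, heads_by_sects = 512.
 * A partition starting at sector 2048 is encoded as cyl = 2048 / 512 = 4,
 * head = (2048 - 4 * 512) / 32 = 0, sector = (2048 % 32) + 1 = 1, i.e.
 * CHS (4, 0, 1) in its MS-DOS partition entry.
 */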
5359 
5360 static void sdeb_block_all_queues(void)
5361 {
5362 	int j;
5363 	struct sdebug_queue *sqp;
5364 
5365 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5366 		atomic_set(&sqp->blocked, (int)true);
5367 }
5368 
5369 static void sdeb_unblock_all_queues(void)
5370 {
5371 	int j;
5372 	struct sdebug_queue *sqp;
5373 
5374 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5375 		atomic_set(&sqp->blocked, (int)false);
5376 }
5377 
5378 static void
5379 sdeb_add_n_hosts(int num_hosts)
5380 {
5381 	if (num_hosts < 1)
5382 		return;
5383 	do {
5384 		bool found;
5385 		unsigned long idx;
5386 		struct sdeb_store_info *sip;
5387 		bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
5388 
5389 		found = false;
5390 		if (want_phs) {
5391 			xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
5392 				sdeb_most_recent_idx = (int)idx;
5393 				found = true;
5394 				break;
5395 			}
5396 			if (found)	/* re-use case */
5397 				sdebug_add_host_helper((int)idx);
5398 			else
5399 				sdebug_do_add_host(true	/* make new store */);
5400 		} else {
5401 			sdebug_do_add_host(false);
5402 		}
5403 	} while (--num_hosts);
5404 }
5405 
5406 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5407  * commands will be processed normally before triggers occur.
5408  */
5409 static void tweak_cmnd_count(void)
5410 {
5411 	int count, modulo;
5412 
5413 	modulo = abs(sdebug_every_nth);
5414 	if (modulo < 2)
5415 		return;
5416 	sdeb_block_all_queues();
5417 	count = atomic_read(&sdebug_cmnd_count);
5418 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5419 	sdeb_unblock_all_queues();
5420 }
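/*
 * Example (editor's note): with every_nth = 100 and sdebug_cmnd_count at
 * 250, the count is rounded down to 200, so the next 99 commands are
 * processed normally and a trigger next fires when the count reaches 300.
 */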
5421 
5422 static void clear_queue_stats(void)
5423 {
5424 	atomic_set(&sdebug_cmnd_count, 0);
5425 	atomic_set(&sdebug_completions, 0);
5426 	atomic_set(&sdebug_miss_cpus, 0);
5427 	atomic_set(&sdebug_a_tsf, 0);
5428 }
5429 
5430 static bool inject_on_this_cmd(void)
5431 {
5432 	if (sdebug_every_nth == 0)
5433 		return false;
5434 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5435 }
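/* E.g. (editor's note) every_nth = 5 makes this return true whenever the
 * running command count is a multiple of 5; every_nth = 0 disables
 * injection entirely.
 */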
5436 
5437 static int process_deflect_incoming(struct scsi_cmnd *scp)
5438 {
5439 	u8 opcode = scp->cmnd[0];
5440 
5441 	if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
5442 		return 0;
5443 	return DID_NO_CONNECT << 16;
5444 }
5445 
5446 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5447 
5448 /* Complete the processing of the thread that queued a SCSI command to this
5449  * driver. It either completes the command by calling scsi_done() or
5450  * schedules an hrtimer or work queue item and then returns 0. Returns
5451  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5452  */
5453 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5454 			 int scsi_result,
5455 			 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
5456 			 int delta_jiff, int ndelay)
5457 {
5458 	bool new_sd_dp;
5459 	bool inject = false;
5460 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5461 	int k, num_in_q, qdepth;
5462 	unsigned long iflags;
5463 	u64 ns_from_boot = 0;
5464 	struct sdebug_queue *sqp;
5465 	struct sdebug_queued_cmd *sqcp;
5466 	struct scsi_device *sdp;
5467 	struct sdebug_defer *sd_dp;
5468 
5469 	if (unlikely(devip == NULL)) {
5470 		if (scsi_result == 0)
5471 			scsi_result = DID_NO_CONNECT << 16;
5472 		goto respond_in_thread;
5473 	}
5474 	sdp = cmnd->device;
5475 
5476 	if (delta_jiff == 0) {
5477 		sqp = get_queue(cmnd);
5478 		if (atomic_read(&sqp->blocked)) {
5479 			if (smp_load_acquire(&sdebug_deflect_incoming))
5480 				return process_deflect_incoming(cmnd);
5481 			else
5482 				return SCSI_MLQUEUE_HOST_BUSY;
5483 		}
5484 		goto respond_in_thread;
5485 	}
5486 
5487 	sqp = get_queue(cmnd);
5488 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5489 	if (unlikely(atomic_read(&sqp->blocked))) {
5490 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5491 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
5492 			scsi_result = process_deflect_incoming(cmnd);
5493 			goto respond_in_thread;
5494 		}
5495 		if (sdebug_verbose)
5496 			pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
5497 		return SCSI_MLQUEUE_HOST_BUSY;
5498 	}
5499 	num_in_q = atomic_read(&devip->num_in_q);
5500 	qdepth = cmnd->device->queue_depth;
5501 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5502 		if (scsi_result) {
5503 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5504 			goto respond_in_thread;
5505 		} else
5506 			scsi_result = device_qfull_result;
5507 	} else if (unlikely(sdebug_every_nth &&
5508 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5509 			    (scsi_result == 0))) {
5510 		if ((num_in_q == (qdepth - 1)) &&
5511 		    (atomic_inc_return(&sdebug_a_tsf) >=
5512 		     abs(sdebug_every_nth))) {
5513 			atomic_set(&sdebug_a_tsf, 0);
5514 			inject = true;
5515 			scsi_result = device_qfull_result;
5516 		}
5517 	}
5518 
5519 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5520 	if (unlikely(k >= sdebug_max_queue)) {
5521 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5522 		if (scsi_result)
5523 			goto respond_in_thread;
5524 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5525 			scsi_result = device_qfull_result;
5526 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5527 			sdev_printk(KERN_INFO, sdp,
5528 				    "%s: max_queue=%d exceeded, %s\n",
5529 				    __func__, sdebug_max_queue,
5530 				    (scsi_result ?  "status: TASK SET FULL" :
5531 						    "report: host busy"));
5532 		if (scsi_result)
5533 			goto respond_in_thread;
5534 		else
5535 			return SCSI_MLQUEUE_HOST_BUSY;
5536 	}
5537 	set_bit(k, sqp->in_use_bm);
5538 	atomic_inc(&devip->num_in_q);
5539 	sqcp = &sqp->qc_arr[k];
5540 	sqcp->a_cmnd = cmnd;
5541 	cmnd->host_scribble = (unsigned char *)sqcp;
5542 	sd_dp = sqcp->sd_dp;
5543 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5544 
5545 	if (!sd_dp) {
5546 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5547 		if (!sd_dp) {
5548 			atomic_dec(&devip->num_in_q);
5549 			clear_bit(k, sqp->in_use_bm);
5550 			return SCSI_MLQUEUE_HOST_BUSY;
5551 		}
5552 		new_sd_dp = true;
5553 	} else {
5554 		new_sd_dp = false;
5555 	}
5556 
5557 	/* Set the hostwide tag */
5558 	if (sdebug_host_max_queue)
5559 		sd_dp->hc_idx = get_tag(cmnd);
5560 
5561 	if (polled)
5562 		ns_from_boot = ktime_get_boottime_ns();
5563 
5564 	/* one of the resp_*() response functions is called here */
5565 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5566 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5567 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5568 		delta_jiff = ndelay = 0;
5569 	}
5570 	if (cmnd->result == 0 && scsi_result != 0)
5571 		cmnd->result = scsi_result;
5572 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5573 		if (atomic_read(&sdeb_inject_pending)) {
5574 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5575 			atomic_set(&sdeb_inject_pending, 0);
5576 			cmnd->result = check_condition_result;
5577 		}
5578 	}
5579 
5580 	if (unlikely(sdebug_verbose && cmnd->result))
5581 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5582 			    __func__, cmnd->result);
5583 
5584 	if (delta_jiff > 0 || ndelay > 0) {
5585 		ktime_t kt;
5586 
5587 		if (delta_jiff > 0) {
5588 			u64 ns = jiffies_to_nsecs(delta_jiff);
5589 
5590 			if (sdebug_random && ns < U32_MAX) {
5591 				ns = prandom_u32_max((u32)ns);
5592 			} else if (sdebug_random) {
5593 				ns >>= 12;	/* scale to 4 usec precision */
5594 				if (ns < U32_MAX)	/* over 4 hours max */
5595 					ns = prandom_u32_max((u32)ns);
5596 				ns <<= 12;
5597 			}
5598 			kt = ns_to_ktime(ns);
5599 		} else {	/* ndelay has a 4.2 second max */
5600 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5601 					     (u32)ndelay;
5602 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5603 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5604 
5605 				if (kt <= d) {	/* elapsed duration >= kt */
5606 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5607 					sqcp->a_cmnd = NULL;
5608 					atomic_dec(&devip->num_in_q);
5609 					clear_bit(k, sqp->in_use_bm);
5610 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5611 					if (new_sd_dp)
5612 						kfree(sd_dp);
5613 					/* call scsi_done() from this thread */
5614 					scsi_done(cmnd);
5615 					return 0;
5616 				}
5617 				/* otherwise reduce kt by elapsed time */
5618 				kt -= d;
5619 			}
5620 		}
5621 		if (polled) {
5622 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5623 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5624 			if (!sd_dp->init_poll) {
5625 				sd_dp->init_poll = true;
5626 				sqcp->sd_dp = sd_dp;
5627 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5628 				sd_dp->qc_idx = k;
5629 			}
5630 			sd_dp->defer_t = SDEB_DEFER_POLL;
5631 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5632 		} else {
5633 			if (!sd_dp->init_hrt) {
5634 				sd_dp->init_hrt = true;
5635 				sqcp->sd_dp = sd_dp;
5636 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5637 					     HRTIMER_MODE_REL_PINNED);
5638 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5639 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5640 				sd_dp->qc_idx = k;
5641 			}
5642 			sd_dp->defer_t = SDEB_DEFER_HRT;
5643 			/* schedule the invocation of scsi_done() for a later time */
5644 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5645 		}
5646 		if (sdebug_statistics)
5647 			sd_dp->issuing_cpu = raw_smp_processor_id();
5648 	} else {	/* jdelay < 0, use work queue */
5649 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5650 			     atomic_read(&sdeb_inject_pending)))
5651 			sd_dp->aborted = true;
5652 		if (polled) {
5653 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5654 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5655 			if (!sd_dp->init_poll) {
5656 				sd_dp->init_poll = true;
5657 				sqcp->sd_dp = sd_dp;
5658 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5659 				sd_dp->qc_idx = k;
5660 			}
5661 			sd_dp->defer_t = SDEB_DEFER_POLL;
5662 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5663 		} else {
5664 			if (!sd_dp->init_wq) {
5665 				sd_dp->init_wq = true;
5666 				sqcp->sd_dp = sd_dp;
5667 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5668 				sd_dp->qc_idx = k;
5669 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5670 			}
5671 			sd_dp->defer_t = SDEB_DEFER_WQ;
5672 			schedule_work(&sd_dp->ew.work);
5673 		}
5674 		if (sdebug_statistics)
5675 			sd_dp->issuing_cpu = raw_smp_processor_id();
5676 		if (unlikely(sd_dp->aborted)) {
5677 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5678 				    scsi_cmd_to_rq(cmnd)->tag);
5679 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5680 			atomic_set(&sdeb_inject_pending, 0);
5681 			sd_dp->aborted = false;
5682 		}
5683 	}
5684 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5685 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5686 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5687 	return 0;
5688 
5689 respond_in_thread:	/* call back to mid-layer using invocation thread */
5690 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5691 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5692 	if (cmnd->result == 0 && scsi_result != 0) {
5693 		cmnd->result = scsi_result;
5694 		if (sdebug_verbose)
5695 			pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
5696 				blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
5697 	}
5698 	scsi_done(cmnd);
5699 	return 0;
5700 }
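/*
 * Dispatch summary for schedule_resp() (editor's note, derived from the
 * code above):
 *   delta_jiff == 0              -> respond in the submitting thread
 *   delta_jiff > 0 or ndelay > 0 -> hrtimer completion, or SDEB_DEFER_POLL
 *                                   for REQ_POLLED requests; tiny ndelays
 *                                   that have already elapsed complete inline
 *   delta_jiff < 0               -> work queue completion
 */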
5701 
5702 /* Note: The following macros create attribute files in the
5703    /sys/module/scsi_debug/parameters directory. Unfortunately this
5704    driver is unaware of changes made via those files and so cannot
5705    trigger the auxiliary actions that it can when the corresponding
5706    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5707  */
5708 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5709 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5710 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5711 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5712 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5713 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5714 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5715 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5716 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5717 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5718 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5719 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5720 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5721 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5722 module_param_string(inq_product, sdebug_inq_product_id,
5723 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5724 module_param_string(inq_rev, sdebug_inq_product_rev,
5725 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5726 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5727 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5728 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5729 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5730 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5731 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5732 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5733 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5734 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5735 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5736 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5737 		   S_IRUGO | S_IWUSR);
5738 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5739 		   S_IRUGO | S_IWUSR);
5740 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5741 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5742 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5743 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5744 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5745 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5746 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5747 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5748 module_param_named(per_host_store, sdebug_per_host_store, bool,
5749 		   S_IRUGO | S_IWUSR);
5750 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5751 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5752 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5753 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5754 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5755 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5756 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5757 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5758 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5759 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5760 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5761 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5762 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5763 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5764 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5765 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5766 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5767 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5768 		   S_IRUGO | S_IWUSR);
5769 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5770 module_param_named(write_same_length, sdebug_write_same_length, int,
5771 		   S_IRUGO | S_IWUSR);
5772 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5773 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5774 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5775 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5776 
5777 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5778 MODULE_DESCRIPTION("SCSI debug adapter driver");
5779 MODULE_LICENSE("GPL");
5780 MODULE_VERSION(SDEBUG_VERSION);
5781 
5782 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5783 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5784 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5785 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5786 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5787 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5788 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5789 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5790 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5791 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5792 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5793 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5794 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5795 MODULE_PARM_DESC(host_max_queue,
5796 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5797 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5798 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5799 		 SDEBUG_VERSION "\")");
5800 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5801 MODULE_PARM_DESC(lbprz,
5802 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5803 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5804 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5805 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5806 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5807 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5808 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5809 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5810 MODULE_PARM_DESC(medium_error_count, "count of sectors on which to return a MEDIUM error");
5811 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5812 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5813 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5814 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5815 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5816 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5817 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5818 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5819 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5820 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5821 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5822 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5823 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5824 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5825 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5826 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5827 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5828 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5829 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5830 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5831 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5832 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5833 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5834 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5835 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5836 MODULE_PARM_DESC(uuid_ctl,
5837 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5838 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5839 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5840 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5841 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5842 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5843 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5844 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5845 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
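/*
 * Example invocation (editor's note; the parameter values are arbitrary):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=1
 *
 * creates one pseudo host with 2 targets of 4 LUNs each, backed by a
 * shared 256 MiB ram store, completing commands after a 1 jiffy delay.
 */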
5846 
5847 #define SDEBUG_INFO_LEN 256
5848 static char sdebug_info[SDEBUG_INFO_LEN];
5849 
5850 static const char *scsi_debug_info(struct Scsi_Host *shp)
5851 {
5852 	int k;
5853 
5854 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5855 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5856 	if (k >= (SDEBUG_INFO_LEN - 1))
5857 		return sdebug_info;
5858 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5859 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5860 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5861 		  "statistics", (int)sdebug_statistics);
5862 	return sdebug_info;
5863 }
5864 
5865 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5866 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5867 				 int length)
5868 {
5869 	char arr[16];
5870 	int opts;
5871 	int minLen = length > 15 ? 15 : length;
5872 
5873 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5874 		return -EACCES;
5875 	memcpy(arr, buffer, minLen);
5876 	arr[minLen] = '\0';
5877 	if (1 != sscanf(arr, "%d", &opts))
5878 		return -EINVAL;
5879 	sdebug_opts = opts;
5880 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5881 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5882 	if (sdebug_every_nth != 0)
5883 		tweak_cmnd_count();
5884 	return length;
5885 }
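/* E.g. (editor's note) 'echo 1 > /proc/scsi/scsi_debug/<host_id>' sets
 * opts to 1 (SDEBUG_OPT_NOISE), enabling verbose logging; 'echo 0' clears
 * all option bits again.
 */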
5886 
5887 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5888  * same for each scsi_debug host (if more than one). Some of the counters
5889  * output here are not atomic so may be inaccurate on a busy system. */
5890 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5891 {
5892 	int f, j, l;
5893 	struct sdebug_queue *sqp;
5894 	struct sdebug_host_info *sdhp;
5895 
5896 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5897 		   SDEBUG_VERSION, sdebug_version_date);
5898 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5899 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5900 		   sdebug_opts, sdebug_every_nth);
5901 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5902 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5903 		   sdebug_sector_size, "bytes");
5904 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5905 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5906 		   num_aborts);
5907 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5908 		   num_dev_resets, num_target_resets, num_bus_resets,
5909 		   num_host_resets);
5910 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5911 		   dix_reads, dix_writes, dif_errors);
5912 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5913 		   sdebug_statistics);
5914 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5915 		   atomic_read(&sdebug_cmnd_count),
5916 		   atomic_read(&sdebug_completions),
5917 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5918 		   atomic_read(&sdebug_a_tsf),
5919 		   atomic_read(&sdeb_mq_poll_count));
5920 
5921 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5922 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5923 		seq_printf(m, "  queue %d:\n", j);
5924 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5925 		if (f != sdebug_max_queue) {
5926 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5927 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5928 				   "first,last bits", f, l);
5929 		}
5930 	}
5931 
5932 	seq_printf(m, "this host_no=%d\n", host->host_no);
5933 	if (!xa_empty(per_store_ap)) {
5934 		bool niu;
5935 		int idx;
5936 		unsigned long l_idx;
5937 		struct sdeb_store_info *sip;
5938 
5939 		seq_puts(m, "\nhost list:\n");
5940 		j = 0;
5941 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5942 			idx = sdhp->si_idx;
5943 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5944 				   sdhp->shost->host_no, idx);
5945 			++j;
5946 		}
5947 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5948 			   sdeb_most_recent_idx);
5949 		j = 0;
5950 		xa_for_each(per_store_ap, l_idx, sip) {
5951 			niu = xa_get_mark(per_store_ap, l_idx,
5952 					  SDEB_XA_NOT_IN_USE);
5953 			idx = (int)l_idx;
5954 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5955 				   (niu ? "  not_in_use" : ""));
5956 			++j;
5957 		}
5958 	}
5959 	return 0;
5960 }
5961 
5962 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5963 {
5964 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5965 }
5966 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5967  * of delay is jiffies.
5968  */
5969 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5970 			   size_t count)
5971 {
5972 	int jdelay, res;
5973 
5974 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5975 		res = count;
5976 		if (sdebug_jdelay != jdelay) {
5977 			int j, k;
5978 			struct sdebug_queue *sqp;
5979 
5980 			sdeb_block_all_queues();
5981 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5982 			     ++j, ++sqp) {
5983 				k = find_first_bit(sqp->in_use_bm,
5984 						   sdebug_max_queue);
5985 				if (k != sdebug_max_queue) {
5986 					res = -EBUSY;   /* queued commands */
5987 					break;
5988 				}
5989 			}
5990 			if (res > 0) {
5991 				sdebug_jdelay = jdelay;
5992 				sdebug_ndelay = 0;
5993 			}
5994 			sdeb_unblock_all_queues();
5995 		}
5996 		return res;
5997 	}
5998 	return -EINVAL;
5999 }
6000 static DRIVER_ATTR_RW(delay);
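/* E.g. (editor's note) 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay'
 * requests a 2 jiffy response delay; per delay_store() above, the write
 * fails with -EBUSY while commands are still queued.
 */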
6001 
6002 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6003 {
6004 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6005 }
6006 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6007 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6008 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6009 			    size_t count)
6010 {
6011 	int ndelay, res;
6012 
6013 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6014 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6015 		res = count;
6016 		if (sdebug_ndelay != ndelay) {
6017 			int j, k;
6018 			struct sdebug_queue *sqp;
6019 
6020 			sdeb_block_all_queues();
6021 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6022 			     ++j, ++sqp) {
6023 				k = find_first_bit(sqp->in_use_bm,
6024 						   sdebug_max_queue);
6025 				if (k != sdebug_max_queue) {
6026 					res = -EBUSY;   /* queued commands */
6027 					break;
6028 				}
6029 			}
6030 			if (res > 0) {
6031 				sdebug_ndelay = ndelay;
6032 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6033 							: DEF_JDELAY;
6034 			}
6035 			sdeb_unblock_all_queues();
6036 		}
6037 		return res;
6038 	}
6039 	return -EINVAL;
6040 }
6041 static DRIVER_ATTR_RW(ndelay);
6042 
6043 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6044 {
6045 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6046 }
6047 
6048 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6049 			  size_t count)
6050 {
6051 	int opts;
6052 	char work[20];
6053 
6054 	if (sscanf(buf, "%10s", work) == 1) {
6055 		if (strncasecmp(work, "0x", 2) == 0) {
6056 			if (kstrtoint(work + 2, 16, &opts) == 0)
6057 				goto opts_done;
6058 		} else {
6059 			if (kstrtoint(work, 10, &opts) == 0)
6060 				goto opts_done;
6061 		}
6062 	}
6063 	return -EINVAL;
6064 opts_done:
6065 	sdebug_opts = opts;
6066 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6067 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6068 	tweak_cmnd_count();
6069 	return count;
6070 }
6071 static DRIVER_ATTR_RW(opts);
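/* Note (editor's): opts_store() accepts both bases, so 'echo 0xc' and
 * 'echo 12' store the same value (timeout + recovered_err per the opts
 * parameter description above).
 */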
6072 
6073 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6074 {
6075 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6076 }
6077 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6078 			   size_t count)
6079 {
6080 	int n;
6081 
6082 	/* Cannot change from or to TYPE_ZBC with sysfs */
6083 	if (sdebug_ptype == TYPE_ZBC)
6084 		return -EINVAL;
6085 
6086 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6087 		if (n == TYPE_ZBC)
6088 			return -EINVAL;
6089 		sdebug_ptype = n;
6090 		return count;
6091 	}
6092 	return -EINVAL;
6093 }
6094 static DRIVER_ATTR_RW(ptype);
6095 
6096 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6097 {
6098 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6099 }
6100 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6101 			    size_t count)
6102 {
6103 	int n;
6104 
6105 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6106 		sdebug_dsense = n;
6107 		return count;
6108 	}
6109 	return -EINVAL;
6110 }
6111 static DRIVER_ATTR_RW(dsense);
6112 
6113 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6114 {
6115 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6116 }
6117 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6118 			     size_t count)
6119 {
6120 	int n, idx;
6121 
6122 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6123 		bool want_store = (n == 0);
6124 		struct sdebug_host_info *sdhp;
6125 
6126 		n = (n > 0);
6127 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6128 		if (sdebug_fake_rw == n)
6129 			return count;	/* not transitioning so do nothing */
6130 
6131 		if (want_store) {	/* 1 --> 0 transition, set up store */
6132 			if (sdeb_first_idx < 0) {
6133 				idx = sdebug_add_store();
6134 				if (idx < 0)
6135 					return idx;
6136 			} else {
6137 				idx = sdeb_first_idx;
6138 				xa_clear_mark(per_store_ap, idx,
6139 					      SDEB_XA_NOT_IN_USE);
6140 			}
6141 			/* make all hosts use same store */
6142 			list_for_each_entry(sdhp, &sdebug_host_list,
6143 					    host_list) {
6144 				if (sdhp->si_idx != idx) {
6145 					xa_set_mark(per_store_ap, sdhp->si_idx,
6146 						    SDEB_XA_NOT_IN_USE);
6147 					sdhp->si_idx = idx;
6148 				}
6149 			}
6150 			sdeb_most_recent_idx = idx;
6151 		} else {	/* 0 --> 1 transition is trigger for shrink */
6152 			sdebug_erase_all_stores(true /* apart from first */);
6153 		}
6154 		sdebug_fake_rw = n;
6155 		return count;
6156 	}
6157 	return -EINVAL;
6158 }
6159 static DRIVER_ATTR_RW(fake_rw);
6160 
6161 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6162 {
6163 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6164 }
6165 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6166 			      size_t count)
6167 {
6168 	int n;
6169 
6170 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6171 		sdebug_no_lun_0 = n;
6172 		return count;
6173 	}
6174 	return -EINVAL;
6175 }
6176 static DRIVER_ATTR_RW(no_lun_0);
6177 
6178 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6179 {
6180 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6181 }
6182 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6183 			      size_t count)
6184 {
6185 	int n;
6186 
6187 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6188 		sdebug_num_tgts = n;
6189 		sdebug_max_tgts_luns();
6190 		return count;
6191 	}
6192 	return -EINVAL;
6193 }
6194 static DRIVER_ATTR_RW(num_tgts);
6195 
6196 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6197 {
6198 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6199 }
6200 static DRIVER_ATTR_RO(dev_size_mb);
6201 
6202 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6203 {
6204 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6205 }
6206 
6207 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6208 				    size_t count)
6209 {
6210 	bool v;
6211 
6212 	if (kstrtobool(buf, &v))
6213 		return -EINVAL;
6214 
6215 	sdebug_per_host_store = v;
6216 	return count;
6217 }
6218 static DRIVER_ATTR_RW(per_host_store);
6219 
6220 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6221 {
6222 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6223 }
6224 static DRIVER_ATTR_RO(num_parts);
6225 
6226 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6227 {
6228 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6229 }
6230 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6231 			       size_t count)
6232 {
6233 	int nth;
6234 	char work[20];
6235 
6236 	if (sscanf(buf, "%10s", work) == 1) {
6237 		if (strncasecmp(work, "0x", 2) == 0) {
6238 			if (kstrtoint(work + 2, 16, &nth) == 0)
6239 				goto every_nth_done;
6240 		} else {
6241 			if (kstrtoint(work, 10, &nth) == 0)
6242 				goto every_nth_done;
6243 		}
6244 	}
6245 	return -EINVAL;
6246 
6247 every_nth_done:
6248 	sdebug_every_nth = nth;
6249 	if (nth && !sdebug_statistics) {
6250 		pr_info("every_nth needs statistics=1, set it\n");
6251 		sdebug_statistics = true;
6252 	}
6253 	tweak_cmnd_count();
6254 	return count;
6255 }
6256 static DRIVER_ATTR_RW(every_nth);
6257 
6258 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6259 {
6260 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6261 }
6262 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6263 				size_t count)
6264 {
6265 	int n;
6266 	bool changed;
6267 
6268 	if (kstrtoint(buf, 0, &n))
6269 		return -EINVAL;
6270 	if (n >= 0) {
6271 		if (n > (int)SAM_LUN_AM_FLAT) {
6272 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6273 			return -EINVAL;
6274 		}
6275 		changed = ((int)sdebug_lun_am != n);
6276 		sdebug_lun_am = n;
6277 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6278 			struct sdebug_host_info *sdhp;
6279 			struct sdebug_dev_info *dp;
6280 
6281 			spin_lock(&sdebug_host_list_lock);
6282 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6283 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6284 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6285 				}
6286 			}
6287 			spin_unlock(&sdebug_host_list_lock);
6288 		}
6289 		return count;
6290 	}
6291 	return -EINVAL;
6292 }
6293 static DRIVER_ATTR_RW(lun_format);
6294 
6295 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6296 {
6297 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6298 }
6299 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6300 			      size_t count)
6301 {
6302 	int n;
6303 	bool changed;
6304 
6305 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6306 		if (n > 256) {
6307 			pr_warn("max_luns can be no more than 256\n");
6308 			return -EINVAL;
6309 		}
6310 		changed = (sdebug_max_luns != n);
6311 		sdebug_max_luns = n;
6312 		sdebug_max_tgts_luns();
6313 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6314 			struct sdebug_host_info *sdhp;
6315 			struct sdebug_dev_info *dp;
6316 
6317 			spin_lock(&sdebug_host_list_lock);
6318 			list_for_each_entry(sdhp, &sdebug_host_list,
6319 					    host_list) {
6320 				list_for_each_entry(dp, &sdhp->dev_info_list,
6321 						    dev_list) {
6322 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6323 						dp->uas_bm);
6324 				}
6325 			}
6326 			spin_unlock(&sdebug_host_list_lock);
6327 		}
6328 		return count;
6329 	}
6330 	return -EINVAL;
6331 }
6332 static DRIVER_ATTR_RW(max_luns);
6333 
6334 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6335 {
6336 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6337 }
6338 /* N.B. max_queue can be changed while there are queued commands. In-flight
6339  * commands beyond the new max_queue will be completed. */
6340 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6341 			       size_t count)
6342 {
6343 	int j, n, k, a;
6344 	struct sdebug_queue *sqp;
6345 
6346 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6347 	    (n <= SDEBUG_CANQUEUE) &&
6348 	    (sdebug_host_max_queue == 0)) {
6349 		sdeb_block_all_queues();
6350 		k = 0;
6351 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6352 		     ++j, ++sqp) {
6353 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6354 			if (a > k)
6355 				k = a;
6356 		}
6357 		sdebug_max_queue = n;
6358 		if (k == SDEBUG_CANQUEUE)
6359 			atomic_set(&retired_max_queue, 0);
6360 		else if (k >= n)
6361 			atomic_set(&retired_max_queue, k + 1);
6362 		else
6363 			atomic_set(&retired_max_queue, 0);
6364 		sdeb_unblock_all_queues();
6365 		return count;
6366 	}
6367 	return -EINVAL;
6368 }
6369 static DRIVER_ATTR_RW(max_queue);
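/*
 * Worked example (editor's note): lowering max_queue from 64 to 16 while
 * slot 40 is still in use leaves retired_max_queue at 41; completions in
 * sdebug_q_cmd_complete() then shrink it as the high slots drain, and it
 * returns to 0 once no in-use bit remains at or above the new limit.
 */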
6370 
6371 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6372 {
6373 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6374 }
6375 
6376 /*
6377  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6378  * in range [0, sdebug_host_max_queue), we can't change it.
6379  */
6380 static DRIVER_ATTR_RO(host_max_queue);
6381 
6382 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6383 {
6384 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6385 }
6386 static DRIVER_ATTR_RO(no_uld);
6387 
6388 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6389 {
6390 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6391 }
6392 static DRIVER_ATTR_RO(scsi_level);
6393 
6394 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6395 {
6396 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6397 }
6398 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6399 				size_t count)
6400 {
6401 	int n;
6402 	bool changed;
6403 
6404 	/* Ignore capacity change for ZBC drives for now */
6405 	if (sdeb_zbc_in_use)
6406 		return -ENOTSUPP;
6407 
6408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6409 		changed = (sdebug_virtual_gb != n);
6410 		sdebug_virtual_gb = n;
6411 		sdebug_capacity = get_sdebug_capacity();
6412 		if (changed) {
6413 			struct sdebug_host_info *sdhp;
6414 			struct sdebug_dev_info *dp;
6415 
6416 			spin_lock(&sdebug_host_list_lock);
6417 			list_for_each_entry(sdhp, &sdebug_host_list,
6418 					    host_list) {
6419 				list_for_each_entry(dp, &sdhp->dev_info_list,
6420 						    dev_list) {
6421 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6422 						dp->uas_bm);
6423 				}
6424 			}
6425 			spin_unlock(&sdebug_host_list_lock);
6426 		}
6427 		return count;
6428 	}
6429 	return -EINVAL;
6430 }
6431 static DRIVER_ATTR_RW(virtual_gb);
6432 
6433 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6434 {
6435 	/* absolute number of hosts currently active is what is shown */
6436 	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
6437 }
6438 
6439 /*
6440  * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
6441  * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
6442  * Returns -EBUSY if another add_host sysfs invocation is active.
6443  */
6444 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6445 			      size_t count)
6446 {
6447 	int delta_hosts;
6448 
6449 	if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
6450 		return -EINVAL;
6451 	if (sdebug_verbose)
6452 		pr_info("prior num_hosts=%d, num_to_add=%d\n",
6453 			atomic_read(&sdebug_num_hosts), delta_hosts);
6454 	if (delta_hosts == 0)
6455 		return count;
6456 	if (mutex_trylock(&add_host_mutex) == 0)
6457 		return -EBUSY;
6458 	if (delta_hosts > 0) {
6459 		sdeb_add_n_hosts(delta_hosts);
6460 	} else if (delta_hosts < 0) {
6461 		smp_store_release(&sdebug_deflect_incoming, true);
6462 		sdeb_block_all_queues();
6463 		if (-delta_hosts >= atomic_read(&sdebug_num_hosts))
6464 			stop_all_queued(true);
6465 		do {
6466 			if (atomic_read(&sdebug_num_hosts) < 1) {
6467 				free_all_queued();
6468 				break;
6469 			}
6470 			sdebug_do_remove_host(false);
6471 		} while (++delta_hosts);
6472 		sdeb_unblock_all_queues();
6473 		smp_store_release(&sdebug_deflect_incoming, false);
6474 	}
6475 	mutex_unlock(&add_host_mutex);
6476 	if (sdebug_verbose)
6477 		pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
6478 	return count;
6479 }
6480 static DRIVER_ATTR_RW(add_host);
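/*
 * Usage sketch (attribute path per the sysfs note further below; the
 * numbers are only examples):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host      # add 2 hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host     # remove 1
 *   echo -9999 > /sys/bus/pseudo/drivers/scsi_debug/add_host  # remove all
 *
 * A concurrent invocation returns -EBUSY because of the mutex_trylock()
 * on add_host_mutex above.
 */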
6481 
6482 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6483 {
6484 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6485 }
6486 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6487 				    size_t count)
6488 {
6489 	int n;
6490 
6491 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6492 		sdebug_vpd_use_hostno = n;
6493 		return count;
6494 	}
6495 	return -EINVAL;
6496 }
6497 static DRIVER_ATTR_RW(vpd_use_hostno);
6498 
6499 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6500 {
6501 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6502 }
6503 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6504 				size_t count)
6505 {
6506 	int n;
6507 
6508 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6509 		if (n > 0)
6510 			sdebug_statistics = true;
6511 		else {
6512 			clear_queue_stats();
6513 			sdebug_statistics = false;
6514 		}
6515 		return count;
6516 	}
6517 	return -EINVAL;
6518 }
6519 static DRIVER_ATTR_RW(statistics);
6520 
6521 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6522 {
6523 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6524 }
6525 static DRIVER_ATTR_RO(sector_size);
6526 
6527 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6528 {
6529 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6530 }
6531 static DRIVER_ATTR_RO(submit_queues);
6532 
6533 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6534 {
6535 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6536 }
6537 static DRIVER_ATTR_RO(dix);
6538 
6539 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6540 {
6541 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6542 }
6543 static DRIVER_ATTR_RO(dif);
6544 
6545 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6546 {
6547 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6548 }
6549 static DRIVER_ATTR_RO(guard);
6550 
6551 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6552 {
6553 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6554 }
6555 static DRIVER_ATTR_RO(ato);
6556 
6557 static ssize_t map_show(struct device_driver *ddp, char *buf)
6558 {
6559 	ssize_t count = 0;
6560 
6561 	if (!scsi_debug_lbp())
6562 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6563 				 sdebug_store_sectors);
6564 
6565 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6566 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6567 
6568 		if (sip)
6569 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6570 					  (int)map_size, sip->map_storep);
6571 	}
6572 	buf[count++] = '\n';
6573 	buf[count] = '\0';
6574 
6575 	return count;
6576 }
6577 static DRIVER_ATTR_RO(map);
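/*
 * Example output (illustrative values): with LBP active, "cat .../map"
 * prints the provisioning bitmap in the %*pbl bit-list format, e.g.
 * "0-1,32-47,255" for the blocks currently mapped; without LBP it prints
 * the whole range "0-<sdebug_store_sectors>".
 */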
6578 
6579 static ssize_t random_show(struct device_driver *ddp, char *buf)
6580 {
6581 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6582 }
6583 
6584 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6585 			    size_t count)
6586 {
6587 	bool v;
6588 
6589 	if (kstrtobool(buf, &v))
6590 		return -EINVAL;
6591 
6592 	sdebug_random = v;
6593 	return count;
6594 }
6595 static DRIVER_ATTR_RW(random);
6596 
6597 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6598 {
6599 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6600 }
6601 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6602 			       size_t count)
6603 {
6604 	int n;
6605 
6606 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6607 		sdebug_removable = (n > 0);
6608 		return count;
6609 	}
6610 	return -EINVAL;
6611 }
6612 static DRIVER_ATTR_RW(removable);
6613 
6614 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6615 {
6616 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6617 }
6618 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6619 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6620 			       size_t count)
6621 {
6622 	int n;
6623 
6624 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6625 		sdebug_host_lock = (n > 0);
6626 		return count;
6627 	}
6628 	return -EINVAL;
6629 }
6630 static DRIVER_ATTR_RW(host_lock);
6631 
6632 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6633 {
6634 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6635 }
6636 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6637 			    size_t count)
6638 {
6639 	int n;
6640 
6641 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6642 		sdebug_strict = (n > 0);
6643 		return count;
6644 	}
6645 	return -EINVAL;
6646 }
6647 static DRIVER_ATTR_RW(strict);
6648 
6649 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6650 {
6651 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6652 }
6653 static DRIVER_ATTR_RO(uuid_ctl);
6654 
6655 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6656 {
6657 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6658 }
6659 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6660 			     size_t count)
6661 {
6662 	int ret, n;
6663 
6664 	ret = kstrtoint(buf, 0, &n);
6665 	if (ret)
6666 		return ret;
6667 	sdebug_cdb_len = n;
6668 	all_config_cdb_len();
6669 	return count;
6670 }
6671 static DRIVER_ATTR_RW(cdb_len);
6672 
6673 static const char * const zbc_model_strs_a[] = {
6674 	[BLK_ZONED_NONE] = "none",
6675 	[BLK_ZONED_HA]   = "host-aware",
6676 	[BLK_ZONED_HM]   = "host-managed",
6677 };
6678 
6679 static const char * const zbc_model_strs_b[] = {
6680 	[BLK_ZONED_NONE] = "no",
6681 	[BLK_ZONED_HA]   = "aware",
6682 	[BLK_ZONED_HM]   = "managed",
6683 };
6684 
6685 static const char * const zbc_model_strs_c[] = {
6686 	[BLK_ZONED_NONE] = "0",
6687 	[BLK_ZONED_HA]   = "1",
6688 	[BLK_ZONED_HM]   = "2",
6689 };
6690 
6691 static int sdeb_zbc_model_str(const char *cp)
6692 {
6693 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6694 
6695 	if (res < 0) {
6696 		res = sysfs_match_string(zbc_model_strs_b, cp);
6697 		if (res < 0) {
6698 			res = sysfs_match_string(zbc_model_strs_c, cp);
6699 			if (res < 0)
6700 				return -EINVAL;
6701 		}
6702 	}
6703 	return res;
6704 }
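/*
 * The three tables above accept equivalent spellings, so (for example)
 * each of the following returns BLK_ZONED_HM:
 *
 *   sdeb_zbc_model_str("host-managed");
 *   sdeb_zbc_model_str("managed");
 *   sdeb_zbc_model_str("2");
 */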
6705 
6706 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6707 {
6708 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6709 			 zbc_model_strs_a[sdeb_zbc_model]);
6710 }
6711 static DRIVER_ATTR_RO(zbc);
6712 
6713 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6714 {
6715 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6716 }
6717 static DRIVER_ATTR_RO(tur_ms_to_ready);
6718 
6719 /* Note: The following array creates attribute files in the
6720    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6721    files (over those found in the /sys/module/scsi_debug/parameters
6722    directory) is that auxiliary actions can be triggered when an attribute
6723    is changed. For example see: add_host_store() above.
6724  */
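/*
 * For example (illustrative): both of the paths below expose virtual_gb,
 * but only writes to the first one trigger the side effects in
 * virtual_gb_store() such as recomputing sdebug_capacity:
 *
 *   /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 *   /sys/module/scsi_debug/parameters/virtual_gb
 */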
6725 
6726 static struct attribute *sdebug_drv_attrs[] = {
6727 	&driver_attr_delay.attr,
6728 	&driver_attr_opts.attr,
6729 	&driver_attr_ptype.attr,
6730 	&driver_attr_dsense.attr,
6731 	&driver_attr_fake_rw.attr,
6732 	&driver_attr_host_max_queue.attr,
6733 	&driver_attr_no_lun_0.attr,
6734 	&driver_attr_num_tgts.attr,
6735 	&driver_attr_dev_size_mb.attr,
6736 	&driver_attr_num_parts.attr,
6737 	&driver_attr_every_nth.attr,
6738 	&driver_attr_lun_format.attr,
6739 	&driver_attr_max_luns.attr,
6740 	&driver_attr_max_queue.attr,
6741 	&driver_attr_no_uld.attr,
6742 	&driver_attr_scsi_level.attr,
6743 	&driver_attr_virtual_gb.attr,
6744 	&driver_attr_add_host.attr,
6745 	&driver_attr_per_host_store.attr,
6746 	&driver_attr_vpd_use_hostno.attr,
6747 	&driver_attr_sector_size.attr,
6748 	&driver_attr_statistics.attr,
6749 	&driver_attr_submit_queues.attr,
6750 	&driver_attr_dix.attr,
6751 	&driver_attr_dif.attr,
6752 	&driver_attr_guard.attr,
6753 	&driver_attr_ato.attr,
6754 	&driver_attr_map.attr,
6755 	&driver_attr_random.attr,
6756 	&driver_attr_removable.attr,
6757 	&driver_attr_host_lock.attr,
6758 	&driver_attr_ndelay.attr,
6759 	&driver_attr_strict.attr,
6760 	&driver_attr_uuid_ctl.attr,
6761 	&driver_attr_cdb_len.attr,
6762 	&driver_attr_tur_ms_to_ready.attr,
6763 	&driver_attr_zbc.attr,
6764 	NULL,
6765 };
6766 ATTRIBUTE_GROUPS(sdebug_drv);
6767 
6768 static struct device *pseudo_primary;
6769 
6770 static int __init scsi_debug_init(void)
6771 {
6772 	bool want_store = (sdebug_fake_rw == 0);
6773 	unsigned long sz;
6774 	int k, ret, hosts_to_add;
6775 	int idx = -1;
6776 
6777 	ramdisk_lck_a[0] = &atomic_rw;
6778 	ramdisk_lck_a[1] = &atomic_rw2;
6779 	atomic_set(&retired_max_queue, 0);
6780 
6781 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6782 		pr_warn("ndelay must be less than 1 second, ignored\n");
6783 		sdebug_ndelay = 0;
6784 	} else if (sdebug_ndelay > 0)
6785 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6786 
6787 	switch (sdebug_sector_size) {
6788 	case  512:
6789 	case 1024:
6790 	case 2048:
6791 	case 4096:
6792 		break;
6793 	default:
6794 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6795 		return -EINVAL;
6796 	}
6797 
6798 	switch (sdebug_dif) {
6799 	case T10_PI_TYPE0_PROTECTION:
6800 		break;
6801 	case T10_PI_TYPE1_PROTECTION:
6802 	case T10_PI_TYPE2_PROTECTION:
6803 	case T10_PI_TYPE3_PROTECTION:
6804 		have_dif_prot = true;
6805 		break;
6806 
6807 	default:
6808 		pr_err("dif must be 0, 1, 2 or 3\n");
6809 		return -EINVAL;
6810 	}
6811 
6812 	if (sdebug_num_tgts < 0) {
6813 		pr_err("num_tgts must be >= 0\n");
6814 		return -EINVAL;
6815 	}
6816 
6817 	if (sdebug_guard > 1) {
6818 		pr_err("guard must be 0 or 1\n");
6819 		return -EINVAL;
6820 	}
6821 
6822 	if (sdebug_ato > 1) {
6823 		pr_err("ato must be 0 or 1\n");
6824 		return -EINVAL;
6825 	}
6826 
6827 	if (sdebug_physblk_exp > 15) {
6828 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6829 		return -EINVAL;
6830 	}
6831 
6832 	sdebug_lun_am = sdebug_lun_am_i;
6833 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6834 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6835 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6836 	}
6837 
6838 	if (sdebug_max_luns > 256) {
6839 		if (sdebug_max_luns > 16384) {
6840 			pr_warn("max_luns can be no more than 16384, use default\n");
6841 			sdebug_max_luns = DEF_MAX_LUNS;
6842 		}
6843 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6844 	}
6845 
6846 	if (sdebug_lowest_aligned > 0x3fff) {
6847 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6848 		return -EINVAL;
6849 	}
6850 
6851 	if (submit_queues < 1) {
6852 		pr_err("submit_queues must be 1 or more\n");
6853 		return -EINVAL;
6854 	}
6855 
6856 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6857 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6858 		return -EINVAL;
6859 	}
6860 
6861 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6862 	    (sdebug_host_max_queue < 0)) {
6863 		pr_err("host_max_queue must be in range [0 %d]\n",
6864 		       SDEBUG_CANQUEUE);
6865 		return -EINVAL;
6866 	}
6867 
6868 	if (sdebug_host_max_queue &&
6869 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6870 		sdebug_max_queue = sdebug_host_max_queue;
6871 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6872 			sdebug_max_queue);
6873 	}
6874 
6875 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6876 			       GFP_KERNEL);
6877 	if (sdebug_q_arr == NULL)
6878 		return -ENOMEM;
6879 	for (k = 0; k < submit_queues; ++k)
6880 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6881 
6882 	/*
6883 	 * Check for a host-managed zoned block device specified with
6884 	 * ptype=0x14 or zbc=XXX.
6885 	 */
6886 	if (sdebug_ptype == TYPE_ZBC) {
6887 		sdeb_zbc_model = BLK_ZONED_HM;
6888 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6889 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6890 		if (k < 0) {
6891 			ret = k;
6892 			goto free_q_arr;
6893 		}
6894 		sdeb_zbc_model = k;
6895 		switch (sdeb_zbc_model) {
6896 		case BLK_ZONED_NONE:
6897 		case BLK_ZONED_HA:
6898 			sdebug_ptype = TYPE_DISK;
6899 			break;
6900 		case BLK_ZONED_HM:
6901 			sdebug_ptype = TYPE_ZBC;
6902 			break;
6903 		default:
6904 			pr_err("Invalid ZBC model\n");
6905 			ret = -EINVAL;
6906 			goto free_q_arr;
6907 		}
6908 	}
6909 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6910 		sdeb_zbc_in_use = true;
6911 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6912 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6913 	}
6914 
6915 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6916 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6917 	if (sdebug_dev_size_mb < 1)
6918 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6919 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6920 	sdebug_store_sectors = sz / sdebug_sector_size;
6921 	sdebug_capacity = get_sdebug_capacity();
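	/*
	 * Worked example (values illustrative): dev_size_mb=8 and
	 * sector_size=512 give sz = 8 * 1048576 = 8388608 bytes and
	 * sdebug_store_sectors = 16384.
	 */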
6922 
6923 	/* play around with geometry, don't waste too much on track 0 */
6924 	sdebug_heads = 8;
6925 	sdebug_sectors_per = 32;
6926 	if (sdebug_dev_size_mb >= 256)
6927 		sdebug_heads = 64;
6928 	else if (sdebug_dev_size_mb >= 16)
6929 		sdebug_heads = 32;
6930 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6931 			       (sdebug_sectors_per * sdebug_heads);
6932 	if (sdebug_cylinders_per >= 1024) {
6933 		/* other LLDs do this; implies >= 1GB ram disk ... */
6934 		sdebug_heads = 255;
6935 		sdebug_sectors_per = 63;
6936 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6937 			       (sdebug_sectors_per * sdebug_heads);
6938 	}
6939 	if (scsi_debug_lbp()) {
6940 		sdebug_unmap_max_blocks =
6941 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6942 
6943 		sdebug_unmap_max_desc =
6944 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6945 
6946 		sdebug_unmap_granularity =
6947 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6948 
6949 		if (sdebug_unmap_alignment &&
6950 		    sdebug_unmap_granularity <=
6951 		    sdebug_unmap_alignment) {
6952 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6953 			ret = -EINVAL;
6954 			goto free_q_arr;
6955 		}
6956 	}
6957 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6958 	if (want_store) {
6959 		idx = sdebug_add_store();
6960 		if (idx < 0) {
6961 			ret = idx;
6962 			goto free_q_arr;
6963 		}
6964 	}
6965 
6966 	pseudo_primary = root_device_register("pseudo_0");
6967 	if (IS_ERR(pseudo_primary)) {
6968 		pr_warn("root_device_register() error\n");
6969 		ret = PTR_ERR(pseudo_primary);
6970 		goto free_vm;
6971 	}
6972 	ret = bus_register(&pseudo_lld_bus);
6973 	if (ret < 0) {
6974 		pr_warn("bus_register error: %d\n", ret);
6975 		goto dev_unreg;
6976 	}
6977 	ret = driver_register(&sdebug_driverfs_driver);
6978 	if (ret < 0) {
6979 		pr_warn("driver_register error: %d\n", ret);
6980 		goto bus_unreg;
6981 	}
6982 
6983 	hosts_to_add = sdebug_add_host;
6984 	sdebug_add_host = 0;
6985 
6986 	for (k = 0; k < hosts_to_add; k++) {
6987 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
6988 			pr_info("exit early as sdebug_deflect_incoming is set\n");
6989 			return 0;
6990 		}
6991 		if (want_store && k == 0) {
6992 			ret = sdebug_add_host_helper(idx);
6993 			if (ret < 0) {
6994 				pr_err("add_host_helper k=%d, error=%d\n",
6995 				       k, -ret);
6996 				break;
6997 			}
6998 		} else {
6999 			ret = sdebug_do_add_host(want_store &&
7000 						 sdebug_per_host_store);
7001 			if (ret < 0) {
7002 				pr_err("add_host k=%d error=%d\n", k, -ret);
7003 				break;
7004 			}
7005 		}
7006 	}
7007 	if (sdebug_verbose)
7008 		pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
7009 
7010 	/*
7011 	 * Even though all the hosts have been established, the scsi mid-level
7012 	 * scans devices (LUs) asynchronously, so some may still be in the process of being set up.
7013 	 */
7014 	return 0;
7015 
7016 bus_unreg:
7017 	bus_unregister(&pseudo_lld_bus);
7018 dev_unreg:
7019 	root_device_unregister(pseudo_primary);
7020 free_vm:
7021 	sdebug_erase_store(idx, NULL);
7022 free_q_arr:
7023 	kfree(sdebug_q_arr);
7024 	return ret;
7025 }
7026 
7027 static void __exit scsi_debug_exit(void)
7028 {
7029 	int k;
7030 
7031 	/* Possible race with LUs still being set up; stop them asap */
7032 	sdeb_block_all_queues();
7033 	smp_store_release(&sdebug_deflect_incoming, true);
7034 	stop_all_queued(false);
7035 	for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
7036 		sdebug_do_remove_host(true);
7037 	free_all_queued();
7038 	if (sdebug_verbose)
7039 		pr_info("removed %d hosts\n", k);
7040 	driver_unregister(&sdebug_driverfs_driver);
7041 	bus_unregister(&pseudo_lld_bus);
7042 	root_device_unregister(pseudo_primary);
7043 
7044 	sdebug_erase_all_stores(false);
7045 	xa_destroy(per_store_ap);
7046 	kfree(sdebug_q_arr);
7047 }
7048 
7049 device_initcall(scsi_debug_init);
7050 module_exit(scsi_debug_exit);
7051 
7052 static void sdebug_release_adapter(struct device *dev)
7053 {
7054 	struct sdebug_host_info *sdbg_host;
7055 
7056 	sdbg_host = to_sdebug_host(dev);
7057 	kfree(sdbg_host);
7058 }
7059 
7060 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7061 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7062 {
7063 	if (idx < 0)
7064 		return;
7065 	if (!sip) {
7066 		if (xa_empty(per_store_ap))
7067 			return;
7068 		sip = xa_load(per_store_ap, idx);
7069 		if (!sip)
7070 			return;
7071 	}
7072 	vfree(sip->map_storep);
7073 	vfree(sip->dif_storep);
7074 	vfree(sip->storep);
7075 	xa_erase(per_store_ap, idx);
7076 	kfree(sip);
7077 }
7078 
7079 /* Assumes apart_from_first==false only in the shutdown case. */
7080 static void sdebug_erase_all_stores(bool apart_from_first)
7081 {
7082 	unsigned long idx;
7083 	struct sdeb_store_info *sip = NULL;
7084 
7085 	xa_for_each(per_store_ap, idx, sip) {
7086 		if (apart_from_first)
7087 			apart_from_first = false;
7088 		else
7089 			sdebug_erase_store(idx, sip);
7090 	}
7091 	if (apart_from_first)
7092 		sdeb_most_recent_idx = sdeb_first_idx;
7093 }
7094 
7095 /*
7096  * Returns the new store's xarray element index (idx) if >= 0, else a
7097  * negated errno. The number of stores is limited to 65536.
7098  */
7099 static int sdebug_add_store(void)
7100 {
7101 	int res;
7102 	u32 n_idx;
7103 	unsigned long iflags;
7104 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7105 	struct sdeb_store_info *sip = NULL;
7106 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7107 
7108 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7109 	if (!sip)
7110 		return -ENOMEM;
7111 
7112 	xa_lock_irqsave(per_store_ap, iflags);
7113 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7114 	if (unlikely(res < 0)) {
7115 		xa_unlock_irqrestore(per_store_ap, iflags);
7116 		kfree(sip);
7117 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7118 		return res;
7119 	}
7120 	sdeb_most_recent_idx = n_idx;
7121 	if (sdeb_first_idx < 0)
7122 		sdeb_first_idx = n_idx;
7123 	xa_unlock_irqrestore(per_store_ap, iflags);
7124 
7125 	res = -ENOMEM;
7126 	sip->storep = vzalloc(sz);
7127 	if (!sip->storep) {
7128 		pr_err("user data oom\n");
7129 		goto err;
7130 	}
7131 	if (sdebug_num_parts > 0)
7132 		sdebug_build_parts(sip->storep, sz);
7133 
7134 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7135 	if (sdebug_dix) {
7136 		int dif_size;
7137 
7138 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7139 		sip->dif_storep = vmalloc(dif_size);
7140 
7141 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7142 			sip->dif_storep);
7143 
7144 		if (!sip->dif_storep) {
7145 			pr_err("DIX oom\n");
7146 			goto err;
7147 		}
7148 		memset(sip->dif_storep, 0xff, dif_size);
7149 	}
7150 	/* Logical Block Provisioning */
7151 	if (scsi_debug_lbp()) {
7152 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7153 		sip->map_storep = vmalloc(array_size(sizeof(long),
7154 						     BITS_TO_LONGS(map_size)));
7155 
7156 		pr_info("%lu provisioning blocks\n", map_size);
7157 
7158 		if (!sip->map_storep) {
7159 			pr_err("LBP map oom\n");
7160 			goto err;
7161 		}
7162 
7163 		bitmap_zero(sip->map_storep, map_size);
7164 
7165 		/* Map first 1KB for partition table */
7166 		if (sdebug_num_parts)
7167 			map_region(sip, 0, 2);
7168 	}
7169 
7170 	rwlock_init(&sip->macc_lck);
7171 	return (int)n_idx;
7172 err:
7173 	sdebug_erase_store((int)n_idx, sip);
7174 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7175 	return res;
7176 }
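/*
 * Summary of the above (descriptive only): a store bundles the ram disk
 * (storep), the optional T10 PI buffer (dif_storep) and the optional LBP
 * bitmap (map_storep); sdebug_erase_store() is the matching teardown and
 * also serves as the error path.
 */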
7177 
7178 static int sdebug_add_host_helper(int per_host_idx)
7179 {
7180 	int k, devs_per_host, idx;
7181 	int error = -ENOMEM;
7182 	struct sdebug_host_info *sdbg_host;
7183 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7184 
7185 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7186 	if (!sdbg_host)
7187 		return -ENOMEM;
7188 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7189 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7190 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7191 	sdbg_host->si_idx = idx;
7192 
7193 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7194 
7195 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7196 	for (k = 0; k < devs_per_host; k++) {
7197 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7198 		if (!sdbg_devinfo)
7199 			goto clean;
7200 	}
7201 
7202 	spin_lock(&sdebug_host_list_lock);
7203 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7204 	spin_unlock(&sdebug_host_list_lock);
7205 
7206 	sdbg_host->dev.bus = &pseudo_lld_bus;
7207 	sdbg_host->dev.parent = pseudo_primary;
7208 	sdbg_host->dev.release = &sdebug_release_adapter;
7209 	dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
7210 
7211 	error = device_register(&sdbg_host->dev);
7212 	if (error)
7213 		goto clean;
7214 
7215 	atomic_inc(&sdebug_num_hosts);
7216 	return 0;
7217 
7218 clean:
7219 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7220 				 dev_list) {
7221 		list_del(&sdbg_devinfo->dev_list);
7222 		kfree(sdbg_devinfo->zstate);
7223 		kfree(sdbg_devinfo);
7224 	}
7225 	kfree(sdbg_host);
7226 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7227 	return error;
7228 }
7229 
7230 static int sdebug_do_add_host(bool mk_new_store)
7231 {
7232 	int ph_idx = sdeb_most_recent_idx;
7233 
7234 	if (mk_new_store) {
7235 		ph_idx = sdebug_add_store();
7236 		if (ph_idx < 0)
7237 			return ph_idx;
7238 	}
7239 	return sdebug_add_host_helper(ph_idx);
7240 }
7241 
7242 static void sdebug_do_remove_host(bool the_end)
7243 {
7244 	int idx = -1;
7245 	struct sdebug_host_info *sdbg_host = NULL;
7246 	struct sdebug_host_info *sdbg_host2;
7247 
7248 	spin_lock(&sdebug_host_list_lock);
7249 	if (!list_empty(&sdebug_host_list)) {
7250 		sdbg_host = list_entry(sdebug_host_list.prev,
7251 				       struct sdebug_host_info, host_list);
7252 		idx = sdbg_host->si_idx;
7253 	}
7254 	if (!the_end && idx >= 0) {
7255 		bool unique = true;
7256 
7257 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7258 			if (sdbg_host2 == sdbg_host)
7259 				continue;
7260 			if (idx == sdbg_host2->si_idx) {
7261 				unique = false;
7262 				break;
7263 			}
7264 		}
7265 		if (unique) {
7266 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7267 			if (idx == sdeb_most_recent_idx)
7268 				--sdeb_most_recent_idx;
7269 		}
7270 	}
7271 	if (sdbg_host)
7272 		list_del(&sdbg_host->host_list);
7273 	spin_unlock(&sdebug_host_list_lock);
7274 
7275 	if (!sdbg_host)
7276 		return;
7277 
7278 	device_unregister(&sdbg_host->dev);
7279 	atomic_dec(&sdebug_num_hosts);
7280 }
7281 
7282 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7283 {
7284 	int num_in_q = 0;
7285 	struct sdebug_dev_info *devip;
7286 
7287 	sdeb_block_all_queues();
7288 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7289 	if (NULL == devip) {
7290 		sdeb_unblock_all_queues();
7291 		return	-ENODEV;
7292 	}
7293 	num_in_q = atomic_read(&devip->num_in_q);
7294 
7295 	if (qdepth > SDEBUG_CANQUEUE) {
7296 		qdepth = SDEBUG_CANQUEUE;
7297 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7298 			qdepth, SDEBUG_CANQUEUE);
7299 	}
7300 	if (qdepth < 1)
7301 		qdepth = 1;
7302 	if (qdepth != sdev->queue_depth)
7303 		scsi_change_queue_depth(sdev, qdepth);
7304 
7305 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7306 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7307 			    __func__, qdepth, num_in_q);
7308 	}
7309 	sdeb_unblock_all_queues();
7310 	return sdev->queue_depth;
7311 }
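/*
 * Illustrative trigger (standard SCSI sysfs path, assumed rather than
 * defined here): writing a device's queue_depth attribute reaches this
 * .change_queue_depth hook, e.g.:
 *
 *   echo 4 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 */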
7312 
7313 static bool fake_timeout(struct scsi_cmnd *scp)
7314 {
7315 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7316 		if (sdebug_every_nth < -1)
7317 			sdebug_every_nth = -1;
7318 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7319 			return true; /* ignore command causing timeout */
7320 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7321 			 scsi_medium_access_command(scp))
7322 			return true; /* time out reads and writes */
7323 	}
7324 	return false;
7325 }
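/*
 * Example (illustrative): loading the module with every_nth=100 and
 * opts=SDEBUG_OPT_TIMEOUT makes every 100th command vanish without a
 * response, exercising the mid-level's timeout and abort handling.
 */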
7326 
7327 /* Response to TUR or media access command when device stopped */
7328 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7329 {
7330 	int stopped_state;
7331 	u64 diff_ns = 0;
7332 	ktime_t now_ts = ktime_get_boottime();
7333 	struct scsi_device *sdp = scp->device;
7334 
7335 	stopped_state = atomic_read(&devip->stopped);
7336 	if (stopped_state == 2) {
7337 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7338 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7339 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7340 				/* tur_ms_to_ready timer has expired */
7341 				atomic_set(&devip->stopped, 0);
7342 				return 0;
7343 			}
7344 		}
7345 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7346 		if (sdebug_verbose)
7347 			sdev_printk(KERN_INFO, sdp,
7348 				    "%s: Not ready: in process of becoming ready\n", my_name);
7349 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7350 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7351 
7352 			if (diff_ns <= tur_nanosecs_to_ready)
7353 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7354 			else
7355 				diff_ns = tur_nanosecs_to_ready;
7356 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7357 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7358 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7359 						   diff_ns);
7360 			return check_condition_result;
7361 		}
7362 	}
7363 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7364 	if (sdebug_verbose)
7365 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7366 			    my_name);
7367 	return check_condition_result;
7368 }
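/*
 * Timeline sketch (assuming tur_ms_to_ready=2000): for roughly 2 seconds
 * after the LU is created, TEST UNIT READY fails with NOT READY, ASC/ASCQ
 * 0x4/0x1 ("in process of becoming ready") and the sense information
 * field holds the milliseconds still to go; after that the stopped state
 * is cleared and TUR succeeds.
 */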
7369 
7370 static int sdebug_map_queues(struct Scsi_Host *shost)
7371 {
7372 	int i, qoff;
7373 
7374 	if (shost->nr_hw_queues == 1)
7375 		return 0;
7376 
7377 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7378 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7379 
7380 		map->nr_queues  = 0;
7381 
7382 		if (i == HCTX_TYPE_DEFAULT)
7383 			map->nr_queues = submit_queues - poll_queues;
7384 		else if (i == HCTX_TYPE_POLL)
7385 			map->nr_queues = poll_queues;
7386 
7387 		if (!map->nr_queues) {
7388 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7389 			continue;
7390 		}
7391 
7392 		map->queue_offset = qoff;
7393 		blk_mq_map_queues(map);
7394 
7395 		qoff += map->nr_queues;
7396 	}
7397 
7398 	return 0;
7399 
7400 }
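/*
 * Mapping example (illustrative): submit_queues=4 and poll_queues=1 give
 * HCTX_TYPE_DEFAULT nr_queues=3 at queue_offset 0 and HCTX_TYPE_POLL
 * nr_queues=1 at queue_offset 3; HCTX_TYPE_READ keeps 0 queues and is
 * skipped.
 */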
7401 
7402 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7403 {
7404 	bool first;
7405 	bool retiring = false;
7406 	int num_entries = 0;
7407 	unsigned int qc_idx = 0;
7408 	unsigned long iflags;
7409 	ktime_t kt_from_boot = ktime_get_boottime();
7410 	struct sdebug_queue *sqp;
7411 	struct sdebug_queued_cmd *sqcp;
7412 	struct scsi_cmnd *scp;
7413 	struct sdebug_dev_info *devip;
7414 	struct sdebug_defer *sd_dp;
7415 
7416 	sqp = sdebug_q_arr + queue_num;
7417 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7418 
7419 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7420 		if (first) {
7421 			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7422 			first = false;
7423 		} else {
7424 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7425 		}
7426 		if (unlikely(qc_idx >= sdebug_max_queue))
7427 			break;
7428 
7429 		sqcp = &sqp->qc_arr[qc_idx];
7430 		sd_dp = sqcp->sd_dp;
7431 		if (unlikely(!sd_dp))
7432 			continue;
7433 		scp = sqcp->a_cmnd;
7434 		if (unlikely(scp == NULL)) {
7435 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7436 			       queue_num, qc_idx, __func__);
7437 			break;
7438 		}
7439 		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7440 			if (kt_from_boot < sd_dp->cmpl_ts)
7441 				continue;
7442 
7443 		} else		/* ignore non-REQ_POLLED requests */
7444 			continue;
7445 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7446 		if (likely(devip))
7447 			atomic_dec(&devip->num_in_q);
7448 		else
7449 			pr_err("devip=NULL from %s\n", __func__);
7450 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7451 			retiring = true;
7452 
7453 		sqcp->a_cmnd = NULL;
7454 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7455 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7456 				sqp, queue_num, qc_idx, __func__);
7457 			break;
7458 		}
7459 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7460 			int k, retval;
7461 
7462 			retval = atomic_read(&retired_max_queue);
7463 			if (qc_idx >= retval) {
7464 				pr_err("index %d too large\n", retval);
7465 				break;
7466 			}
7467 			k = find_last_bit(sqp->in_use_bm, retval);
7468 			if ((k < sdebug_max_queue) || (k == retval))
7469 				atomic_set(&retired_max_queue, 0);
7470 			else
7471 				atomic_set(&retired_max_queue, k + 1);
7472 		}
7473 		sd_dp->defer_t = SDEB_DEFER_NONE;
7474 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7475 		scsi_done(scp); /* callback to mid level */
7476 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7477 		num_entries++;
7478 	}
7479 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7480 	if (num_entries > 0)
7481 		atomic_add(num_entries, &sdeb_mq_poll_count);
7482 	return num_entries;
7483 }
7484 
7485 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7486 				   struct scsi_cmnd *scp)
7487 {
7488 	u8 sdeb_i;
7489 	struct scsi_device *sdp = scp->device;
7490 	const struct opcode_info_t *oip;
7491 	const struct opcode_info_t *r_oip;
7492 	struct sdebug_dev_info *devip;
7493 	u8 *cmd = scp->cmnd;
7494 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7495 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7496 	int k, na;
7497 	int errsts = 0;
7498 	u64 lun_index = sdp->lun & 0x3FFF;
7499 	u32 flags;
7500 	u16 sa;
7501 	u8 opcode = cmd[0];
7502 	bool has_wlun_rl;
7503 	bool inject_now;
7504 
7505 	scsi_set_resid(scp, 0);
7506 	if (sdebug_statistics) {
7507 		atomic_inc(&sdebug_cmnd_count);
7508 		inject_now = inject_on_this_cmd();
7509 	} else {
7510 		inject_now = false;
7511 	}
7512 	if (unlikely(sdebug_verbose &&
7513 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7514 		char b[120];
7515 		int n, len, sb;
7516 
7517 		len = scp->cmd_len;
7518 		sb = (int)sizeof(b);
7519 		if (len > 32)
7520 			strcpy(b, "too long, over 32 bytes");
7521 		else {
7522 			for (k = 0, n = 0; k < len && n < sb; ++k)
7523 				n += scnprintf(b + n, sb - n, "%02x ",
7524 					       (u32)cmd[k]);
7525 		}
7526 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7527 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7528 	}
7529 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7530 		return SCSI_MLQUEUE_HOST_BUSY;
7531 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7532 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7533 		goto err_out;
7534 
7535 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7536 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7537 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7538 	if (unlikely(!devip)) {
7539 		devip = find_build_dev_info(sdp);
7540 		if (NULL == devip)
7541 			goto err_out;
7542 	}
7543 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7544 		atomic_set(&sdeb_inject_pending, 1);
7545 
7546 	na = oip->num_attached;
7547 	r_pfp = oip->pfp;
7548 	if (na) {	/* multiple commands with this opcode */
7549 		r_oip = oip;
7550 		if (FF_SA & r_oip->flags) {
7551 			if (F_SA_LOW & oip->flags)
7552 				sa = 0x1f & cmd[1];
7553 			else
7554 				sa = get_unaligned_be16(cmd + 8);
7555 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7556 				if (opcode == oip->opcode && sa == oip->sa)
7557 					break;
7558 			}
7559 		} else {   /* no service action, so only check the opcode */
7560 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7561 				if (opcode == oip->opcode)
7562 					break;
7563 			}
7564 		}
7565 		if (k > na) {
7566 			if (F_SA_LOW & r_oip->flags)
7567 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7568 			else if (F_SA_HIGH & r_oip->flags)
7569 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7570 			else
7571 				mk_sense_invalid_opcode(scp);
7572 			goto check_cond;
7573 		}
7574 	}	/* else (when na==0) we assume the oip is a match */
7575 	flags = oip->flags;
7576 	if (unlikely(F_INV_OP & flags)) {
7577 		mk_sense_invalid_opcode(scp);
7578 		goto check_cond;
7579 	}
7580 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7581 		if (sdebug_verbose)
7582 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7583 				    my_name, opcode, " supported for wlun");
7584 		mk_sense_invalid_opcode(scp);
7585 		goto check_cond;
7586 	}
7587 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7588 		u8 rem;
7589 		int j;
7590 
7591 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7592 			rem = ~oip->len_mask[k] & cmd[k];
7593 			if (rem) {
7594 				for (j = 7; j >= 0; --j, rem <<= 1) {
7595 					if (0x80 & rem)
7596 						break;
7597 				}
7598 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7599 				goto check_cond;
7600 			}
7601 		}
7602 	}
7603 	if (unlikely(!(F_SKIP_UA & flags) &&
7604 		     find_first_bit(devip->uas_bm,
7605 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7606 		errsts = make_ua(scp, devip);
7607 		if (errsts)
7608 			goto check_cond;
7609 	}
7610 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7611 		     atomic_read(&devip->stopped))) {
7612 		errsts = resp_not_ready(scp, devip);
7613 		if (errsts)
7614 			goto fini;
7615 	}
7616 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7617 		goto fini;
7618 	if (unlikely(sdebug_every_nth)) {
7619 		if (fake_timeout(scp))
7620 			return 0;	/* ignore command: make trouble */
7621 	}
7622 	if (likely(oip->pfp))
7623 		pfp = oip->pfp;	/* calls a resp_* function */
7624 	else
7625 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7626 
7627 fini:
7628 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7629 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7630 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7631 					    sdebug_ndelay > 10000)) {
7632 		/*
7633 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7634 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7635 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7636 		 * For Synchronize Cache want 1/20 of SSU's delay.
7637 		 */
7638 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7639 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7640 
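		/*
		 * e.g. (illustrative) HZ=250, sdebug_jdelay=3 and SYNCHRONIZE
		 * CACHE (denom=20): jdelay = USER_HZ*3 * 250 / (20*USER_HZ)
		 * = 37 jiffies, i.e. about 3/20 of a second.
		 */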
7641 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7642 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7643 	} else
7644 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7645 				     sdebug_ndelay);
7646 check_cond:
7647 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7648 err_out:
7649 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7650 }
7651 
7652 static struct scsi_host_template sdebug_driver_template = {
7653 	.show_info =		scsi_debug_show_info,
7654 	.write_info =		scsi_debug_write_info,
7655 	.proc_name =		sdebug_proc_name,
7656 	.name =			"SCSI DEBUG",
7657 	.info =			scsi_debug_info,
7658 	.slave_alloc =		scsi_debug_slave_alloc,
7659 	.slave_configure =	scsi_debug_slave_configure,
7660 	.slave_destroy =	scsi_debug_slave_destroy,
7661 	.ioctl =		scsi_debug_ioctl,
7662 	.queuecommand =		scsi_debug_queuecommand,
7663 	.change_queue_depth =	sdebug_change_qdepth,
7664 	.map_queues =		sdebug_map_queues,
7665 	.mq_poll =		sdebug_blk_mq_poll,
7666 	.eh_abort_handler =	scsi_debug_abort,
7667 	.eh_device_reset_handler = scsi_debug_device_reset,
7668 	.eh_target_reset_handler = scsi_debug_target_reset,
7669 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7670 	.eh_host_reset_handler = scsi_debug_host_reset,
7671 	.can_queue =		SDEBUG_CANQUEUE,
7672 	.this_id =		7,
7673 	.sg_tablesize =		SG_MAX_SEGMENTS,
7674 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7675 	.max_sectors =		-1U,
7676 	.max_segment_size =	-1U,
7677 	.module =		THIS_MODULE,
7678 	.track_queue_depth =	1,
7679 };
7680 
7681 static int sdebug_driver_probe(struct device *dev)
7682 {
7683 	int error = 0;
7684 	struct sdebug_host_info *sdbg_host;
7685 	struct Scsi_Host *hpnt;
7686 	int hprot;
7687 
7688 	sdbg_host = to_sdebug_host(dev);
7689 
7690 	sdebug_driver_template.can_queue = sdebug_max_queue;
7691 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7692 	if (!sdebug_clustering)
7693 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7694 
7695 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7696 	if (NULL == hpnt) {
7697 		pr_err("scsi_host_alloc failed\n");
7698 		error = -ENODEV;
7699 		return error;
7700 	}
7701 	if (submit_queues > nr_cpu_ids) {
7702 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7703 			my_name, submit_queues, nr_cpu_ids);
7704 		submit_queues = nr_cpu_ids;
7705 	}
7706 	/*
7707 	 * Decide whether to tell scsi subsystem that we want mq. The
7708 	 * following should give the same answer for each host.
7709 	 */
7710 	hpnt->nr_hw_queues = submit_queues;
7711 	if (sdebug_host_max_queue)
7712 		hpnt->host_tagset = 1;
7713 
7714 	/* poll queues are only possible when nr_hw_queues > 1 */
7715 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7716 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7717 			 my_name, poll_queues, hpnt->nr_hw_queues);
7718 		poll_queues = 0;
7719 	}
7720 
7721 	/*
7722 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7723 	 * left over for non-polled I/O.
7724 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7725 	 */
7726 	if (poll_queues >= submit_queues) {
7727 		if (submit_queues < 3)
7728 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7729 		else
7730 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7731 				my_name, submit_queues - 1);
7732 		poll_queues = 1;
7733 	}
7734 	if (poll_queues)
7735 		hpnt->nr_maps = 3;
7736 
7737 	sdbg_host->shost = hpnt;
7738 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7739 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7740 		hpnt->max_id = sdebug_num_tgts + 1;
7741 	else
7742 		hpnt->max_id = sdebug_num_tgts;
7743 	/* = sdebug_max_luns; */
7744 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7745 
7746 	hprot = 0;
7747 
7748 	switch (sdebug_dif) {
7749 
7750 	case T10_PI_TYPE1_PROTECTION:
7751 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7752 		if (sdebug_dix)
7753 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7754 		break;
7755 
7756 	case T10_PI_TYPE2_PROTECTION:
7757 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7758 		if (sdebug_dix)
7759 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7760 		break;
7761 
7762 	case T10_PI_TYPE3_PROTECTION:
7763 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7764 		if (sdebug_dix)
7765 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7766 		break;
7767 
7768 	default:
7769 		if (sdebug_dix)
7770 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7771 		break;
7772 	}
7773 
7774 	scsi_host_set_prot(hpnt, hprot);
7775 
7776 	if (have_dif_prot || sdebug_dix)
7777 		pr_info("host protection%s%s%s%s%s%s%s\n",
7778 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7779 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7780 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7781 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7782 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7783 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7784 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7785 
7786 	if (sdebug_guard == 1)
7787 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7788 	else
7789 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7790 
7791 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7792 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7793 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7794 		sdebug_statistics = true;
7795 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7796 	if (error) {
7797 		pr_err("scsi_add_host failed\n");
7798 		error = -ENODEV;
7799 		scsi_host_put(hpnt);
7800 	} else {
7801 		scsi_scan_host(hpnt);
7802 	}
7803 
7804 	return error;
7805 }
7806 
7807 static void sdebug_driver_remove(struct device *dev)
7808 {
7809 	struct sdebug_host_info *sdbg_host;
7810 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7811 
7812 	sdbg_host = to_sdebug_host(dev);
7813 
7814 	scsi_remove_host(sdbg_host->shost);
7815 
7816 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7817 				 dev_list) {
7818 		list_del(&sdbg_devinfo->dev_list);
7819 		kfree(sdbg_devinfo->zstate);
7820 		kfree(sdbg_devinfo);
7821 	}
7822 
7823 	scsi_host_put(sdbg_host->shost);
7824 }
7825 
7826 static int pseudo_lld_bus_match(struct device *dev,
7827 				struct device_driver *dev_driver)
7828 {
7829 	return 1;
7830 }
7831 
7832 static struct bus_type pseudo_lld_bus = {
7833 	.name = "pseudo",
7834 	.match = pseudo_lld_bus_match,
7835 	.probe = sdebug_driver_probe,
7836 	.remove = sdebug_driver_remove,
7837 	.drv_groups = sdebug_drv_groups,
7838 };
7839