// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

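/*
 * Illustrative usage of the defaults above (a sketch; the parameter names
 * correspond to the module_param definitions later in this file, and the
 * exact spellings should be checked there):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 *
 * would create one pseudo host with two targets of four LUNs each, all
 * backed by a 256 MiB ram store of 4096 byte logical blocks.
 */
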
#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

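/*
 * The option bits above may be OR-ed together. Worked example:
 * SDEBUG_OPT_ALL_NOISE expands to
 *   SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | SDEBUG_OPT_RESET_NOISE
 *   = 0x1 | 0x200 | 0x2000 = 0x2201
 * so loading with "opts=0x2201" (or writing that value to the driver's
 * sysfs "opts" attribute, path assumed) turns on all three noise classes.
 */
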
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_NUM_UAS 8

/* When the SDEBUG_OPT_MEDIUM_ERR option bit is set, a medium error is
 * simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE

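/*
 * Worked example: on a 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 commands may be queued per submit queue,
 * and DEF_CMD_PER_LUN starts at that same ceiling.
 */
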
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)

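/*
 * Expanded values of the combinations above (illustrative arithmetic):
 *   FF_RESPOND  = 0x10 | 0x20 | 0x40 = 0x70
 *   FF_MEDIA_IO = 0x800 | 0x400     = 0xc00
 *   FF_SA       = 0x100 | 0x80      = 0x180
 * An opcode table entry carrying FF_MEDIA_IO is therefore both a media
 * access command and one whose resp_*() handler is bypassed when the
 * fake_rw option is set.
 */
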
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

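/*
 * Worked example of how len_mask[] is read, using the INQUIRY entry in
 * opcode_info_arr below: {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} says the
 * cdb is 6 bytes long, cdb[1] may only carry bits within 0xe3 (bits 4..2
 * are reserved), cdb[2..4] are unrestricted and cdb[5] is checked against
 * 0xc7, the usual mask for a SCSI control byte. With the strict option
 * set, a bit outside its mask is reported as an invalid field in cdb.
 */
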
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

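/*
 * Lookup example: the first cdb byte indexes this table directly, so
 * opcode_ind_arr[0x28] == SDEB_I_READ maps READ(10) onto the same handler
 * slot as READ(6), READ(12) and READ(16), while the all-zero rows route
 * unsupported and vendor specific opcodes to SDEB_I_INVALID_OPCODE.
 */
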
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can OR their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000

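/*
 * Illustrative fragment (a sketch, not one of the real handlers below):
 * a response function honouring an IMMED bit can signal early completion
 * with something like
 *
 *	if (immed)
 *		return res | SDEG_RES_IMMED_MASK;
 *
 * The submission path is then expected to strip the mask off again and
 * shorten the simulated delay for that command.
 */
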
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* Old BIOS-era geometry values; the kernel may eventually drop these but
   some mode sense pages may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

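/*
 * Worked example of the wrap-around above: do_div() leaves the remainder
 * in lba, so with sdebug_store_sectors == 16384 a request for lba 20000
 * lands on store sector 20000 % 16384 == 3616, i.e. at byte offset
 * 3616 * sdebug_sector_size. This is how a small ram store can back the
 * larger capacity advertised via the virtual_gb option.
 */
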
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

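/*
 * Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 7) builds the
 * sense-key specific bytes {0xcf, 0x00, 0x02}: 0x80 (SKSV) | 0x40 (C/D) |
 * 0x08 (BPV) | 7 (bit pointer), followed by the big-endian field pointer
 * 2. With dsense=0 those land in bytes 15..17 of fixed format sense data.
 */
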
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}

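/*
 * Priority worked example: if both SDEBUG_UA_POR (bit 0) and
 * SDEBUG_UA_LUNS_CHANGED (bit 5) are pending in uas_bm, find_first_bit()
 * in make_ua() above returns 0, so the power on reset is reported (and
 * cleared) first; the luns changed unit attention surfaces on a later
 * command.
 */
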
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

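/*
 * Resid worked example: for a standard INQUIRY with an allocation length
 * of 252, fill_from_dev_buffer() copying a 36 byte response sets
 * resid = 252 - 36 = 216, telling the mid-level how much of the data-in
 * buffer was left untouched.
 */
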
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

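/*
 * Designator arithmetic example: with dev_id_num == 1 the logical unit
 * NAA-3 designator above becomes naa3_comp_b + 1 == 0x3333333000000001
 * (top nibble 3 == NAA locally assigned), while the target port
 * designator uses naa3_comp_a + target_dev_id + 1, keeping the two name
 * spaces distinct.
 */
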
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

1507 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1510 }
1511 
1512 /* Block device characteristics VPD page (SBC-3) */
1513 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1514 {
1515 	memset(arr, 0, 0x3c);
1516 	arr[0] = 0;
1517 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1518 	arr[2] = 0;
1519 	arr[3] = 5;	/* less than 1.8" */
1520 	if (devip->zmodel == BLK_ZONED_HA)
1521 		arr[4] = 1 << 4;	/* zoned field = 01b */
1522 
1523 	return 0x3c;
1524 }
1525 
1526 /* Logical block provisioning VPD page (SBC-4) */
1527 static int inquiry_vpd_b2(unsigned char *arr)
1528 {
1529 	memset(arr, 0, 0x4);
1530 	arr[0] = 0;			/* threshold exponent */
1531 	if (sdebug_lbpu)
1532 		arr[1] = 1 << 7;
1533 	if (sdebug_lbpws)
1534 		arr[1] |= 1 << 6;
1535 	if (sdebug_lbpws10)
1536 		arr[1] |= 1 << 5;
1537 	if (sdebug_lbprz && scsi_debug_lbp())
1538 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1539 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1540 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1541 	/* threshold_percentage=0 */
1542 	return 0x4;
1543 }
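
/*
 * Worked example (sketch, assuming scsi_debug_lbp() is true): with
 * lbpu=1, lbpws=1, lbpws10=0 and lbprz=1, byte 1 above reads
 * 0x80 | 0x40 | (1 << 2) = 0xc4, i.e. LBPU and LBPWS set with
 * LBPRZ=001b in bits 4:2.
 */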
1544 
1545 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1546 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1547 {
1548 	memset(arr, 0, 0x3c);
1549 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1550 	/*
1551 	 * Set Optimal number of open sequential write preferred zones and
1552 	 * Optimal number of non-sequentially written sequential write
1553 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1554 	 * fields set to zero, apart from Max. number of open swrz_s field.
1555 	 */
1556 	put_unaligned_be32(0xffffffff, &arr[4]);
1557 	put_unaligned_be32(0xffffffff, &arr[8]);
1558 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1559 		put_unaligned_be32(devip->max_open, &arr[12]);
1560 	else
1561 		put_unaligned_be32(0xffffffff, &arr[12]);
1562 	return 0x3c;
1563 }
1564 
1565 #define SDEBUG_LONG_INQ_SZ 96
1566 #define SDEBUG_MAX_INQ_ARR_SZ 584
1567 
1568 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1569 {
1570 	unsigned char pq_pdt;
1571 	unsigned char *arr;
1572 	unsigned char *cmd = scp->cmnd;
1573 	u32 alloc_len, n;
1574 	int ret;
1575 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1576 
1577 	alloc_len = get_unaligned_be16(cmd + 3);
1578 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1579 	if (!arr)
1580 		return DID_REQUEUE << 16;
1581 	is_disk = (sdebug_ptype == TYPE_DISK);
1582 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1583 	is_disk_zbc = (is_disk || is_zbc);
1584 	have_wlun = scsi_is_wlun(scp->device->lun);
1585 	if (have_wlun)
1586 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1587 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1588 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1589 	else
1590 		pq_pdt = (sdebug_ptype & 0x1f);
1591 	arr[0] = pq_pdt;
1592 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1593 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1594 		kfree(arr);
1595 		return check_condition_result;
1596 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1597 		int lu_id_num, port_group_id, target_dev_id;
1598 		u32 len;
1599 		char lu_id_str[6];
1600 		int host_no = devip->sdbg_host->shost->host_no;
1601 
1602 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1603 		    (devip->channel & 0x7f);
1604 		if (sdebug_vpd_use_hostno == 0)
1605 			host_no = 0;
1606 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1607 			    (devip->target * 1000) + devip->lun);
1608 		target_dev_id = ((host_no + 1) * 2000) +
1609 				 (devip->target * 1000) - 3;
1610 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1611 		if (0 == cmd[2]) { /* supported vital product data pages */
1612 			arr[1] = cmd[2];	/*sanity */
1613 			n = 4;
1614 			arr[n++] = 0x0;   /* this page */
1615 			arr[n++] = 0x80;  /* unit serial number */
1616 			arr[n++] = 0x83;  /* device identification */
1617 			arr[n++] = 0x84;  /* software interface ident. */
1618 			arr[n++] = 0x85;  /* management network addresses */
1619 			arr[n++] = 0x86;  /* extended inquiry */
1620 			arr[n++] = 0x87;  /* mode page policy */
1621 			arr[n++] = 0x88;  /* SCSI ports */
1622 			if (is_disk_zbc) {	  /* SBC or ZBC */
1623 				arr[n++] = 0x89;  /* ATA information */
1624 				arr[n++] = 0xb0;  /* Block limits */
1625 				arr[n++] = 0xb1;  /* Block characteristics */
1626 				if (is_disk)
1627 					arr[n++] = 0xb2;  /* LB Provisioning */
1628 				if (is_zbc)
1629 					arr[n++] = 0xb6;  /* ZB dev. char. */
1630 			}
1631 			arr[3] = n - 4;	  /* number of supported VPD pages */
1632 		} else if (0x80 == cmd[2]) { /* unit serial number */
1633 			arr[1] = cmd[2];	/*sanity */
1634 			arr[3] = len;
1635 			memcpy(&arr[4], lu_id_str, len);
1636 		} else if (0x83 == cmd[2]) { /* device identification */
1637 			arr[1] = cmd[2];	/*sanity */
1638 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1639 						target_dev_id, lu_id_num,
1640 						lu_id_str, len,
1641 						&devip->lu_name);
1642 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1643 			arr[1] = cmd[2];	/*sanity */
1644 			arr[3] = inquiry_vpd_84(&arr[4]);
1645 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1646 			arr[1] = cmd[2];	/*sanity */
1647 			arr[3] = inquiry_vpd_85(&arr[4]);
1648 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = 0x3c;	/* number of following entries */
1651 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1652 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1653 			else if (have_dif_prot)
1654 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1655 			else
1656 				arr[4] = 0x0;   /* no protection stuff */
1657 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1658 		} else if (0x87 == cmd[2]) { /* mode page policy */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = 0x8;	/* number of following entries */
1661 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1662 			arr[6] = 0x80;	/* mlus, shared */
1663 			arr[8] = 0x18;	 /* protocol specific lu */
1664 			arr[10] = 0x82;	 /* mlus, per initiator port */
1665 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1666 			arr[1] = cmd[2];	/*sanity */
1667 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1668 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1669 			arr[1] = cmd[2];        /*sanity */
1670 			n = inquiry_vpd_89(&arr[4]);
1671 			put_unaligned_be16(n, arr + 2);
1672 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1673 			arr[1] = cmd[2];        /*sanity */
1674 			arr[3] = inquiry_vpd_b0(&arr[4]);
1675 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1676 			arr[1] = cmd[2];        /*sanity */
1677 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1678 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1679 			arr[1] = cmd[2];        /*sanity */
1680 			arr[3] = inquiry_vpd_b2(&arr[4]);
1681 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1682 			arr[1] = cmd[2];        /*sanity */
1683 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1684 		} else {
1685 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1686 			kfree(arr);
1687 			return check_condition_result;
1688 		}
1689 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1690 		ret = fill_from_dev_buffer(scp, arr,
1691 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1692 		kfree(arr);
1693 		return ret;
1694 	}
1695 	/* drops through here for a standard inquiry */
1696 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1697 	arr[2] = sdebug_scsi_level;
1698 	arr[3] = 2;    /* response_data_format==2 */
1699 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1700 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1701 	if (sdebug_vpd_use_hostno == 0)
1702 		arr[5] |= 0x10; /* claim: implicit TPGS */
1703 	arr[6] = 0x10; /* claim: MultiP */
1704 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1705 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1706 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1707 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1708 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1709 	/* Use Vendor Specific area to place driver date in ASCII hex */
1710 	memcpy(&arr[36], sdebug_version_date, 8);
1711 	/* version descriptors (2 bytes each) follow */
1712 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1713 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1714 	n = 62;
1715 	if (is_disk) {		/* SBC-4 no version claimed */
1716 		put_unaligned_be16(0x600, arr + n);
1717 		n += 2;
1718 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1719 		put_unaligned_be16(0x525, arr + n);
1720 		n += 2;
1721 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1722 		put_unaligned_be16(0x624, arr + n);
1723 		n += 2;
1724 	}
1725 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1726 	ret = fill_from_dev_buffer(scp, arr,
1727 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1728 	kfree(arr);
1729 	return ret;
1730 }
1731 
1732 /* See resp_iec_m_pg() for how this data is manipulated */
1733 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1734 				   0, 0, 0x0, 0x0};
1735 
1736 static int resp_requests(struct scsi_cmnd *scp,
1737 			 struct sdebug_dev_info *devip)
1738 {
1739 	unsigned char *cmd = scp->cmnd;
1740 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1741 	bool dsense = !!(cmd[1] & 1);
1742 	u32 alloc_len = cmd[4];
1743 	u32 len = 18;
1744 	int stopped_state = atomic_read(&devip->stopped);
1745 
1746 	memset(arr, 0, sizeof(arr));
1747 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1748 		if (dsense) {
1749 			arr[0] = 0x72;
1750 			arr[1] = NOT_READY;
1751 			arr[2] = LOGICAL_UNIT_NOT_READY;
1752 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1753 			len = 8;
1754 		} else {
1755 			arr[0] = 0x70;
1756 		arr[2] = NOT_READY;	/* NOT_READY in sense_key */
1757 			arr[7] = 0xa;			/* 18 byte sense buffer */
1758 			arr[12] = LOGICAL_UNIT_NOT_READY;
1759 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1760 		}
1761 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1762 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1763 		if (dsense) {
1764 			arr[0] = 0x72;
1765 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1766 			arr[2] = THRESHOLD_EXCEEDED;
1767 			arr[3] = 0xff;		/* Failure prediction (false) */
1768 			len = 8;
1769 		} else {
1770 			arr[0] = 0x70;
1771 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1772 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1773 			arr[12] = THRESHOLD_EXCEEDED;
1774 			arr[13] = 0xff;		/* Failure prediction (false) */
1775 		}
1776 	} else {	/* nothing to report */
1777 		if (dsense) {
1778 			len = 8;
1779 			memset(arr, 0, len);
1780 			arr[0] = 0x72;
1781 		} else {
1782 			memset(arr, 0, len);
1783 			arr[0] = 0x70;
1784 			arr[7] = 0xa;
1785 		}
1786 	}
1787 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1788 }
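
/*
 * Format note (sketch): the two layouts built above follow the SPC
 * sense data conventions: descriptor format (0x72) keeps the sense key
 * in byte 1 and ASC/ASCQ in bytes 2 and 3 of an 8-byte header, while
 * fixed format (0x70) keeps the key in byte 2 and ASC/ASCQ in bytes 12
 * and 13 of an 18-byte buffer.
 */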
1789 
1790 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1791 {
1792 	unsigned char *cmd = scp->cmnd;
1793 	int power_cond, want_stop, stopped_state;
1794 	bool changing;
1795 
1796 	power_cond = (cmd[4] & 0xf0) >> 4;
1797 	if (power_cond) {
1798 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1799 		return check_condition_result;
1800 	}
1801 	want_stop = !(cmd[4] & 1);
1802 	stopped_state = atomic_read(&devip->stopped);
1803 	if (stopped_state == 2) {
1804 		ktime_t now_ts = ktime_get_boottime();
1805 
1806 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1807 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1808 
1809 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1810 				/* tur_ms_to_ready timer expired */
1811 				atomic_set(&devip->stopped, 0);
1812 				stopped_state = 0;
1813 			}
1814 		}
1815 		if (stopped_state == 2) {
1816 			if (want_stop) {
1817 				stopped_state = 1;	/* dummy up success */
1818 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1819 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1820 				return check_condition_result;
1821 			}
1822 		}
1823 	}
1824 	changing = (stopped_state != want_stop);
1825 	if (changing)
1826 		atomic_xchg(&devip->stopped, want_stop);
1827 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1828 		return SDEG_RES_IMMED_MASK;
1829 	else
1830 		return 0;
1831 }
1832 
1833 static sector_t get_sdebug_capacity(void)
1834 {
1835 	static const unsigned int gibibyte = 1073741824;
1836 
1837 	if (sdebug_virtual_gb > 0)
1838 		return (sector_t)sdebug_virtual_gb *
1839 			(gibibyte / sdebug_sector_size);
1840 	else
1841 		return sdebug_store_sectors;
1842 }
1843 
1844 #define SDEBUG_READCAP_ARR_SZ 8
1845 static int resp_readcap(struct scsi_cmnd *scp,
1846 			struct sdebug_dev_info *devip)
1847 {
1848 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1849 	unsigned int capac;
1850 
1851 	/* following just in case virtual_gb changed */
1852 	sdebug_capacity = get_sdebug_capacity();
1853 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1854 	if (sdebug_capacity < 0xffffffff) {
1855 		capac = (unsigned int)sdebug_capacity - 1;
1856 		put_unaligned_be32(capac, arr + 0);
1857 	} else
1858 		put_unaligned_be32(0xffffffff, arr + 0);
1859 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1860 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1861 }
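
/*
 * Example (sketch): virtual_gb=4 with 512-byte sectors gives 8388608
 * sectors, so READ CAPACITY(10) reports a last LBA of 8388607. At
 * 0xffffffff sectors or more the returned LBA saturates to 0xffffffff,
 * signalling the initiator to retry with READ CAPACITY(16).
 */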
1862 
1863 #define SDEBUG_READCAP16_ARR_SZ 32
1864 static int resp_readcap16(struct scsi_cmnd *scp,
1865 			  struct sdebug_dev_info *devip)
1866 {
1867 	unsigned char *cmd = scp->cmnd;
1868 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1869 	u32 alloc_len;
1870 
1871 	alloc_len = get_unaligned_be32(cmd + 10);
1872 	/* following just in case virtual_gb changed */
1873 	sdebug_capacity = get_sdebug_capacity();
1874 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1875 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1876 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1877 	arr[13] = sdebug_physblk_exp & 0xf;
1878 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1879 
1880 	if (scsi_debug_lbp()) {
1881 		arr[14] |= 0x80; /* LBPME */
1882 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1883 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1884 		 * in the wider field maps to 0 in this field.
1885 		 */
1886 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1887 			arr[14] |= 0x40;
1888 	}
1889 
1890 	arr[15] = sdebug_lowest_aligned & 0xff;
1891 
1892 	if (have_dif_prot) {
1893 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1894 		arr[12] |= 1; /* PROT_EN */
1895 	}
1896 
1897 	return fill_from_dev_buffer(scp, arr,
1898 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1899 }
1900 
1901 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1902 
1903 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1904 			      struct sdebug_dev_info *devip)
1905 {
1906 	unsigned char *cmd = scp->cmnd;
1907 	unsigned char *arr;
1908 	int host_no = devip->sdbg_host->shost->host_no;
1909 	int port_group_a, port_group_b, port_a, port_b;
1910 	u32 alen, n, rlen;
1911 	int ret;
1912 
1913 	alen = get_unaligned_be32(cmd + 6);
1914 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1915 	if (!arr)
1916 		return DID_REQUEUE << 16;
1917 	/*
1918 	 * EVPD page 0x88 states we have two ports, one
1919 	 * real and a fake port with no device connected.
1920 	 * So we create two port groups with one port each
1921 	 * and set the group with port B to unavailable.
1922 	 */
1923 	port_a = 0x1; /* relative port A */
1924 	port_b = 0x2; /* relative port B */
1925 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1926 			(devip->channel & 0x7f);
1927 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1928 			(devip->channel & 0x7f) + 0x80;
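
	/*
	 * Example (sketch): with host_no=0 and channel=0 this yields
	 * port_group_a = 0x100 and port_group_b = 0x180, paired with
	 * relative ports 1 and 2 below.
	 */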
1929 
1930 	/*
1931 	 * The asymmetric access state is cycled according to the host_no.
1932 	 */
1933 	n = 4;
1934 	if (sdebug_vpd_use_hostno == 0) {
1935 		arr[n++] = host_no % 3; /* Asymm access state */
1936 		arr[n++] = 0x0F; /* claim: all states are supported */
1937 	} else {
1938 		arr[n++] = 0x0; /* Active/Optimized path */
1939 		arr[n++] = 0x01; /* only support active/optimized paths */
1940 	}
1941 	put_unaligned_be16(port_group_a, arr + n);
1942 	n += 2;
1943 	arr[n++] = 0;    /* Reserved */
1944 	arr[n++] = 0;    /* Status code */
1945 	arr[n++] = 0;    /* Vendor unique */
1946 	arr[n++] = 0x1;  /* One port per group */
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Reserved */
1949 	put_unaligned_be16(port_a, arr + n);
1950 	n += 2;
1951 	arr[n++] = 3;    /* Port unavailable */
1952 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1953 	put_unaligned_be16(port_group_b, arr + n);
1954 	n += 2;
1955 	arr[n++] = 0;    /* Reserved */
1956 	arr[n++] = 0;    /* Status code */
1957 	arr[n++] = 0;    /* Vendor unique */
1958 	arr[n++] = 0x1;  /* One port per group */
1959 	arr[n++] = 0;    /* Reserved */
1960 	arr[n++] = 0;    /* Reserved */
1961 	put_unaligned_be16(port_b, arr + n);
1962 	n += 2;
1963 
1964 	rlen = n - 4;
1965 	put_unaligned_be32(rlen, arr + 0);
1966 
1967 	/*
1968 	 * Return the smallest of:
1969 	 * - the allocation length
1970 	 * - the constructed response length
1971 	 * - the maximum array size
1972 	 */
1973 	rlen = min(alen, n);
1974 	ret = fill_from_dev_buffer(scp, arr,
1975 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1976 	kfree(arr);
1977 	return ret;
1978 }
1979 
1980 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1981 			     struct sdebug_dev_info *devip)
1982 {
1983 	bool rctd;
1984 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1985 	u16 req_sa, u;
1986 	u32 alloc_len, a_len;
1987 	int k, offset, len, errsts, count, bump, na;
1988 	const struct opcode_info_t *oip;
1989 	const struct opcode_info_t *r_oip;
1990 	u8 *arr;
1991 	u8 *cmd = scp->cmnd;
1992 
1993 	rctd = !!(cmd[2] & 0x80);
1994 	reporting_opts = cmd[2] & 0x7;
1995 	req_opcode = cmd[3];
1996 	req_sa = get_unaligned_be16(cmd + 4);
1997 	alloc_len = get_unaligned_be32(cmd + 6);
1998 	if (alloc_len < 4 || alloc_len > 0xffff) {
1999 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2000 		return check_condition_result;
2001 	}
2002 	if (alloc_len > 8192)
2003 		a_len = 8192;
2004 	else
2005 		a_len = alloc_len;
2006 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2007 	if (NULL == arr) {
2008 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2009 				INSUFF_RES_ASCQ);
2010 		return check_condition_result;
2011 	}
2012 	switch (reporting_opts) {
2013 	case 0:	/* all commands */
2014 		/* count number of commands */
2015 		for (count = 0, oip = opcode_info_arr;
2016 		     oip->num_attached != 0xff; ++oip) {
2017 			if (F_INV_OP & oip->flags)
2018 				continue;
2019 			count += (oip->num_attached + 1);
2020 		}
2021 		bump = rctd ? 20 : 8;
2022 		put_unaligned_be32(count * bump, arr);
2023 		for (offset = 4, oip = opcode_info_arr;
2024 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2025 			if (F_INV_OP & oip->flags)
2026 				continue;
2027 			na = oip->num_attached;
2028 			arr[offset] = oip->opcode;
2029 			put_unaligned_be16(oip->sa, arr + offset + 2);
2030 			if (rctd)
2031 				arr[offset + 5] |= 0x2;
2032 			if (FF_SA & oip->flags)
2033 				arr[offset + 5] |= 0x1;
2034 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2035 			if (rctd)
2036 				put_unaligned_be16(0xa, arr + offset + 8);
2037 			r_oip = oip;
2038 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2039 				if (F_INV_OP & oip->flags)
2040 					continue;
2041 				offset += bump;
2042 				arr[offset] = oip->opcode;
2043 				put_unaligned_be16(oip->sa, arr + offset + 2);
2044 				if (rctd)
2045 					arr[offset + 5] |= 0x2;
2046 				if (FF_SA & oip->flags)
2047 					arr[offset + 5] |= 0x1;
2048 				put_unaligned_be16(oip->len_mask[0],
2049 						   arr + offset + 6);
2050 				if (rctd)
2051 					put_unaligned_be16(0xa,
2052 							   arr + offset + 8);
2053 			}
2054 			oip = r_oip;
2055 			offset += bump;
2056 		}
2057 		break;
2058 	case 1:	/* one command: opcode only */
2059 	case 2:	/* one command: opcode plus service action */
2060 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2061 		sdeb_i = opcode_ind_arr[req_opcode];
2062 		oip = &opcode_info_arr[sdeb_i];
2063 		if (F_INV_OP & oip->flags) {
2064 			supp = 1;
2065 			offset = 4;
2066 		} else {
2067 			if (1 == reporting_opts) {
2068 				if (FF_SA & oip->flags) {
2069 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2070 							     2, 2);
2071 					kfree(arr);
2072 					return check_condition_result;
2073 				}
2074 				req_sa = 0;
2075 			} else if (2 == reporting_opts &&
2076 				   0 == (FF_SA & oip->flags)) {
2077 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2078 				kfree(arr);
2079 				return check_condition_result;
2080 			}
2081 			if (0 == (FF_SA & oip->flags) &&
2082 			    req_opcode == oip->opcode)
2083 				supp = 3;
2084 			else if (0 == (FF_SA & oip->flags)) {
2085 				na = oip->num_attached;
2086 				for (k = 0, oip = oip->arrp; k < na;
2087 				     ++k, ++oip) {
2088 					if (req_opcode == oip->opcode)
2089 						break;
2090 				}
2091 				supp = (k >= na) ? 1 : 3;
2092 			} else if (req_sa != oip->sa) {
2093 				na = oip->num_attached;
2094 				for (k = 0, oip = oip->arrp; k < na;
2095 				     ++k, ++oip) {
2096 					if (req_sa == oip->sa)
2097 						break;
2098 				}
2099 				supp = (k >= na) ? 1 : 3;
2100 			} else
2101 				supp = 3;
2102 			if (3 == supp) {
2103 				u = oip->len_mask[0];
2104 				put_unaligned_be16(u, arr + 2);
2105 				arr[4] = oip->opcode;
2106 				for (k = 1; k < u; ++k)
2107 					arr[4 + k] = (k < 16) ?
2108 						 oip->len_mask[k] : 0xff;
2109 				offset = 4 + u;
2110 			} else
2111 				offset = 4;
2112 		}
2113 		arr[1] = (rctd ? 0x80 : 0) | supp;
2114 		if (rctd) {
2115 			put_unaligned_be16(0xa, arr + offset);
2116 			offset += 12;
2117 		}
2118 		break;
2119 	default:
2120 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2121 		kfree(arr);
2122 		return check_condition_result;
2123 	}
2124 	offset = (offset < a_len) ? offset : a_len;
2125 	len = (offset < alloc_len) ? offset : alloc_len;
2126 	errsts = fill_from_dev_buffer(scp, arr, len);
2127 	kfree(arr);
2128 	return errsts;
2129 }
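
/*
 * Sizing note (sketch): each "report all" descriptor above is 8 bytes,
 * or 20 bytes when RCTD is set, since a command timeouts descriptor
 * (a 2-byte length field holding 0xa plus 10 more bytes) is appended
 * to every entry; hence bump = rctd ? 20 : 8.
 */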
2130 
2131 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2132 			  struct sdebug_dev_info *devip)
2133 {
2134 	bool repd;
2135 	u32 alloc_len, len;
2136 	u8 arr[16];
2137 	u8 *cmd = scp->cmnd;
2138 
2139 	memset(arr, 0, sizeof(arr));
2140 	repd = !!(cmd[2] & 0x80);
2141 	alloc_len = get_unaligned_be32(cmd + 6);
2142 	if (alloc_len < 4) {
2143 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2144 		return check_condition_result;
2145 	}
2146 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2147 	arr[1] = 0x1;		/* ITNRS */
2148 	if (repd) {
2149 		arr[3] = 0xc;
2150 		len = 16;
2151 	} else
2152 		len = 4;
2153 
2154 	len = (len < alloc_len) ? len : alloc_len;
2155 	return fill_from_dev_buffer(scp, arr, len);
2156 }
2157 
2158 /* <<Following mode page info copied from ST318451LW>> */
2159 
2160 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2161 {	/* Read-Write Error Recovery page for mode_sense */
2162 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2163 					5, 0, 0xff, 0xff};
2164 
2165 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2166 	if (1 == pcontrol)
2167 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2168 	return sizeof(err_recov_pg);
2169 }
2170 
2171 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2172 { 	/* Disconnect-Reconnect page for mode_sense */
2173 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2174 					 0, 0, 0, 0, 0, 0, 0, 0};
2175 
2176 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2177 	if (1 == pcontrol)
2178 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2179 	return sizeof(disconnect_pg);
2180 }
2181 
2182 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2183 {       /* Format device page for mode_sense */
2184 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2185 				     0, 0, 0, 0, 0, 0, 0, 0,
2186 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2187 
2188 	memcpy(p, format_pg, sizeof(format_pg));
2189 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2190 	put_unaligned_be16(sdebug_sector_size, p + 12);
2191 	if (sdebug_removable)
2192 		p[20] |= 0x20; /* should agree with INQUIRY */
2193 	if (1 == pcontrol)
2194 		memset(p + 2, 0, sizeof(format_pg) - 2);
2195 	return sizeof(format_pg);
2196 }
2197 
2198 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2200 				     0, 0, 0, 0};
2201 
2202 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2203 { 	/* Caching page for mode_sense */
2204 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2205 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2206 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2207 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2208 
2209 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2210 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2211 	memcpy(p, caching_pg, sizeof(caching_pg));
2212 	if (1 == pcontrol)
2213 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2214 	else if (2 == pcontrol)
2215 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2216 	return sizeof(caching_pg);
2217 }
2218 
2219 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2220 				    0, 0, 0x2, 0x4b};
2221 
2222 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2223 { 	/* Control mode page for mode_sense */
2224 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2225 					0, 0, 0, 0};
2226 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2227 				     0, 0, 0x2, 0x4b};
2228 
2229 	if (sdebug_dsense)
2230 		ctrl_m_pg[2] |= 0x4;
2231 	else
2232 		ctrl_m_pg[2] &= ~0x4;
2233 
2234 	if (sdebug_ato)
2235 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2236 
2237 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2238 	if (1 == pcontrol)
2239 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2240 	else if (2 == pcontrol)
2241 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2242 	return sizeof(ctrl_m_pg);
2243 }
2244 
2245 
2246 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2247 {	/* Informational Exceptions control mode page for mode_sense */
2248 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2249 				       0, 0, 0x0, 0x0};
2250 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2251 				      0, 0, 0x0, 0x0};
2252 
2253 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2254 	if (1 == pcontrol)
2255 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2256 	else if (2 == pcontrol)
2257 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2258 	return sizeof(iec_m_pg);
2259 }
2260 
2261 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2262 {	/* SAS SSP mode page - short format for mode_sense */
2263 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2264 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2265 
2266 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2267 	if (1 == pcontrol)
2268 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2269 	return sizeof(sas_sf_m_pg);
2270 }
2271 
2272 
2273 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2274 			      int target_dev_id)
2275 {	/* SAS phy control and discover mode page for mode_sense */
2276 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2277 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2278 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2279 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2280 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2281 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2282 		    0, 0, 0, 0, 0, 0, 0, 0,
2283 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2284 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2285 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2286 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2287 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2288 		    0, 0, 0, 0, 0, 0, 0, 0,
2289 		};
2290 	int port_a, port_b;
2291 
2292 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2293 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2294 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2295 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2296 	port_a = target_dev_id + 1;
2297 	port_b = port_a + 1;
2298 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2299 	put_unaligned_be32(port_a, p + 20);
2300 	put_unaligned_be32(port_b, p + 48 + 20);
2301 	if (1 == pcontrol)
2302 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2303 	return sizeof(sas_pcd_m_pg);
2304 }
2305 
2306 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2307 {	/* SAS SSP shared protocol specific port mode subpage */
2308 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2309 		    0, 0, 0, 0, 0, 0, 0, 0,
2310 		};
2311 
2312 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2313 	if (1 == pcontrol)
2314 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2315 	return sizeof(sas_sha_m_pg);
2316 }
2317 
2318 #define SDEBUG_MAX_MSENSE_SZ 256
2319 
2320 static int resp_mode_sense(struct scsi_cmnd *scp,
2321 			   struct sdebug_dev_info *devip)
2322 {
2323 	int pcontrol, pcode, subpcode, bd_len;
2324 	unsigned char dev_spec;
2325 	u32 alloc_len, offset, len;
2326 	int target_dev_id;
2327 	int target = scp->device->id;
2328 	unsigned char *ap;
2329 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2330 	unsigned char *cmd = scp->cmnd;
2331 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2332 
2333 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2334 	pcontrol = (cmd[2] & 0xc0) >> 6;
2335 	pcode = cmd[2] & 0x3f;
2336 	subpcode = cmd[3];
2337 	msense_6 = (MODE_SENSE == cmd[0]);
2338 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2339 	is_disk = (sdebug_ptype == TYPE_DISK);
2340 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2341 	if ((is_disk || is_zbc) && !dbd)
2342 		bd_len = llbaa ? 16 : 8;
2343 	else
2344 		bd_len = 0;
2345 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2346 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2347 	if (0x3 == pcontrol) {  /* Saving values not supported */
2348 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2349 		return check_condition_result;
2350 	}
2351 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2352 			(devip->target * 1000) - 3;
2353 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2354 	if (is_disk || is_zbc) {
2355 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2356 		if (sdebug_wp)
2357 			dev_spec |= 0x80;
2358 	} else
2359 		dev_spec = 0x0;
2360 	if (msense_6) {
2361 		arr[2] = dev_spec;
2362 		arr[3] = bd_len;
2363 		offset = 4;
2364 	} else {
2365 		arr[3] = dev_spec;
2366 		if (16 == bd_len)
2367 			arr[4] = 0x1;	/* set LONGLBA bit */
2368 		arr[7] = bd_len;	/* assume 255 or less */
2369 		offset = 8;
2370 	}
2371 	ap = arr + offset;
2372 	if ((bd_len > 0) && (!sdebug_capacity))
2373 		sdebug_capacity = get_sdebug_capacity();
2374 
2375 	if (8 == bd_len) {
2376 		if (sdebug_capacity > 0xfffffffe)
2377 			put_unaligned_be32(0xffffffff, ap + 0);
2378 		else
2379 			put_unaligned_be32(sdebug_capacity, ap + 0);
2380 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2381 		offset += bd_len;
2382 		ap = arr + offset;
2383 	} else if (16 == bd_len) {
2384 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2385 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2386 		offset += bd_len;
2387 		ap = arr + offset;
2388 	}
2389 
2390 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2391 		/* TODO: Control Extension page */
2392 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2393 		return check_condition_result;
2394 	}
2395 	bad_pcode = false;
2396 
2397 	switch (pcode) {
2398 	case 0x1:	/* Read-Write error recovery page, direct access */
2399 		len = resp_err_recov_pg(ap, pcontrol, target);
2400 		offset += len;
2401 		break;
2402 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2403 		len = resp_disconnect_pg(ap, pcontrol, target);
2404 		offset += len;
2405 		break;
2406 	case 0x3:       /* Format device page, direct access */
2407 		if (is_disk) {
2408 			len = resp_format_pg(ap, pcontrol, target);
2409 			offset += len;
2410 		} else
2411 			bad_pcode = true;
2412 		break;
2413 	case 0x8:	/* Caching page, direct access */
2414 		if (is_disk || is_zbc) {
2415 			len = resp_caching_pg(ap, pcontrol, target);
2416 			offset += len;
2417 		} else
2418 			bad_pcode = true;
2419 		break;
2420 	case 0xa:	/* Control Mode page, all devices */
2421 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2422 		offset += len;
2423 		break;
2424 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2425 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2426 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2427 			return check_condition_result;
2428 		}
2429 		len = 0;
2430 		if ((0x0 == subpcode) || (0xff == subpcode))
2431 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2432 		if ((0x1 == subpcode) || (0xff == subpcode))
2433 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2434 						  target_dev_id);
2435 		if ((0x2 == subpcode) || (0xff == subpcode))
2436 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2437 		offset += len;
2438 		break;
2439 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2440 		len = resp_iec_m_pg(ap, pcontrol, target);
2441 		offset += len;
2442 		break;
2443 	case 0x3f:	/* Read all Mode pages */
2444 		if ((0 == subpcode) || (0xff == subpcode)) {
2445 			len = resp_err_recov_pg(ap, pcontrol, target);
2446 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2447 			if (is_disk) {
2448 				len += resp_format_pg(ap + len, pcontrol,
2449 						      target);
2450 				len += resp_caching_pg(ap + len, pcontrol,
2451 						       target);
2452 			} else if (is_zbc) {
2453 				len += resp_caching_pg(ap + len, pcontrol,
2454 						       target);
2455 			}
2456 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2457 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2458 			if (0xff == subpcode) {
2459 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2460 						  target, target_dev_id);
2461 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2462 			}
2463 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2464 			offset += len;
2465 		} else {
2466 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2467 			return check_condition_result;
2468 		}
2469 		break;
2470 	default:
2471 		bad_pcode = true;
2472 		break;
2473 	}
2474 	if (bad_pcode) {
2475 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2476 		return check_condition_result;
2477 	}
2478 	if (msense_6)
2479 		arr[0] = offset - 1;
2480 	else
2481 		put_unaligned_be16((offset - 2), arr + 0);
2482 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2483 }
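
/*
 * Block descriptor note (sketch): MODE SENSE(10) with LLBAA=1 on a disk
 * or ZBC device yields the 16-byte descriptor carrying the full 64-bit
 * block count, while MODE SENSE(6) is limited to the 8-byte form whose
 * 32-bit count saturates at 0xffffffff, mirroring READ CAPACITY(10).
 */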
2484 
2485 #define SDEBUG_MAX_MSELECT_SZ 512
2486 
2487 static int resp_mode_select(struct scsi_cmnd *scp,
2488 			    struct sdebug_dev_info *devip)
2489 {
2490 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2491 	int param_len, res, mpage;
2492 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2493 	unsigned char *cmd = scp->cmnd;
2494 	int mselect6 = (MODE_SELECT == cmd[0]);
2495 
2496 	memset(arr, 0, sizeof(arr));
2497 	pf = cmd[1] & 0x10;
2498 	sp = cmd[1] & 0x1;
2499 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2500 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2501 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2502 		return check_condition_result;
2503 	}
2504 	res = fetch_to_dev_buffer(scp, arr, param_len);
2505 	if (-1 == res)
2506 		return DID_ERROR << 16;
2507 	else if (sdebug_verbose && (res < param_len))
2508 		sdev_printk(KERN_INFO, scp->device,
2509 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2510 			    __func__, param_len, res);
2511 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2512 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2513 	off = bd_len + (mselect6 ? 4 : 8);
2514 	if (md_len > 2 || off >= res) {
2515 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2516 		return check_condition_result;
2517 	}
2518 	mpage = arr[off] & 0x3f;
2519 	ps = !!(arr[off] & 0x80);
2520 	if (ps) {
2521 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2522 		return check_condition_result;
2523 	}
2524 	spf = !!(arr[off] & 0x40);
2525 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2526 		       (arr[off + 1] + 2);
2527 	if ((pg_len + off) > param_len) {
2528 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2529 				PARAMETER_LIST_LENGTH_ERR, 0);
2530 		return check_condition_result;
2531 	}
2532 	switch (mpage) {
2533 	case 0x8:      /* Caching Mode page */
2534 		if (caching_pg[1] == arr[off + 1]) {
2535 			memcpy(caching_pg + 2, arr + off + 2,
2536 			       sizeof(caching_pg) - 2);
2537 			goto set_mode_changed_ua;
2538 		}
2539 		break;
2540 	case 0xa:      /* Control Mode page */
2541 		if (ctrl_m_pg[1] == arr[off + 1]) {
2542 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2543 			       sizeof(ctrl_m_pg) - 2);
2544 			if (ctrl_m_pg[4] & 0x8)
2545 				sdebug_wp = true;
2546 			else
2547 				sdebug_wp = false;
2548 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2549 			goto set_mode_changed_ua;
2550 		}
2551 		break;
2552 	case 0x1c:      /* Informational Exceptions Mode page */
2553 		if (iec_m_pg[1] == arr[off + 1]) {
2554 			memcpy(iec_m_pg + 2, arr + off + 2,
2555 			       sizeof(iec_m_pg) - 2);
2556 			goto set_mode_changed_ua;
2557 		}
2558 		break;
2559 	default:
2560 		break;
2561 	}
2562 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2563 	return check_condition_result;
2564 set_mode_changed_ua:
2565 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2566 	return 0;
2567 }
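
/*
 * Usage example (sketch): a MODE SELECT that rewrites the caching mode
 * page with byte 2 bit 2 clear turns write-back caching off (WCE=0),
 * and the jump to set_mode_changed_ua above then raises a MODE
 * PARAMETERS CHANGED unit attention on the logical unit.
 */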
2568 
2569 static int resp_temp_l_pg(unsigned char *arr)
2570 {
2571 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2572 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2573 		};
2574 
2575 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2576 	return sizeof(temp_l_pg);
2577 }
2578 
2579 static int resp_ie_l_pg(unsigned char *arr)
2580 {
2581 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2582 		};
2583 
2584 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2585 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2586 		arr[4] = THRESHOLD_EXCEEDED;
2587 		arr[5] = 0xff;
2588 	}
2589 	return sizeof(ie_l_pg);
2590 }
2591 
2592 static int resp_env_rep_l_spg(unsigned char *arr)
2593 {
2594 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2595 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2596 					 0x1, 0x0, 0x23, 0x8,
2597 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2598 		};
2599 
2600 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2601 	return sizeof(env_rep_l_spg);
2602 }
2603 
2604 #define SDEBUG_MAX_LSENSE_SZ 512
2605 
2606 static int resp_log_sense(struct scsi_cmnd *scp,
2607 			  struct sdebug_dev_info *devip)
2608 {
2609 	int ppc, sp, pcode, subpcode;
2610 	u32 alloc_len, len, n;
2611 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2612 	unsigned char *cmd = scp->cmnd;
2613 
2614 	memset(arr, 0, sizeof(arr));
2615 	ppc = cmd[1] & 0x2;
2616 	sp = cmd[1] & 0x1;
2617 	if (ppc || sp) {
2618 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2619 		return check_condition_result;
2620 	}
2621 	pcode = cmd[2] & 0x3f;
2622 	subpcode = cmd[3] & 0xff;
2623 	alloc_len = get_unaligned_be16(cmd + 7);
2624 	arr[0] = pcode;
2625 	if (0 == subpcode) {
2626 		switch (pcode) {
2627 		case 0x0:	/* Supported log pages log page */
2628 			n = 4;
2629 			arr[n++] = 0x0;		/* this page */
2630 			arr[n++] = 0xd;		/* Temperature */
2631 			arr[n++] = 0x2f;	/* Informational exceptions */
2632 			arr[3] = n - 4;
2633 			break;
2634 		case 0xd:	/* Temperature log page */
2635 			arr[3] = resp_temp_l_pg(arr + 4);
2636 			break;
2637 		case 0x2f:	/* Informational exceptions log page */
2638 			arr[3] = resp_ie_l_pg(arr + 4);
2639 			break;
2640 		default:
2641 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2642 			return check_condition_result;
2643 		}
2644 	} else if (0xff == subpcode) {
2645 		arr[0] |= 0x40;
2646 		arr[1] = subpcode;
2647 		switch (pcode) {
2648 		case 0x0:	/* Supported log pages and subpages log page */
2649 			n = 4;
2650 			arr[n++] = 0x0;
2651 			arr[n++] = 0x0;		/* 0,0 page */
2652 			arr[n++] = 0x0;
2653 			arr[n++] = 0xff;	/* this page */
2654 			arr[n++] = 0xd;
2655 			arr[n++] = 0x0;		/* Temperature */
2656 			arr[n++] = 0xd;
2657 			arr[n++] = 0x1;		/* Environment reporting */
2658 			arr[n++] = 0xd;
2659 			arr[n++] = 0xff;	/* all 0xd subpages */
2660 			arr[n++] = 0x2f;
2661 			arr[n++] = 0x0;	/* Informational exceptions */
2662 			arr[n++] = 0x2f;
2663 			arr[n++] = 0xff;	/* all 0x2f subpages */
2664 			arr[3] = n - 4;
2665 			break;
2666 		case 0xd:	/* Temperature subpages */
2667 			n = 4;
2668 			arr[n++] = 0xd;
2669 			arr[n++] = 0x0;		/* Temperature */
2670 			arr[n++] = 0xd;
2671 			arr[n++] = 0x1;		/* Environment reporting */
2672 			arr[n++] = 0xd;
2673 			arr[n++] = 0xff;	/* these subpages */
2674 			arr[3] = n - 4;
2675 			break;
2676 		case 0x2f:	/* Informational exceptions subpages */
2677 			n = 4;
2678 			arr[n++] = 0x2f;
2679 			arr[n++] = 0x0;		/* Informational exceptions */
2680 			arr[n++] = 0x2f;
2681 			arr[n++] = 0xff;	/* these subpages */
2682 			arr[3] = n - 4;
2683 			break;
2684 		default:
2685 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2686 			return check_condition_result;
2687 		}
2688 	} else if (subpcode > 0) {
2689 		arr[0] |= 0x40;
2690 		arr[1] = subpcode;
2691 		if (pcode == 0xd && subpcode == 1) {
2692 			arr[3] = resp_env_rep_l_spg(arr + 4);
2693 		} else {
2694 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2695 			return check_condition_result;
2696 		}
2697 	} else {
2698 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2699 		return check_condition_result;
2700 	}
2701 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2702 	return fill_from_dev_buffer(scp, arr,
2703 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2704 }
2705 
2706 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2707 {
2708 	return devip->nr_zones != 0;
2709 }
2710 
2711 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2712 					unsigned long long lba)
2713 {
2714 	return &devip->zstate[lba >> devip->zsize_shift];
2715 }
2716 
2717 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2718 {
2719 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2720 }
2721 
2722 static void zbc_close_zone(struct sdebug_dev_info *devip,
2723 			   struct sdeb_zone_state *zsp)
2724 {
2725 	enum sdebug_z_cond zc;
2726 
2727 	if (zbc_zone_is_conv(zsp))
2728 		return;
2729 
2730 	zc = zsp->z_cond;
2731 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2732 		return;
2733 
2734 	if (zc == ZC2_IMPLICIT_OPEN)
2735 		devip->nr_imp_open--;
2736 	else
2737 		devip->nr_exp_open--;
2738 
2739 	if (zsp->z_wp == zsp->z_start) {
2740 		zsp->z_cond = ZC1_EMPTY;
2741 	} else {
2742 		zsp->z_cond = ZC4_CLOSED;
2743 		devip->nr_closed++;
2744 	}
2745 }
2746 
2747 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2748 {
2749 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2750 	unsigned int i;
2751 
2752 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2753 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2754 			zbc_close_zone(devip, zsp);
2755 			return;
2756 		}
2757 	}
2758 }
2759 
2760 static void zbc_open_zone(struct sdebug_dev_info *devip,
2761 			  struct sdeb_zone_state *zsp, bool explicit)
2762 {
2763 	enum sdebug_z_cond zc;
2764 
2765 	if (zbc_zone_is_conv(zsp))
2766 		return;
2767 
2768 	zc = zsp->z_cond;
2769 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2770 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2771 		return;
2772 
2773 	/* Close an implicit open zone if necessary */
2774 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2775 		zbc_close_zone(devip, zsp);
2776 	else if (devip->max_open &&
2777 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2778 		zbc_close_imp_open_zone(devip);
2779 
2780 	if (zsp->z_cond == ZC4_CLOSED)
2781 		devip->nr_closed--;
2782 	if (explicit) {
2783 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2784 		devip->nr_exp_open++;
2785 	} else {
2786 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2787 		devip->nr_imp_open++;
2788 	}
2789 }
2790 
2791 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2792 		       unsigned long long lba, unsigned int num)
2793 {
2794 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2795 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2796 
2797 	if (zbc_zone_is_conv(zsp))
2798 		return;
2799 
2800 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2801 		zsp->z_wp += num;
2802 		if (zsp->z_wp >= zend)
2803 			zsp->z_cond = ZC5_FULL;
2804 		return;
2805 	}
2806 
2807 	while (num) {
2808 		if (lba != zsp->z_wp)
2809 			zsp->z_non_seq_resource = true;
2810 
2811 		end = lba + num;
2812 		if (end >= zend) {
2813 			n = zend - lba;
2814 			zsp->z_wp = zend;
2815 		} else if (end > zsp->z_wp) {
2816 			n = num;
2817 			zsp->z_wp = end;
2818 		} else {
2819 			n = num;
2820 		}
2821 		if (zsp->z_wp >= zend)
2822 			zsp->z_cond = ZC5_FULL;
2823 
2824 		num -= n;
2825 		lba += n;
2826 		if (num) {
2827 			zsp++;
2828 			zend = zsp->z_start + zsp->z_size;
2829 		}
2830 	}
2831 }
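
/*
 * Example (sketch, sequential-write-preferred zone of 8 blocks with
 * z_wp at z_start + 2): a 2-block write at z_start + 4 is accepted,
 * marks the zone as a non-sequential-write resource and advances z_wp
 * to z_start + 6, while a later write wholly below z_wp leaves z_wp
 * untouched.
 */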
2832 
2833 static int check_zbc_access_params(struct scsi_cmnd *scp,
2834 			unsigned long long lba, unsigned int num, bool write)
2835 {
2836 	struct scsi_device *sdp = scp->device;
2837 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2838 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2839 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2840 
2841 	if (!write) {
2842 		if (devip->zmodel == BLK_ZONED_HA)
2843 			return 0;
2844 		/* For host-managed, reads cannot cross zone type boundaries */
2845 		if (zsp_end != zsp &&
2846 		    zbc_zone_is_conv(zsp) &&
2847 		    !zbc_zone_is_conv(zsp_end)) {
2848 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2849 					LBA_OUT_OF_RANGE,
2850 					READ_INVDATA_ASCQ);
2851 			return check_condition_result;
2852 		}
2853 		return 0;
2854 	}
2855 
2856 	/* No restrictions for writes within conventional zones */
2857 	if (zbc_zone_is_conv(zsp)) {
2858 		if (!zbc_zone_is_conv(zsp_end)) {
2859 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2860 					LBA_OUT_OF_RANGE,
2861 					WRITE_BOUNDARY_ASCQ);
2862 			return check_condition_result;
2863 		}
2864 		return 0;
2865 	}
2866 
2867 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2868 		/* Writes cannot cross sequential zone boundaries */
2869 		if (zsp_end != zsp) {
2870 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2871 					LBA_OUT_OF_RANGE,
2872 					WRITE_BOUNDARY_ASCQ);
2873 			return check_condition_result;
2874 		}
2875 		/* Cannot write full zones */
2876 		if (zsp->z_cond == ZC5_FULL) {
2877 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2878 					INVALID_FIELD_IN_CDB, 0);
2879 			return check_condition_result;
2880 		}
2881 		/* Writes must be aligned to the zone WP */
2882 		if (lba != zsp->z_wp) {
2883 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2884 					LBA_OUT_OF_RANGE,
2885 					UNALIGNED_WRITE_ASCQ);
2886 			return check_condition_result;
2887 		}
2888 	}
2889 
2890 	/* Handle implicit open of closed and empty zones */
2891 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2892 		if (devip->max_open &&
2893 		    devip->nr_exp_open >= devip->max_open) {
2894 			mk_sense_buffer(scp, DATA_PROTECT,
2895 					INSUFF_RES_ASC,
2896 					INSUFF_ZONE_ASCQ);
2897 			return check_condition_result;
2898 		}
2899 		zbc_open_zone(devip, zsp, false);
2900 	}
2901 
2902 	return 0;
2903 }
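
/*
 * Example (sketch): for a sequential-write-required zone with z_wp at
 * z_start + 8, a write at z_start + 4 is rejected above with
 * LBA OUT OF RANGE / UNALIGNED WRITE COMMAND, whereas a write at
 * exactly z_start + 8 proceeds and implicitly opens the zone if it was
 * EMPTY or CLOSED.
 */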
2904 
2905 static inline int check_device_access_params(struct scsi_cmnd *scp,
2906 			unsigned long long lba, unsigned int num, bool write)
2908 {
2909 	struct scsi_device *sdp = scp->device;
2910 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2911 
2912 	if (lba + num > sdebug_capacity) {
2913 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2914 		return check_condition_result;
2915 	}
2916 	/* transfer length excessive (tie in to block limits VPD page) */
2917 	if (num > sdebug_store_sectors) {
2918 		/* needs work to find which cdb byte 'num' comes from */
2919 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2920 		return check_condition_result;
2921 	}
2922 	if (write && unlikely(sdebug_wp)) {
2923 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2924 		return check_condition_result;
2925 	}
2926 	if (sdebug_dev_is_zoned(devip))
2927 		return check_zbc_access_params(scp, lba, num, write);
2928 
2929 	return 0;
2930 }
2931 
2932 /*
2933  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2934  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2935  * that access any of the "stores" in struct sdeb_store_info should call this
2936  * function with bug_if_fake_rw set to true.
2937  */
2938 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2939 						bool bug_if_fake_rw)
2940 {
2941 	if (sdebug_fake_rw) {
2942 		BUG_ON(bug_if_fake_rw);	/* See note above */
2943 		return NULL;
2944 	}
2945 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2946 }
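
/*
 * Call pattern (sketch): response functions that dereference the store
 * pass bug_if_fake_rw=true, e.g.
 *
 *	struct sdeb_store_info *sip = devip2sip(devip, true);
 *
 * while callers that can legitimately run with fake_rw=1 pass false and
 * must tolerate a NULL return.
 */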
2947 
2948 /* Returns number of bytes copied or -1 if error. */
2949 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2950 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2951 {
2952 	int ret;
2953 	u64 block, rest = 0;
2954 	enum dma_data_direction dir;
2955 	struct scsi_data_buffer *sdb = &scp->sdb;
2956 	u8 *fsp;
2957 
2958 	if (do_write) {
2959 		dir = DMA_TO_DEVICE;
2960 		write_since_sync = true;
2961 	} else {
2962 		dir = DMA_FROM_DEVICE;
2963 	}
2964 
2965 	if (!sdb->length || !sip)
2966 		return 0;
2967 	if (scp->sc_data_direction != dir)
2968 		return -1;
2969 	fsp = sip->storep;
2970 
2971 	block = do_div(lba, sdebug_store_sectors);
2972 	if (block + num > sdebug_store_sectors)
2973 		rest = block + num - sdebug_store_sectors;
2974 
2975 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2976 		   fsp + (block * sdebug_sector_size),
2977 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2978 	if (ret != (num - rest) * sdebug_sector_size)
2979 		return ret;
2980 
2981 	if (rest) {
2982 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2983 			    fsp, rest * sdebug_sector_size,
2984 			    sg_skip + ((num - rest) * sdebug_sector_size),
2985 			    do_write);
2986 	}
2987 
2988 	return ret;
2989 }
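
/*
 * Wrap-around example (sketch): with sdebug_store_sectors=100, a read
 * of num=5 at lba=98 computes block=98 and rest=3, so two sectors are
 * copied from the tail of the store and the remaining three from its
 * start, emulating the modulo addressing of the shared backing store.
 */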
2990 
2991 /* Returns number of bytes copied or -1 if error. */
2992 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2993 {
2994 	struct scsi_data_buffer *sdb = &scp->sdb;
2995 
2996 	if (!sdb->length)
2997 		return 0;
2998 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2999 		return -1;
3000 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3001 			      num * sdebug_sector_size, 0, true);
3002 }
3003 
3004 /* If the store at lba compares equal to the first num blocks of arr,
3005  * then copy the top half of arr into the store at lba and return
3006  * true. If the comparison fails then return false. */
3007 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3008 			      const u8 *arr, bool compare_only)
3009 {
3010 	bool res;
3011 	u64 block, rest = 0;
3012 	u32 store_blks = sdebug_store_sectors;
3013 	u32 lb_size = sdebug_sector_size;
3014 	u8 *fsp = sip->storep;
3015 
3016 	block = do_div(lba, store_blks);
3017 	if (block + num > store_blks)
3018 		rest = block + num - store_blks;
3019 
3020 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3021 	if (!res)
3022 		return res;
3023 	if (rest)
3024 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3025 			      rest * lb_size);
3026 	if (!res)
3027 		return res;
3028 	if (compare_only)
3029 		return true;
3030 	arr += num * lb_size;
3031 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3032 	if (rest)
3033 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3034 	return res;
3035 }
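
/*
 * Buffer layout (sketch): for a COMPARE AND WRITE of num blocks the
 * caller's arr holds 2 * num * lb_size bytes; the first half is the
 * verify data compared above and the second half is the replacement
 * data written back on a match (the write-back is skipped when
 * compare_only is set).
 */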
3036 
3037 static __be16 dif_compute_csum(const void *buf, int len)
3038 {
3039 	__be16 csum;
3040 
3041 	if (sdebug_guard)
3042 		csum = (__force __be16)ip_compute_csum(buf, len);
3043 	else
3044 		csum = cpu_to_be16(crc_t10dif(buf, len));
3045 
3046 	return csum;
3047 }
3048 
3049 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3050 		      sector_t sector, u32 ei_lba)
3051 {
3052 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3053 
3054 	if (sdt->guard_tag != csum) {
3055 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3056 			(unsigned long)sector,
3057 			be16_to_cpu(sdt->guard_tag),
3058 			be16_to_cpu(csum));
3059 		return 0x01;
3060 	}
3061 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3062 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3063 		pr_err("REF check failed on sector %lu\n",
3064 			(unsigned long)sector);
3065 		return 0x03;
3066 	}
3067 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3068 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3069 		pr_err("REF check failed on sector %lu\n",
3070 			(unsigned long)sector);
3071 		return 0x03;
3072 	}
3073 	return 0;
3074 }
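
/*
 * Check summary (sketch): Type 1 requires the reference tag to equal
 * the low 32 bits of the sector number, while Type 2 compares it
 * against the expected initial LBA (ei_lba) instead. The 0x01/0x03
 * return values correspond to the ASCQ codes for logical block guard
 * and reference tag check failures that callers fold into sense data.
 */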
3075 
3076 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3077 			  unsigned int sectors, bool read)
3078 {
3079 	size_t resid;
3080 	void *paddr;
3081 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3082 						scp->device->hostdata, true);
3083 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3084 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3085 	struct sg_mapping_iter miter;
3086 
3087 	/* Bytes of protection data to copy into sgl */
3088 	resid = sectors * sizeof(*dif_storep);
3089 
3090 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3091 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3092 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3093 
3094 	while (sg_miter_next(&miter) && resid > 0) {
3095 		size_t len = min_t(size_t, miter.length, resid);
3096 		void *start = dif_store(sip, sector);
3097 		size_t rest = 0;
3098 
3099 		if (dif_store_end < start + len)
3100 			rest = start + len - dif_store_end;
3101 
3102 		paddr = miter.addr;
3103 
3104 		if (read)
3105 			memcpy(paddr, start, len - rest);
3106 		else
3107 			memcpy(start, paddr, len - rest);
3108 
3109 		if (rest) {
3110 			if (read)
3111 				memcpy(paddr + len - rest, dif_storep, rest);
3112 			else
3113 				memcpy(dif_storep, paddr + len - rest, rest);
3114 		}
3115 
3116 		sector += len / sizeof(*dif_storep);
3117 		resid -= len;
3118 	}
3119 	sg_miter_stop(&miter);
3120 }
3121 
3122 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3123 			    unsigned int sectors, u32 ei_lba)
3124 {
3125 	int ret = 0;
3126 	unsigned int i;
3127 	sector_t sector;
3128 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3129 						scp->device->hostdata, true);
3130 	struct t10_pi_tuple *sdt;
3131 
3132 	for (i = 0; i < sectors; i++, ei_lba++) {
3133 		sector = start_sec + i;
3134 		sdt = dif_store(sip, sector);
3135 
3136 		if (sdt->app_tag == cpu_to_be16(0xffff))
3137 			continue;
3138 
3139 		/*
3140 		 * Because scsi_debug acts as both initiator and
3141 		 * target we proceed to verify the PI even if
3142 		 * RDPROTECT=3. This is done so the "initiator" knows
3143 		 * which type of error to return. Otherwise we would
3144 		 * have to iterate over the PI twice.
3145 		 */
3146 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3147 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3148 					 sector, ei_lba);
3149 			if (ret) {
3150 				dif_errors++;
3151 				break;
3152 			}
3153 		}
3154 	}
3155 
3156 	dif_copy_prot(scp, start_sec, sectors, true);
3157 	dix_reads++;
3158 
3159 	return ret;
3160 }
3161 
3162 static inline void
3163 sdeb_read_lock(struct sdeb_store_info *sip)
3164 {
3165 	if (sdebug_no_rwlock) {
3166 		if (sip)
3167 			__acquire(&sip->macc_lck);
3168 		else
3169 			__acquire(&sdeb_fake_rw_lck);
3170 	} else {
3171 		if (sip)
3172 			read_lock(&sip->macc_lck);
3173 		else
3174 			read_lock(&sdeb_fake_rw_lck);
3175 	}
3176 }
3177 
3178 static inline void
3179 sdeb_read_unlock(struct sdeb_store_info *sip)
3180 {
3181 	if (sdebug_no_rwlock) {
3182 		if (sip)
3183 			__release(&sip->macc_lck);
3184 		else
3185 			__release(&sdeb_fake_rw_lck);
3186 	} else {
3187 		if (sip)
3188 			read_unlock(&sip->macc_lck);
3189 		else
3190 			read_unlock(&sdeb_fake_rw_lck);
3191 	}
3192 }
3193 
3194 static inline void
3195 sdeb_write_lock(struct sdeb_store_info *sip)
3196 {
3197 	if (sdebug_no_rwlock) {
3198 		if (sip)
3199 			__acquire(&sip->macc_lck);
3200 		else
3201 			__acquire(&sdeb_fake_rw_lck);
3202 	} else {
3203 		if (sip)
3204 			write_lock(&sip->macc_lck);
3205 		else
3206 			write_lock(&sdeb_fake_rw_lck);
3207 	}
3208 }
3209 
3210 static inline void
3211 sdeb_write_unlock(struct sdeb_store_info *sip)
3212 {
3213 	if (sdebug_no_rwlock) {
3214 		if (sip)
3215 			__release(&sip->macc_lck);
3216 		else
3217 			__release(&sdeb_fake_rw_lck);
3218 	} else {
3219 		if (sip)
3220 			write_unlock(&sip->macc_lck);
3221 		else
3222 			write_unlock(&sdeb_fake_rw_lck);
3223 	}
3224 }
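/*
 * When sdebug_no_rwlock is set (the no_rwlock module parameter), the four
 * helpers above skip the rwlock entirely; the __acquire()/__release() calls
 * exist only to keep sparse's context tracking balanced and compile away.
 * That trades consistency of the shared store for less lock traffic, which
 * can be useful when exercising the queuing paths.
 */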
3225 
3226 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3227 {
3228 	bool check_prot;
3229 	u32 num;
3230 	u32 ei_lba;
3231 	int ret;
3232 	u64 lba;
3233 	struct sdeb_store_info *sip = devip2sip(devip, true);
3234 	u8 *cmd = scp->cmnd;
3235 
3236 	switch (cmd[0]) {
3237 	case READ_16:
3238 		ei_lba = 0;
3239 		lba = get_unaligned_be64(cmd + 2);
3240 		num = get_unaligned_be32(cmd + 10);
3241 		check_prot = true;
3242 		break;
3243 	case READ_10:
3244 		ei_lba = 0;
3245 		lba = get_unaligned_be32(cmd + 2);
3246 		num = get_unaligned_be16(cmd + 7);
3247 		check_prot = true;
3248 		break;
3249 	case READ_6:
3250 		ei_lba = 0;
3251 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3252 		      (u32)(cmd[1] & 0x1f) << 16;
3253 		num = (0 == cmd[4]) ? 256 : cmd[4];
3254 		check_prot = true;
3255 		break;
3256 	case READ_12:
3257 		ei_lba = 0;
3258 		lba = get_unaligned_be32(cmd + 2);
3259 		num = get_unaligned_be32(cmd + 6);
3260 		check_prot = true;
3261 		break;
3262 	case XDWRITEREAD_10:
3263 		ei_lba = 0;
3264 		lba = get_unaligned_be32(cmd + 2);
3265 		num = get_unaligned_be16(cmd + 7);
3266 		check_prot = false;
3267 		break;
3268 	default:	/* assume READ(32) */
3269 		lba = get_unaligned_be64(cmd + 12);
3270 		ei_lba = get_unaligned_be32(cmd + 20);
3271 		num = get_unaligned_be32(cmd + 28);
3272 		check_prot = false;
3273 		break;
3274 	}
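	/*
	 * Example decode for the READ_6 case above: CDB 08 12 34 56 00 00
	 * yields lba = 0x123456 (a 21 bit LBA spread over bytes 1-3) and
	 * num = 256, since a transfer length of 0 in byte 4 of READ(6) and
	 * WRITE(6) means 256 blocks.
	 */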
3275 	if (unlikely(have_dif_prot && check_prot)) {
3276 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3277 		    (cmd[1] & 0xe0)) {
3278 			mk_sense_invalid_opcode(scp);
3279 			return check_condition_result;
3280 		}
3281 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3282 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3283 		    (cmd[1] & 0xe0) == 0)
3284 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3285 				    "to DIF device\n");
3286 	}
3287 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3288 		     atomic_read(&sdeb_inject_pending))) {
3289 		num /= 2;
3290 		atomic_set(&sdeb_inject_pending, 0);
3291 	}
3292 
3293 	ret = check_device_access_params(scp, lba, num, false);
3294 	if (ret)
3295 		return ret;
3296 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3297 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3298 		     ((lba + num) > sdebug_medium_error_start))) {
3299 		/* claim unrecoverable read error */
3300 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3301 		/* set info field and valid bit for fixed descriptor */
3302 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3303 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3304 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3305 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3306 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3307 		}
3308 		scsi_set_resid(scp, scsi_bufflen(scp));
3309 		return check_condition_result;
3310 	}
3311 
3312 	sdeb_read_lock(sip);
3313 
3314 	/* DIX + T10 DIF */
3315 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3316 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3317 		case 1: /* Guard tag error */
3318 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3319 				sdeb_read_unlock(sip);
3320 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3321 				return check_condition_result;
3322 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3323 				sdeb_read_unlock(sip);
3324 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3325 				return illegal_condition_result;
3326 			}
3327 			break;
3328 		case 3: /* Reference tag error */
3329 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3330 				sdeb_read_unlock(sip);
3331 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3332 				return check_condition_result;
3333 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3334 				sdeb_read_unlock(sip);
3335 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3336 				return illegal_condition_result;
3337 			}
3338 			break;
3339 		}
3340 	}
3341 
3342 	ret = do_device_access(sip, scp, 0, lba, num, false);
3343 	sdeb_read_unlock(sip);
3344 	if (unlikely(ret == -1))
3345 		return DID_ERROR << 16;
3346 
3347 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3348 
3349 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3350 		     atomic_read(&sdeb_inject_pending))) {
3351 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3352 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3353 			atomic_set(&sdeb_inject_pending, 0);
3354 			return check_condition_result;
3355 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3356 			/* Logical block guard check failed */
3357 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3358 			atomic_set(&sdeb_inject_pending, 0);
3359 			return illegal_condition_result;
3360 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3361 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3362 			atomic_set(&sdeb_inject_pending, 0);
3363 			return illegal_condition_result;
3364 		}
3365 	}
3366 	return 0;
3367 }
3368 
3369 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3370 			     unsigned int sectors, u32 ei_lba)
3371 {
3372 	int ret;
3373 	struct t10_pi_tuple *sdt;
3374 	void *daddr;
3375 	sector_t sector = start_sec;
3376 	int ppage_offset;
3377 	int dpage_offset;
3378 	struct sg_mapping_iter diter;
3379 	struct sg_mapping_iter piter;
3380 
3381 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3382 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3383 
3384 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3385 			scsi_prot_sg_count(SCpnt),
3386 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3387 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3388 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3389 
3390 	/* For each protection page */
3391 	while (sg_miter_next(&piter)) {
3392 		dpage_offset = 0;
3393 		if (WARN_ON(!sg_miter_next(&diter))) {
3394 			ret = 0x01;
3395 			goto out;
3396 		}
3397 
3398 		for (ppage_offset = 0; ppage_offset < piter.length;
3399 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3400 			/* If we're at the end of the current
3401 			 * data page, advance to the next one
3402 			 */
3403 			if (dpage_offset >= diter.length) {
3404 				if (WARN_ON(!sg_miter_next(&diter))) {
3405 					ret = 0x01;
3406 					goto out;
3407 				}
3408 				dpage_offset = 0;
3409 			}
3410 
3411 			sdt = piter.addr + ppage_offset;
3412 			daddr = diter.addr + dpage_offset;
3413 
3414 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3415 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3416 				if (ret)
3417 					goto out;
3418 			}
3419 
3420 			sector++;
3421 			ei_lba++;
3422 			dpage_offset += sdebug_sector_size;
3423 		}
3424 		diter.consumed = dpage_offset;
3425 		sg_miter_stop(&diter);
3426 	}
3427 	sg_miter_stop(&piter);
3428 
3429 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3430 	dix_writes++;
3431 
3432 	return 0;
3433 
3434 out:
3435 	dif_errors++;
3436 	sg_miter_stop(&diter);
3437 	sg_miter_stop(&piter);
3438 	return ret;
3439 }
3440 
3441 static unsigned long lba_to_map_index(sector_t lba)
3442 {
3443 	if (sdebug_unmap_alignment)
3444 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3445 	sector_div(lba, sdebug_unmap_granularity);
3446 	return lba;
3447 }
3448 
3449 static sector_t map_index_to_lba(unsigned long index)
3450 {
3451 	sector_t lba = index * sdebug_unmap_granularity;
3452 
3453 	if (sdebug_unmap_alignment)
3454 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3455 	return lba;
3456 }
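/*
 * Worked example with illustrative settings sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=4: LBAs 0-3 fall in map index 0, LBAs 4-11 in
 * index 1 and LBAs 12-19 in index 2, while map_index_to_lba(1) returns 4,
 * the first LBA of that provisioning unit.
 */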
3457 
3458 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3459 			      unsigned int *num)
3460 {
3461 	sector_t end;
3462 	unsigned int mapped;
3463 	unsigned long index;
3464 	unsigned long next;
3465 
3466 	index = lba_to_map_index(lba);
3467 	mapped = test_bit(index, sip->map_storep);
3468 
3469 	if (mapped)
3470 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3471 	else
3472 		next = find_next_bit(sip->map_storep, map_size, index);
3473 
3474 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3475 	*num = end - lba;
3476 	return mapped;
3477 }
3478 
3479 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3480 		       unsigned int len)
3481 {
3482 	sector_t end = lba + len;
3483 
3484 	while (lba < end) {
3485 		unsigned long index = lba_to_map_index(lba);
3486 
3487 		if (index < map_size)
3488 			set_bit(index, sip->map_storep);
3489 
3490 		lba = map_index_to_lba(index + 1);
3491 	}
3492 }
3493 
3494 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3495 			 unsigned int len)
3496 {
3497 	sector_t end = lba + len;
3498 	u8 *fsp = sip->storep;
3499 
3500 	while (lba < end) {
3501 		unsigned long index = lba_to_map_index(lba);
3502 
3503 		if (lba == map_index_to_lba(index) &&
3504 		    lba + sdebug_unmap_granularity <= end &&
3505 		    index < map_size) {
3506 			clear_bit(index, sip->map_storep);
3507 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3508 				memset(fsp + lba * sdebug_sector_size,
3509 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3510 				       sdebug_sector_size *
3511 				       sdebug_unmap_granularity);
3512 			}
3513 			if (sip->dif_storep) {
3514 				memset(sip->dif_storep + lba, 0xff,
3515 				       sizeof(*sip->dif_storep) *
3516 				       sdebug_unmap_granularity);
3517 			}
3518 		}
3519 		lba = map_index_to_lba(index + 1);
3520 	}
3521 }
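/*
 * unmap_region() only deallocates whole provisioning units: a range that
 * covers part of a unit leaves that unit's bit set. When sdebug_lbprz is
 * nonzero the backing bytes of each cleared unit are also scrubbed, with
 * zeroes for LBPRZ=1 and 0xff fill for LBPRZ=2, and any PI tuples for the
 * unit are invalidated with 0xff.
 */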
3522 
3523 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3524 {
3525 	bool check_prot;
3526 	u32 num;
3527 	u32 ei_lba;
3528 	int ret;
3529 	u64 lba;
3530 	struct sdeb_store_info *sip = devip2sip(devip, true);
3531 	u8 *cmd = scp->cmnd;
3532 
3533 	switch (cmd[0]) {
3534 	case WRITE_16:
3535 		ei_lba = 0;
3536 		lba = get_unaligned_be64(cmd + 2);
3537 		num = get_unaligned_be32(cmd + 10);
3538 		check_prot = true;
3539 		break;
3540 	case WRITE_10:
3541 		ei_lba = 0;
3542 		lba = get_unaligned_be32(cmd + 2);
3543 		num = get_unaligned_be16(cmd + 7);
3544 		check_prot = true;
3545 		break;
3546 	case WRITE_6:
3547 		ei_lba = 0;
3548 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3549 		      (u32)(cmd[1] & 0x1f) << 16;
3550 		num = (0 == cmd[4]) ? 256 : cmd[4];
3551 		check_prot = true;
3552 		break;
3553 	case WRITE_12:
3554 		ei_lba = 0;
3555 		lba = get_unaligned_be32(cmd + 2);
3556 		num = get_unaligned_be32(cmd + 6);
3557 		check_prot = true;
3558 		break;
3559 	case 0x53:	/* XDWRITEREAD(10) */
3560 		ei_lba = 0;
3561 		lba = get_unaligned_be32(cmd + 2);
3562 		num = get_unaligned_be16(cmd + 7);
3563 		check_prot = false;
3564 		break;
3565 	default:	/* assume WRITE(32) */
3566 		lba = get_unaligned_be64(cmd + 12);
3567 		ei_lba = get_unaligned_be32(cmd + 20);
3568 		num = get_unaligned_be32(cmd + 28);
3569 		check_prot = false;
3570 		break;
3571 	}
3572 	if (unlikely(have_dif_prot && check_prot)) {
3573 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3574 		    (cmd[1] & 0xe0)) {
3575 			mk_sense_invalid_opcode(scp);
3576 			return check_condition_result;
3577 		}
3578 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3579 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3580 		    (cmd[1] & 0xe0) == 0)
3581 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3582 				    "to DIF device\n");
3583 	}
3584 
3585 	sdeb_write_lock(sip);
3586 	ret = check_device_access_params(scp, lba, num, true);
3587 	if (ret) {
3588 		sdeb_write_unlock(sip);
3589 		return ret;
3590 	}
3591 
3592 	/* DIX + T10 DIF */
3593 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3594 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3595 		case 1: /* Guard tag error */
3596 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3597 				sdeb_write_unlock(sip);
3598 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3599 				return illegal_condition_result;
3600 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3601 				sdeb_write_unlock(sip);
3602 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3603 				return check_condition_result;
3604 			}
3605 			break;
3606 		case 3: /* Reference tag error */
3607 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3608 				sdeb_write_unlock(sip);
3609 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3610 				return illegal_condition_result;
3611 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3612 				sdeb_write_unlock(sip);
3613 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3614 				return check_condition_result;
3615 			}
3616 			break;
3617 		}
3618 	}
3619 
3620 	ret = do_device_access(sip, scp, 0, lba, num, true);
3621 	if (unlikely(scsi_debug_lbp()))
3622 		map_region(sip, lba, num);
3623 	/* If ZBC zone then bump its write pointer */
3624 	if (sdebug_dev_is_zoned(devip))
3625 		zbc_inc_wp(devip, lba, num);
3626 	sdeb_write_unlock(sip);
3627 	if (unlikely(-1 == ret))
3628 		return DID_ERROR << 16;
3629 	else if (unlikely(sdebug_verbose &&
3630 			  (ret < (num * sdebug_sector_size))))
3631 		sdev_printk(KERN_INFO, scp->device,
3632 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3633 			    my_name, num * sdebug_sector_size, ret);
3634 
3635 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3636 		     atomic_read(&sdeb_inject_pending))) {
3637 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3638 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3639 			atomic_set(&sdeb_inject_pending, 0);
3640 			return check_condition_result;
3641 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3642 			/* Logical block guard check failed */
3643 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3644 			atomic_set(&sdeb_inject_pending, 0);
3645 			return illegal_condition_result;
3646 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3647 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3648 			atomic_set(&sdeb_inject_pending, 0);
3649 			return illegal_condition_result;
3650 		}
3651 	}
3652 	return 0;
3653 }
3654 
3655 /*
3656  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3657  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3658  */
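/*
 * Data-out layout parsed below: the first lbdof * lb_size bytes (the "LB
 * Data Offset" is in logical blocks) hold a 32 byte parameter list header
 * followed by num_lrd LBA range descriptors of 32 bytes each:
 *
 *   bytes 0-7    LBA (big endian)
 *   bytes 8-11   number of logical blocks
 *   bytes 12-15  expected initial LBA for PI (32 byte cdb variant only)
 *
 * The blocks to write follow, packed in descriptor order.
 */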
3659 static int resp_write_scat(struct scsi_cmnd *scp,
3660 			   struct sdebug_dev_info *devip)
3661 {
3662 	u8 *cmd = scp->cmnd;
3663 	u8 *lrdp = NULL;
3664 	u8 *up;
3665 	struct sdeb_store_info *sip = devip2sip(devip, true);
3666 	u8 wrprotect;
3667 	u16 lbdof, num_lrd, k;
3668 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3669 	u32 lb_size = sdebug_sector_size;
3670 	u32 ei_lba;
3671 	u64 lba;
3672 	int ret, res;
3673 	bool is_16;
3674 	static const u32 lrd_size = 32; /* + parameter list header size */
3675 	static const u32 lrd_size = 32; /* also the parameter list header size */
3676 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3677 		is_16 = false;
3678 		wrprotect = (cmd[10] >> 5) & 0x7;
3679 		lbdof = get_unaligned_be16(cmd + 12);
3680 		num_lrd = get_unaligned_be16(cmd + 16);
3681 		bt_len = get_unaligned_be32(cmd + 28);
3682 	} else {        /* that leaves WRITE SCATTERED(16) */
3683 		is_16 = true;
3684 		wrprotect = (cmd[2] >> 5) & 0x7;
3685 		lbdof = get_unaligned_be16(cmd + 4);
3686 		num_lrd = get_unaligned_be16(cmd + 8);
3687 		bt_len = get_unaligned_be32(cmd + 10);
3688 		if (unlikely(have_dif_prot)) {
3689 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3690 			    wrprotect) {
3691 				mk_sense_invalid_opcode(scp);
3692 				return illegal_condition_result;
3693 			}
3694 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3695 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3696 			     wrprotect == 0)
3697 				sdev_printk(KERN_ERR, scp->device,
3698 					    "Unprotected WR to DIF device\n");
3699 		}
3700 	}
3701 	if ((num_lrd == 0) || (bt_len == 0))
3702 		return 0;       /* T10 says these do-nothings are not errors */
3703 	if (lbdof == 0) {
3704 		if (sdebug_verbose)
3705 			sdev_printk(KERN_INFO, scp->device,
3706 				"%s: %s: LB Data Offset field bad\n",
3707 				my_name, __func__);
3708 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3709 		return illegal_condition_result;
3710 	}
3711 	lbdof_blen = lbdof * lb_size;
3712 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3713 		if (sdebug_verbose)
3714 			sdev_printk(KERN_INFO, scp->device,
3715 				"%s: %s: LBA range descriptors don't fit\n",
3716 				my_name, __func__);
3717 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3718 		return illegal_condition_result;
3719 	}
3720 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3721 	if (lrdp == NULL)
3722 		return SCSI_MLQUEUE_HOST_BUSY;
3723 	if (sdebug_verbose)
3724 		sdev_printk(KERN_INFO, scp->device,
3725 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3726 			my_name, __func__, lbdof_blen);
3727 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3728 	if (res == -1) {
3729 		ret = DID_ERROR << 16;
3730 		goto err_out;
3731 	}
3732 
3733 	sdeb_write_lock(sip);
3734 	sg_off = lbdof_blen;
3735 	/* Spec says the Buffer Transfer Length field is a count of LBs in dout */
3736 	cum_lb = 0;
3737 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3738 		lba = get_unaligned_be64(up + 0);
3739 		num = get_unaligned_be32(up + 8);
3740 		if (sdebug_verbose)
3741 			sdev_printk(KERN_INFO, scp->device,
3742 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3743 				my_name, __func__, k, lba, num, sg_off);
3744 		if (num == 0)
3745 			continue;
3746 		ret = check_device_access_params(scp, lba, num, true);
3747 		if (ret)
3748 			goto err_out_unlock;
3749 		num_by = num * lb_size;
3750 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3751 
3752 		if ((cum_lb + num) > bt_len) {
3753 			if (sdebug_verbose)
3754 				sdev_printk(KERN_INFO, scp->device,
3755 				    "%s: %s: sum of blocks > data provided\n",
3756 				    my_name, __func__);
3757 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3758 					0);
3759 			ret = illegal_condition_result;
3760 			goto err_out_unlock;
3761 		}
3762 
3763 		/* DIX + T10 DIF */
3764 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3765 			int prot_ret = prot_verify_write(scp, lba, num,
3766 							 ei_lba);
3767 
3768 			if (prot_ret) {
3769 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3770 						prot_ret);
3771 				ret = illegal_condition_result;
3772 				goto err_out_unlock;
3773 			}
3774 		}
3775 
3776 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3777 		/* If ZBC zone then bump its write pointer */
3778 		if (sdebug_dev_is_zoned(devip))
3779 			zbc_inc_wp(devip, lba, num);
3780 		if (unlikely(scsi_debug_lbp()))
3781 			map_region(sip, lba, num);
3782 		if (unlikely(-1 == ret)) {
3783 			ret = DID_ERROR << 16;
3784 			goto err_out_unlock;
3785 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3786 			sdev_printk(KERN_INFO, scp->device,
3787 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3788 			    my_name, num_by, ret);
3789 
3790 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3791 			     atomic_read(&sdeb_inject_pending))) {
3792 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3793 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3794 				atomic_set(&sdeb_inject_pending, 0);
3795 				ret = check_condition_result;
3796 				goto err_out_unlock;
3797 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3798 				/* Logical block guard check failed */
3799 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3800 				atomic_set(&sdeb_inject_pending, 0);
3801 				ret = illegal_condition_result;
3802 				goto err_out_unlock;
3803 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3804 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3805 				atomic_set(&sdeb_inject_pending, 0);
3806 				ret = illegal_condition_result;
3807 				goto err_out_unlock;
3808 			}
3809 		}
3810 		sg_off += num_by;
3811 		cum_lb += num;
3812 	}
3813 	ret = 0;
3814 err_out_unlock:
3815 	sdeb_write_unlock(sip);
3816 err_out:
3817 	kfree(lrdp);
3818 	return ret;
3819 }
3820 
3821 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3822 			   u32 ei_lba, bool unmap, bool ndob)
3823 {
3824 	struct scsi_device *sdp = scp->device;
3825 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3826 	unsigned long long i;
3827 	u64 block, lbaa;
3828 	u32 lb_size = sdebug_sector_size;
3829 	int ret;
3830 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3831 						scp->device->hostdata, true);
3832 	u8 *fs1p;
3833 	u8 *fsp;
3834 
3835 	sdeb_write_lock(sip);
3836 
3837 	ret = check_device_access_params(scp, lba, num, true);
3838 	if (ret) {
3839 		sdeb_write_unlock(sip);
3840 		return ret;
3841 	}
3842 
3843 	if (unmap && scsi_debug_lbp()) {
3844 		unmap_region(sip, lba, num);
3845 		goto out;
3846 	}
3847 	lbaa = lba;
3848 	block = do_div(lbaa, sdebug_store_sectors);
3849 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3850 	fsp = sip->storep;
3851 	fs1p = fsp + (block * lb_size);
3852 	if (ndob) {
3853 		memset(fs1p, 0, lb_size);
3854 		ret = 0;
3855 	} else
3856 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3857 
3858 	if (-1 == ret) {
3859 		sdeb_write_unlock(sip);
3860 		return DID_ERROR << 16;
3861 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3862 		sdev_printk(KERN_INFO, scp->device,
3863 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3864 			    my_name, "write same", lb_size, ret);
3865 
3866 	/* Copy first sector to remaining blocks */
3867 	for (i = 1 ; i < num ; i++) {
3868 		lbaa = lba + i;
3869 		block = do_div(lbaa, sdebug_store_sectors);
3870 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3871 	}
3872 	if (scsi_debug_lbp())
3873 		map_region(sip, lba, num);
3874 	/* If ZBC zone then bump its write pointer */
3875 	if (sdebug_dev_is_zoned(devip))
3876 		zbc_inc_wp(devip, lba, num);
3877 out:
3878 	sdeb_write_unlock(sip);
3879 
3880 	return 0;
3881 }
3882 
3883 static int resp_write_same_10(struct scsi_cmnd *scp,
3884 			      struct sdebug_dev_info *devip)
3885 {
3886 	u8 *cmd = scp->cmnd;
3887 	u32 lba;
3888 	u16 num;
3889 	u32 ei_lba = 0;
3890 	bool unmap = false;
3891 
3892 	if (cmd[1] & 0x8) {
3893 		if (sdebug_lbpws10 == 0) {
3894 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3895 			return check_condition_result;
3896 		} else
3897 			unmap = true;
3898 	}
3899 	lba = get_unaligned_be32(cmd + 2);
3900 	num = get_unaligned_be16(cmd + 7);
3901 	if (num > sdebug_write_same_length) {
3902 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3903 		return check_condition_result;
3904 	}
3905 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3906 }
3907 
3908 static int resp_write_same_16(struct scsi_cmnd *scp,
3909 			      struct sdebug_dev_info *devip)
3910 {
3911 	u8 *cmd = scp->cmnd;
3912 	u64 lba;
3913 	u32 num;
3914 	u32 ei_lba = 0;
3915 	bool unmap = false;
3916 	bool ndob = false;
3917 
3918 	if (cmd[1] & 0x8) {	/* UNMAP */
3919 		if (sdebug_lbpws == 0) {
3920 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3921 			return check_condition_result;
3922 		} else
3923 			unmap = true;
3924 	}
3925 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3926 		ndob = true;
3927 	lba = get_unaligned_be64(cmd + 2);
3928 	num = get_unaligned_be32(cmd + 10);
3929 	if (num > sdebug_write_same_length) {
3930 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3931 		return check_condition_result;
3932 	}
3933 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3934 }
3935 
3936 /* Note the mode field is in the same position as the (lower) service action
3937  * field. For the Report supported operation codes command, SPC-4 suggests
3938  * each mode of this command should be reported separately (left for the future). */
3939 static int resp_write_buffer(struct scsi_cmnd *scp,
3940 			     struct sdebug_dev_info *devip)
3941 {
3942 	u8 *cmd = scp->cmnd;
3943 	struct scsi_device *sdp = scp->device;
3944 	struct sdebug_dev_info *dp;
3945 	u8 mode;
3946 
3947 	mode = cmd[1] & 0x1f;
3948 	switch (mode) {
3949 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3950 		/* set UAs on this device only */
3951 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3952 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3953 		break;
3954 	case 0x5:	/* download MC, save and ACT */
3955 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3956 		break;
3957 	case 0x6:	/* download MC with offsets and ACT */
3958 		/* set UAs on most devices (LUs) in this target */
3959 		list_for_each_entry(dp,
3960 				    &devip->sdbg_host->dev_info_list,
3961 				    dev_list)
3962 			if (dp->target == sdp->id) {
3963 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3964 				if (devip != dp)
3965 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3966 						dp->uas_bm);
3967 			}
3968 		break;
3969 	case 0x7:	/* download MC with offsets, save, and ACT */
3970 		/* set UA on all devices (LUs) in this target */
3971 		list_for_each_entry(dp,
3972 				    &devip->sdbg_host->dev_info_list,
3973 				    dev_list)
3974 			if (dp->target == sdp->id)
3975 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3976 					dp->uas_bm);
3977 		break;
3978 	default:
3979 		/* do nothing for this command for other mode values */
3980 		break;
3981 	}
3982 	return 0;
3983 }
3984 
3985 static int resp_comp_write(struct scsi_cmnd *scp,
3986 			   struct sdebug_dev_info *devip)
3987 {
3988 	u8 *cmd = scp->cmnd;
3989 	u8 *arr;
3990 	struct sdeb_store_info *sip = devip2sip(devip, true);
3991 	u64 lba;
3992 	u32 dnum;
3993 	u32 lb_size = sdebug_sector_size;
3994 	u8 num;
3995 	int ret;
3996 	int retval = 0;
3997 
3998 	lba = get_unaligned_be64(cmd + 2);
3999 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4000 	if (0 == num)
4001 		return 0;	/* degenerate case, not an error */
4002 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4003 	    (cmd[1] & 0xe0)) {
4004 		mk_sense_invalid_opcode(scp);
4005 		return check_condition_result;
4006 	}
4007 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4008 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4009 	    (cmd[1] & 0xe0) == 0)
4010 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4011 			    "to DIF device\n");
4012 	ret = check_device_access_params(scp, lba, num, false);
4013 	if (ret)
4014 		return ret;
4015 	dnum = 2 * num;
4016 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4017 	if (NULL == arr) {
4018 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4019 				INSUFF_RES_ASCQ);
4020 		return check_condition_result;
4021 	}
4022 
4023 	sdeb_write_lock(sip);
4024 
4025 	ret = do_dout_fetch(scp, dnum, arr);
4026 	if (ret == -1) {
4027 		retval = DID_ERROR << 16;
4028 		goto cleanup;
4029 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4030 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4031 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4032 			    dnum * lb_size, ret);
4033 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4034 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4035 		retval = check_condition_result;
4036 		goto cleanup;
4037 	}
4038 	if (scsi_debug_lbp())
4039 		map_region(sip, lba, num);
4040 cleanup:
4041 	sdeb_write_unlock(sip);
4042 	kfree(arr);
4043 	return retval;
4044 }
4045 
4046 struct unmap_block_desc {
4047 	__be64	lba;
4048 	__be32	blocks;
4049 	__be32	__reserved;
4050 };
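/*
 * The UNMAP parameter list, as validated below: an 8 byte header whose
 * bytes 0-1 hold the UNMAP data length (payload_len - 2) and bytes 2-3 the
 * block descriptor data length (descriptors * 16), followed by the array of
 * unmap_block_desc entries starting at byte 8.
 */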
4051 
4052 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4053 {
4054 	unsigned char *buf;
4055 	struct unmap_block_desc *desc;
4056 	struct sdeb_store_info *sip = devip2sip(devip, true);
4057 	unsigned int i, payload_len, descriptors;
4058 	int ret;
4059 
4060 	if (!scsi_debug_lbp())
4061 		return 0;	/* fib and say it's done */
4062 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4063 	BUG_ON(scsi_bufflen(scp) != payload_len);
4064 
4065 	descriptors = (payload_len - 8) / 16;
4066 	if (descriptors > sdebug_unmap_max_desc) {
4067 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4068 		return check_condition_result;
4069 	}
4070 
4071 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4072 	if (!buf) {
4073 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4074 				INSUFF_RES_ASCQ);
4075 		return check_condition_result;
4076 	}
4077 
4078 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4079 
4080 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4081 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4082 
4083 	desc = (void *)&buf[8];
4084 
4085 	sdeb_write_lock(sip);
4086 
4087 	for (i = 0 ; i < descriptors ; i++) {
4088 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4089 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4090 
4091 		ret = check_device_access_params(scp, lba, num, true);
4092 		if (ret)
4093 			goto out;
4094 
4095 		unmap_region(sip, lba, num);
4096 	}
4097 
4098 	ret = 0;
4099 
4100 out:
4101 	sdeb_write_unlock(sip);
4102 	kfree(buf);
4103 
4104 	return ret;
4105 }
4106 
4107 #define SDEBUG_GET_LBA_STATUS_LEN 32
4108 
4109 static int resp_get_lba_status(struct scsi_cmnd *scp,
4110 			       struct sdebug_dev_info *devip)
4111 {
4112 	u8 *cmd = scp->cmnd;
4113 	u64 lba;
4114 	u32 alloc_len, mapped, num;
4115 	int ret;
4116 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4117 
4118 	lba = get_unaligned_be64(cmd + 2);
4119 	alloc_len = get_unaligned_be32(cmd + 10);
4120 
4121 	if (alloc_len < 24)
4122 		return 0;
4123 
4124 	ret = check_device_access_params(scp, lba, 1, false);
4125 	if (ret)
4126 		return ret;
4127 
4128 	if (scsi_debug_lbp()) {
4129 		struct sdeb_store_info *sip = devip2sip(devip, true);
4130 
4131 		mapped = map_state(sip, lba, &num);
4132 	} else {
4133 		mapped = 1;
4134 		/* following just in case virtual_gb changed */
4135 		sdebug_capacity = get_sdebug_capacity();
4136 		if (sdebug_capacity - lba <= 0xffffffff)
4137 			num = sdebug_capacity - lba;
4138 		else
4139 			num = 0xffffffff;
4140 	}
4141 
4142 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4143 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4144 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4145 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4146 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4147 
4148 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4149 }
4150 
4151 static int resp_sync_cache(struct scsi_cmnd *scp,
4152 			   struct sdebug_dev_info *devip)
4153 {
4154 	int res = 0;
4155 	u64 lba;
4156 	u32 num_blocks;
4157 	u8 *cmd = scp->cmnd;
4158 
4159 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4160 		lba = get_unaligned_be32(cmd + 2);
4161 		num_blocks = get_unaligned_be16(cmd + 7);
4162 	} else {				/* SYNCHRONIZE_CACHE(16) */
4163 		lba = get_unaligned_be64(cmd + 2);
4164 		num_blocks = get_unaligned_be32(cmd + 10);
4165 	}
4166 	if (lba + num_blocks > sdebug_capacity) {
4167 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4168 		return check_condition_result;
4169 	}
4170 	if (!write_since_sync || (cmd[1] & 0x2))
4171 		res = SDEG_RES_IMMED_MASK;
4172 	else		/* delay if write_since_sync and IMMED clear */
4173 		write_since_sync = false;
4174 	return res;
4175 }
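/*
 * SDEG_RES_IMMED_MASK in the returned value marks the response for
 * immediate completion, bypassing the usual emulated delay. SYNCHRONIZE
 * CACHE gets that treatment when its IMMED bit (cmd[1] & 0x2) is set or
 * when nothing has been written since the last sync; otherwise
 * write_since_sync is cleared and the normal delay applies.
 */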
4176 
4177 /*
4178  * Assuming LBA+num_blocks is not out-of-range, this function returns
4179  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4180  * cache, and a GOOD status otherwise. Model a disk with a big cache and
4181  * yield CONDITION MET. Actually tries to bring the range of main memory
4182  * into the cache associated with the CPU(s).
4183  */
4184 static int resp_pre_fetch(struct scsi_cmnd *scp,
4185 			  struct sdebug_dev_info *devip)
4186 {
4187 	int res = 0;
4188 	u64 lba;
4189 	u64 block, rest = 0;
4190 	u32 nblks;
4191 	u8 *cmd = scp->cmnd;
4192 	struct sdeb_store_info *sip = devip2sip(devip, true);
4193 	u8 *fsp = sip->storep;
4194 
4195 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4196 		lba = get_unaligned_be32(cmd + 2);
4197 		nblks = get_unaligned_be16(cmd + 7);
4198 	} else {			/* PRE-FETCH(16) */
4199 		lba = get_unaligned_be64(cmd + 2);
4200 		nblks = get_unaligned_be32(cmd + 10);
4201 	}
4202 	if (lba + nblks > sdebug_capacity) {
4203 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4204 		return check_condition_result;
4205 	}
4206 	if (!fsp)
4207 		goto fini;
4208 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4209 	block = do_div(lba, sdebug_store_sectors);
4210 	if (block + nblks > sdebug_store_sectors)
4211 		rest = block + nblks - sdebug_store_sectors;
4212 
4213 	/* Try to bring the PRE-FETCH range into CPU's cache */
4214 	sdeb_read_lock(sip);
4215 	prefetch_range(fsp + (sdebug_sector_size * block),
4216 		       (nblks - rest) * sdebug_sector_size);
4217 	if (rest)
4218 		prefetch_range(fsp, rest * sdebug_sector_size);
4219 	sdeb_read_unlock(sip);
4220 fini:
4221 	if (cmd[1] & 0x2)
4222 		res = SDEG_RES_IMMED_MASK;
4223 	return res | condition_met_result;
4224 }
4225 
4226 #define RL_BUCKET_ELEMS 8
4227 
4228 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4229  * (W-LUN), the normal Linux scanning logic does not associate it with a
4230  * device (e.g. /dev/sg7). The following magic will make that association:
4231  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4232  * where <n> is a host number. If there are multiple targets in a host then
4233  * the above will associate a W-LUN to each target. To only get a W-LUN
4234  * for target 2, then use "echo '- 2 49409' > scan" .
4235  */
4236 static int resp_report_luns(struct scsi_cmnd *scp,
4237 			    struct sdebug_dev_info *devip)
4238 {
4239 	unsigned char *cmd = scp->cmnd;
4240 	unsigned int alloc_len;
4241 	unsigned char select_report;
4242 	u64 lun;
4243 	struct scsi_lun *lun_p;
4244 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4245 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4246 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4247 	unsigned int tlun_cnt;	/* total LUN count */
4248 	unsigned int rlen;	/* response length (in bytes) */
4249 	int k, j, n, res;
4250 	unsigned int off_rsp = 0;
4251 	const int sz_lun = sizeof(struct scsi_lun);
4252 
4253 	clear_luns_changed_on_target(devip);
4254 
4255 	select_report = cmd[2];
4256 	alloc_len = get_unaligned_be32(cmd + 6);
4257 
4258 	if (alloc_len < 4) {
4259 		pr_err("alloc len too small %d\n", alloc_len);
4260 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4261 		return check_condition_result;
4262 	}
4263 
4264 	switch (select_report) {
4265 	case 0:		/* all LUNs apart from W-LUNs */
4266 		lun_cnt = sdebug_max_luns;
4267 		wlun_cnt = 0;
4268 		break;
4269 	case 1:		/* only W-LUNs */
4270 		lun_cnt = 0;
4271 		wlun_cnt = 1;
4272 		break;
4273 	case 2:		/* all LUNs */
4274 		lun_cnt = sdebug_max_luns;
4275 		wlun_cnt = 1;
4276 		break;
4277 	case 0x10:	/* only administrative LUs */
4278 	case 0x11:	/* see SPC-5 */
4279 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4280 	default:
4281 		pr_debug("select report invalid %d\n", select_report);
4282 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4283 		return check_condition_result;
4284 	}
4285 
4286 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4287 		--lun_cnt;
4288 
4289 	tlun_cnt = lun_cnt + wlun_cnt;
4290 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4291 	scsi_set_resid(scp, scsi_bufflen(scp));
4292 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4293 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4294 
4295 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4296 	lun = sdebug_no_lun_0 ? 1 : 0;
4297 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4298 		memset(arr, 0, sizeof(arr));
4299 		lun_p = (struct scsi_lun *)&arr[0];
4300 		if (k == 0) {
4301 			put_unaligned_be32(rlen, &arr[0]);
4302 			++lun_p;
4303 			j = 1;
4304 		}
4305 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4306 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4307 				break;
4308 			int_to_scsilun(lun++, lun_p);
4309 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4310 				lun_p->scsi_lun[0] |= 0x40;
4311 		}
4312 		if (j < RL_BUCKET_ELEMS)
4313 			break;
4314 		n = j * sz_lun;
4315 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4316 		if (res)
4317 			return res;
4318 		off_rsp += n;
4319 	}
4320 	if (wlun_cnt) {
4321 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4322 		++j;
4323 	}
4324 	if (j > 0)
4325 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4326 	return res;
4327 }
4328 
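/* Handles VERIFY(10) and VERIFY(16) with BYTCHK=1 or 3. The data-out buffer
 * is fetched with do_dout_fetch() and compared against the store via
 * comp_write_worker() with compare_only set, so nothing is written; for
 * BYTCHK=3 the single block sent is first replicated vnum times. */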
4329 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4330 {
4331 	bool is_bytchk3 = false;
4332 	u8 bytchk;
4333 	int ret, j;
4334 	u32 vnum, a_num, off;
4335 	const u32 lb_size = sdebug_sector_size;
4336 	u64 lba;
4337 	u8 *arr;
4338 	u8 *cmd = scp->cmnd;
4339 	struct sdeb_store_info *sip = devip2sip(devip, true);
4340 
4341 	bytchk = (cmd[1] >> 1) & 0x3;
4342 	if (bytchk == 0) {
4343 		return 0;	/* always claim internal verify okay */
4344 	} else if (bytchk == 2) {
4345 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4346 		return check_condition_result;
4347 	} else if (bytchk == 3) {
4348 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4349 	}
4350 	switch (cmd[0]) {
4351 	case VERIFY_16:
4352 		lba = get_unaligned_be64(cmd + 2);
4353 		vnum = get_unaligned_be32(cmd + 10);
4354 		break;
4355 	case VERIFY:		/* is VERIFY(10) */
4356 		lba = get_unaligned_be32(cmd + 2);
4357 		vnum = get_unaligned_be16(cmd + 7);
4358 		break;
4359 	default:
4360 		mk_sense_invalid_opcode(scp);
4361 		return check_condition_result;
4362 	}
4363 	if (vnum == 0)
4364 		return 0;	/* not an error */
4365 	a_num = is_bytchk3 ? 1 : vnum;
4366 	/* Treat following check like one for read (i.e. no write) access */
4367 	ret = check_device_access_params(scp, lba, a_num, false);
4368 	if (ret)
4369 		return ret;
4370 
4371 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4372 	if (!arr) {
4373 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4374 				INSUFF_RES_ASCQ);
4375 		return check_condition_result;
4376 	}
4377 	/* Not changing store, so only need read access */
4378 	sdeb_read_lock(sip);
4379 
4380 	ret = do_dout_fetch(scp, a_num, arr);
4381 	if (ret == -1) {
4382 		ret = DID_ERROR << 16;
4383 		goto cleanup;
4384 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4385 		sdev_printk(KERN_INFO, scp->device,
4386 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4387 			    my_name, __func__, a_num * lb_size, ret);
4388 	}
4389 	if (is_bytchk3) {
4390 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4391 			memcpy(arr + off, arr, lb_size);
4392 	}
4393 	ret = 0;
4394 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4395 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4396 		ret = check_condition_result;
4397 		goto cleanup;
4398 	}
4399 cleanup:
4400 	sdeb_read_unlock(sip);
4401 	kfree(arr);
4402 	return ret;
4403 }
4404 
4405 #define RZONES_DESC_HD 64
4406 
4407 /* Report zones depending on start LBA and reporting options */
4408 static int resp_report_zones(struct scsi_cmnd *scp,
4409 			     struct sdebug_dev_info *devip)
4410 {
4411 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4412 	int ret = 0;
4413 	u32 alloc_len, rep_opts, rep_len;
4414 	bool partial;
4415 	u64 lba, zs_lba;
4416 	u8 *arr = NULL, *desc;
4417 	u8 *cmd = scp->cmnd;
4418 	struct sdeb_zone_state *zsp;
4419 	struct sdeb_store_info *sip = devip2sip(devip, false);
4420 
4421 	if (!sdebug_dev_is_zoned(devip)) {
4422 		mk_sense_invalid_opcode(scp);
4423 		return check_condition_result;
4424 	}
4425 	zs_lba = get_unaligned_be64(cmd + 2);
4426 	alloc_len = get_unaligned_be32(cmd + 10);
4427 	if (alloc_len == 0)
4428 		return 0;	/* not an error */
4429 	rep_opts = cmd[14] & 0x3f;
4430 	partial = cmd[14] & 0x80;
4431 
4432 	if (zs_lba >= sdebug_capacity) {
4433 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4434 		return check_condition_result;
4435 	}
4436 
4437 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4438 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4439 			    max_zones);
4440 
4441 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4442 	if (!arr) {
4443 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4444 				INSUFF_RES_ASCQ);
4445 		return check_condition_result;
4446 	}
4447 
4448 	sdeb_read_lock(sip);
4449 
4450 	desc = arr + 64;
4451 	for (i = 0; i < max_zones; i++) {
4452 		lba = zs_lba + devip->zsize * i;
4453 		if (lba > sdebug_capacity)
4454 			break;
4455 		zsp = zbc_zone(devip, lba);
4456 		switch (rep_opts) {
4457 		case 0x00:
4458 			/* All zones */
4459 			break;
4460 		case 0x01:
4461 			/* Empty zones */
4462 			if (zsp->z_cond != ZC1_EMPTY)
4463 				continue;
4464 			break;
4465 		case 0x02:
4466 			/* Implicit open zones */
4467 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4468 				continue;
4469 			break;
4470 		case 0x03:
4471 			/* Explicit open zones */
4472 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4473 				continue;
4474 			break;
4475 		case 0x04:
4476 			/* Closed zones */
4477 			if (zsp->z_cond != ZC4_CLOSED)
4478 				continue;
4479 			break;
4480 		case 0x05:
4481 			/* Full zones */
4482 			if (zsp->z_cond != ZC5_FULL)
4483 				continue;
4484 			break;
4485 		case 0x06:
4486 		case 0x07:
4487 		case 0x10:
4488 			/*
4489 			 * Read-only, offline, and reset-WP-recommended
4490 			 * zones are not emulated: no zones to report.
4491 			 */
4492 			continue;
4493 		case 0x11:
4494 			/* non-seq-resource set */
4495 			if (!zsp->z_non_seq_resource)
4496 				continue;
4497 			break;
4498 		case 0x3f:
4499 			/* Not write pointer (conventional) zones */
4500 			if (!zbc_zone_is_conv(zsp))
4501 				continue;
4502 			break;
4503 		default:
4504 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4505 					INVALID_FIELD_IN_CDB, 0);
4506 			ret = check_condition_result;
4507 			goto fini;
4508 		}
4509 
4510 		if (nrz < rep_max_zones) {
4511 			/* Fill zone descriptor */
4512 			desc[0] = zsp->z_type;
4513 			desc[1] = zsp->z_cond << 4;
4514 			if (zsp->z_non_seq_resource)
4515 				desc[1] |= 1 << 1;
4516 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4517 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4518 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4519 			desc += 64;
4520 		}
4521 
4522 		if (partial && nrz >= rep_max_zones)
4523 			break;
4524 
4525 		nrz++;
4526 	}
4527 
4528 	/* Report header */
4529 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4530 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4531 
4532 	rep_len = (unsigned long)desc - (unsigned long)arr;
4533 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4534 
4535 fini:
4536 	sdeb_read_unlock(sip);
4537 	kfree(arr);
4538 	return ret;
4539 }
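/*
 * The REPORT ZONES response assembled above: a 64 byte header carrying the
 * zone list length (nrz * 64) in bytes 0-3 and the maximum LBA in bytes
 * 8-15, then one 64 byte descriptor per reported zone with the zone type in
 * byte 0, the condition in the high nibble of byte 1, and the zone length,
 * start LBA and write pointer at offsets 8, 16 and 24 respectively.
 */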
4540 
4541 /* Logic transplanted from tcmu-runner, file_zbc.c */
4542 static void zbc_open_all(struct sdebug_dev_info *devip)
4543 {
4544 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4545 	unsigned int i;
4546 
4547 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4548 		if (zsp->z_cond == ZC4_CLOSED)
4549 			zbc_open_zone(devip, &devip->zstate[i], true);
4550 	}
4551 }
4552 
4553 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4554 {
4555 	int res = 0;
4556 	u64 z_id;
4557 	enum sdebug_z_cond zc;
4558 	u8 *cmd = scp->cmnd;
4559 	struct sdeb_zone_state *zsp;
4560 	bool all = cmd[14] & 0x01;
4561 	struct sdeb_store_info *sip = devip2sip(devip, false);
4562 
4563 	if (!sdebug_dev_is_zoned(devip)) {
4564 		mk_sense_invalid_opcode(scp);
4565 		return check_condition_result;
4566 	}
4567 
4568 	sdeb_write_lock(sip);
4569 
4570 	if (all) {
4571 		/* Check if all closed zones can be opened */
4572 		if (devip->max_open &&
4573 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4574 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4575 					INSUFF_ZONE_ASCQ);
4576 			res = check_condition_result;
4577 			goto fini;
4578 		}
4579 		/* Open all closed zones */
4580 		zbc_open_all(devip);
4581 		goto fini;
4582 	}
4583 
4584 	/* Open the specified zone */
4585 	z_id = get_unaligned_be64(cmd + 2);
4586 	if (z_id >= sdebug_capacity) {
4587 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4588 		res = check_condition_result;
4589 		goto fini;
4590 	}
4591 
4592 	zsp = zbc_zone(devip, z_id);
4593 	if (z_id != zsp->z_start) {
4594 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4595 		res = check_condition_result;
4596 		goto fini;
4597 	}
4598 	if (zbc_zone_is_conv(zsp)) {
4599 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4600 		res = check_condition_result;
4601 		goto fini;
4602 	}
4603 
4604 	zc = zsp->z_cond;
4605 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4606 		goto fini;
4607 
4608 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4609 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4610 				INSUFF_ZONE_ASCQ);
4611 		res = check_condition_result;
4612 		goto fini;
4613 	}
4614 
4615 	zbc_open_zone(devip, zsp, true);
4616 fini:
4617 	sdeb_write_unlock(sip);
4618 	return res;
4619 }
4620 
4621 static void zbc_close_all(struct sdebug_dev_info *devip)
4622 {
4623 	unsigned int i;
4624 
4625 	for (i = 0; i < devip->nr_zones; i++)
4626 		zbc_close_zone(devip, &devip->zstate[i]);
4627 }
4628 
4629 static int resp_close_zone(struct scsi_cmnd *scp,
4630 			   struct sdebug_dev_info *devip)
4631 {
4632 	int res = 0;
4633 	u64 z_id;
4634 	u8 *cmd = scp->cmnd;
4635 	struct sdeb_zone_state *zsp;
4636 	bool all = cmd[14] & 0x01;
4637 	struct sdeb_store_info *sip = devip2sip(devip, false);
4638 
4639 	if (!sdebug_dev_is_zoned(devip)) {
4640 		mk_sense_invalid_opcode(scp);
4641 		return check_condition_result;
4642 	}
4643 
4644 	sdeb_write_lock(sip);
4645 
4646 	if (all) {
4647 		zbc_close_all(devip);
4648 		goto fini;
4649 	}
4650 
4651 	/* Close specified zone */
4652 	z_id = get_unaligned_be64(cmd + 2);
4653 	if (z_id >= sdebug_capacity) {
4654 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4655 		res = check_condition_result;
4656 		goto fini;
4657 	}
4658 
4659 	zsp = zbc_zone(devip, z_id);
4660 	if (z_id != zsp->z_start) {
4661 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4662 		res = check_condition_result;
4663 		goto fini;
4664 	}
4665 	if (zbc_zone_is_conv(zsp)) {
4666 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4667 		res = check_condition_result;
4668 		goto fini;
4669 	}
4670 
4671 	zbc_close_zone(devip, zsp);
4672 fini:
4673 	sdeb_write_unlock(sip);
4674 	return res;
4675 }
4676 
4677 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4678 			    struct sdeb_zone_state *zsp, bool empty)
4679 {
4680 	enum sdebug_z_cond zc = zsp->z_cond;
4681 
4682 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4683 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4684 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4685 			zbc_close_zone(devip, zsp);
4686 		if (zsp->z_cond == ZC4_CLOSED)
4687 			devip->nr_closed--;
4688 		zsp->z_wp = zsp->z_start + zsp->z_size;
4689 		zsp->z_cond = ZC5_FULL;
4690 	}
4691 }
4692 
4693 static void zbc_finish_all(struct sdebug_dev_info *devip)
4694 {
4695 	unsigned int i;
4696 
4697 	for (i = 0; i < devip->nr_zones; i++)
4698 		zbc_finish_zone(devip, &devip->zstate[i], false);
4699 }
4700 
4701 static int resp_finish_zone(struct scsi_cmnd *scp,
4702 			    struct sdebug_dev_info *devip)
4703 {
4704 	struct sdeb_zone_state *zsp;
4705 	int res = 0;
4706 	u64 z_id;
4707 	u8 *cmd = scp->cmnd;
4708 	bool all = cmd[14] & 0x01;
4709 	struct sdeb_store_info *sip = devip2sip(devip, false);
4710 
4711 	if (!sdebug_dev_is_zoned(devip)) {
4712 		mk_sense_invalid_opcode(scp);
4713 		return check_condition_result;
4714 	}
4715 
4716 	sdeb_write_lock(sip);
4717 
4718 	if (all) {
4719 		zbc_finish_all(devip);
4720 		goto fini;
4721 	}
4722 
4723 	/* Finish the specified zone */
4724 	z_id = get_unaligned_be64(cmd + 2);
4725 	if (z_id >= sdebug_capacity) {
4726 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4727 		res = check_condition_result;
4728 		goto fini;
4729 	}
4730 
4731 	zsp = zbc_zone(devip, z_id);
4732 	if (z_id != zsp->z_start) {
4733 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4734 		res = check_condition_result;
4735 		goto fini;
4736 	}
4737 	if (zbc_zone_is_conv(zsp)) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 
4743 	zbc_finish_zone(devip, zsp, true);
4744 fini:
4745 	sdeb_write_unlock(sip);
4746 	return res;
4747 }
4748 
4749 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4750 			 struct sdeb_zone_state *zsp)
4751 {
4752 	enum sdebug_z_cond zc;
4753 	struct sdeb_store_info *sip = devip2sip(devip, false);
4754 
4755 	if (zbc_zone_is_conv(zsp))
4756 		return;
4757 
4758 	zc = zsp->z_cond;
4759 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4760 		zbc_close_zone(devip, zsp);
4761 
4762 	if (zsp->z_cond == ZC4_CLOSED)
4763 		devip->nr_closed--;
4764 
4765 	if (zsp->z_wp > zsp->z_start)
4766 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4767 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4768 
4769 	zsp->z_non_seq_resource = false;
4770 	zsp->z_wp = zsp->z_start;
4771 	zsp->z_cond = ZC1_EMPTY;
4772 }
4773 
4774 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4775 {
4776 	unsigned int i;
4777 
4778 	for (i = 0; i < devip->nr_zones; i++)
4779 		zbc_rwp_zone(devip, &devip->zstate[i]);
4780 }
4781 
4782 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4783 {
4784 	struct sdeb_zone_state *zsp;
4785 	int res = 0;
4786 	u64 z_id;
4787 	u8 *cmd = scp->cmnd;
4788 	bool all = cmd[14] & 0x01;
4789 	struct sdeb_store_info *sip = devip2sip(devip, false);
4790 
4791 	if (!sdebug_dev_is_zoned(devip)) {
4792 		mk_sense_invalid_opcode(scp);
4793 		return check_condition_result;
4794 	}
4795 
4796 	sdeb_write_lock(sip);
4797 
4798 	if (all) {
4799 		zbc_rwp_all(devip);
4800 		goto fini;
4801 	}
4802 
4803 	z_id = get_unaligned_be64(cmd + 2);
4804 	if (z_id >= sdebug_capacity) {
4805 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4806 		res = check_condition_result;
4807 		goto fini;
4808 	}
4809 
4810 	zsp = zbc_zone(devip, z_id);
4811 	if (z_id != zsp->z_start) {
4812 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4813 		res = check_condition_result;
4814 		goto fini;
4815 	}
4816 	if (zbc_zone_is_conv(zsp)) {
4817 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4818 		res = check_condition_result;
4819 		goto fini;
4820 	}
4821 
4822 	zbc_rwp_zone(devip, zsp);
4823 fini:
4824 	sdeb_write_unlock(sip);
4825 	return res;
4826 }
4827 
4828 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4829 {
4830 	u16 hwq;
4831 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4832 
4833 	hwq = blk_mq_unique_tag_to_hwq(tag);
4834 
4835 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4836 	if (WARN_ON_ONCE(hwq >= submit_queues))
4837 		hwq = 0;
4838 
4839 	return sdebug_q_arr + hwq;
4840 }
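/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper 16 bits
 * of the tag and the per-queue tag into the lower 16, so the hwq extraction
 * above is just a shift. For example, a unique tag of 0x0002001f selects
 * hwq 2 (request tag 0x1f).
 */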
4841 
4842 static u32 get_tag(struct scsi_cmnd *cmnd)
4843 {
4844 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4845 }
4846 
4847 /* Queued (deferred) command completions converge here. */
4848 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4849 {
4850 	bool aborted = sd_dp->aborted;
4851 	int qc_idx;
4852 	int retiring = 0;
4853 	unsigned long iflags;
4854 	struct sdebug_queue *sqp;
4855 	struct sdebug_queued_cmd *sqcp;
4856 	struct scsi_cmnd *scp;
4857 	struct sdebug_dev_info *devip;
4858 
4859 	if (unlikely(aborted))
4860 		sd_dp->aborted = false;
4861 	qc_idx = sd_dp->qc_idx;
4862 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4863 	if (sdebug_statistics) {
4864 		atomic_inc(&sdebug_completions);
4865 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4866 			atomic_inc(&sdebug_miss_cpus);
4867 	}
4868 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4869 		pr_err("wild qc_idx=%d\n", qc_idx);
4870 		return;
4871 	}
4872 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4873 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4874 	sqcp = &sqp->qc_arr[qc_idx];
4875 	scp = sqcp->a_cmnd;
4876 	if (unlikely(scp == NULL)) {
4877 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4878 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4879 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4880 		return;
4881 	}
4882 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4883 	if (likely(devip))
4884 		atomic_dec(&devip->num_in_q);
4885 	else
4886 		pr_err("devip=NULL\n");
4887 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4888 		retiring = 1;
4889 
4890 	sqcp->a_cmnd = NULL;
4891 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4892 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4893 		pr_err("Unexpected completion\n");
4894 		return;
4895 	}
4896 
4897 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4898 		int k, retval;
4899 
4900 		retval = atomic_read(&retired_max_queue);
4901 		if (qc_idx >= retval) {
4902 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4903 			pr_err("index %d too large\n", retval);
4904 			return;
4905 		}
4906 		k = find_last_bit(sqp->in_use_bm, retval);
4907 		if ((k < sdebug_max_queue) || (k == retval))
4908 			atomic_set(&retired_max_queue, 0);
4909 		else
4910 			atomic_set(&retired_max_queue, k + 1);
4911 	}
4912 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4913 	if (unlikely(aborted)) {
4914 		if (sdebug_verbose)
4915 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4916 		return;
4917 	}
4918 	scsi_done(scp); /* callback to mid level */
4919 }
4920 
4921 /* When the high-resolution timer goes off, this function is called. */
4922 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4923 {
4924 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4925 						  hrt);
4926 	sdebug_q_cmd_complete(sd_dp);
4927 	return HRTIMER_NORESTART;
4928 }
4929 
4930 /* When the work queue runs the scheduled work, this function is called. */
4931 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4932 {
4933 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4934 						  ew.work);
4935 	sdebug_q_cmd_complete(sd_dp);
4936 }
4937 
4938 static bool got_shared_uuid;
4939 static uuid_t shared_uuid;
4940 
4941 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4942 {
4943 	struct sdeb_zone_state *zsp;
4944 	sector_t capacity = get_sdebug_capacity();
4945 	sector_t zstart = 0;
4946 	unsigned int i;
4947 
4948 	/*
4949 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4950 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4951 	 * use the specified zone size, checking that at least 2 zones can be
4952 	 * created for the device.
4953 	 */
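	/*
	 * Worked example (illustrative, assuming DEF_ZBC_ZONE_SIZE_MB is 128
	 * and a 512 byte logical block size): the default zone size works
	 * out to 128 * 1048576 / 512 = 262144 blocks. A device smaller than
	 * 4 * 262144 = 1048576 blocks (512 MiB) has that zone size halved
	 * repeatedly until at least 4 zones fit.
	 */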
4954 	if (!sdeb_zbc_zone_size_mb) {
4955 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4956 			>> ilog2(sdebug_sector_size);
4957 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4958 			devip->zsize >>= 1;
4959 		if (devip->zsize < 2) {
4960 			pr_err("Device capacity too small\n");
4961 			return -EINVAL;
4962 		}
4963 	} else {
4964 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4965 			pr_err("Zone size is not a power of 2\n");
4966 			return -EINVAL;
4967 		}
4968 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4969 			>> ilog2(sdebug_sector_size);
4970 		if (devip->zsize >= capacity) {
4971 			pr_err("Zone size too large for device capacity\n");
4972 			return -EINVAL;
4973 		}
4974 	}
4975 
4976 	devip->zsize_shift = ilog2(devip->zsize);
4977 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4978 
4979 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4980 		pr_err("Number of conventional zones too large\n");
4981 		return -EINVAL;
4982 	}
4983 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4984 
4985 	if (devip->zmodel == BLK_ZONED_HM) {
4986 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4987 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4988 			devip->max_open = (devip->nr_zones - 1) / 2;
4989 		else
4990 			devip->max_open = sdeb_zbc_max_open;
4991 	}
4992 
4993 	devip->zstate = kcalloc(devip->nr_zones,
4994 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4995 	if (!devip->zstate)
4996 		return -ENOMEM;
4997 
4998 	for (i = 0; i < devip->nr_zones; i++) {
4999 		zsp = &devip->zstate[i];
5000 
5001 		zsp->z_start = zstart;
5002 
5003 		if (i < devip->nr_conv_zones) {
5004 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
5005 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5006 			zsp->z_wp = (sector_t)-1;
5007 		} else {
5008 			if (devip->zmodel == BLK_ZONED_HM)
5009 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
5010 			else
5011 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
5012 			zsp->z_cond = ZC1_EMPTY;
5013 			zsp->z_wp = zsp->z_start;
5014 		}
5015 
5016 		if (zsp->z_start + devip->zsize < capacity)
5017 			zsp->z_size = devip->zsize;
5018 		else
5019 			zsp->z_size = capacity - zsp->z_start;
5020 
5021 		zstart += zsp->z_size;
5022 	}
5023 
5024 	return 0;
5025 }
5026 
5027 static struct sdebug_dev_info *sdebug_device_create(
5028 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5029 {
5030 	struct sdebug_dev_info *devip;
5031 
5032 	devip = kzalloc(sizeof(*devip), flags);
5033 	if (devip) {
5034 		if (sdebug_uuid_ctl == 1)
5035 			uuid_gen(&devip->lu_name);
5036 		else if (sdebug_uuid_ctl == 2) {
5037 			if (got_shared_uuid)
5038 				devip->lu_name = shared_uuid;
5039 			else {
5040 				uuid_gen(&shared_uuid);
5041 				got_shared_uuid = true;
5042 				devip->lu_name = shared_uuid;
5043 			}
5044 		}
5045 		devip->sdbg_host = sdbg_host;
5046 		if (sdeb_zbc_in_use) {
5047 			devip->zmodel = sdeb_zbc_model;
5048 			if (sdebug_device_create_zones(devip)) {
5049 				kfree(devip);
5050 				return NULL;
5051 			}
5052 		} else {
5053 			devip->zmodel = BLK_ZONED_NONE;
5054 		}
5056 		devip->create_ts = ktime_get_boottime();
5057 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5058 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5059 	}
5060 	return devip;
5061 }
5062 
5063 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5064 {
5065 	struct sdebug_host_info *sdbg_host;
5066 	struct sdebug_dev_info *open_devip = NULL;
5067 	struct sdebug_dev_info *devip;
5068 
5069 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5070 	if (!sdbg_host) {
5071 		pr_err("Host info NULL\n");
5072 		return NULL;
5073 	}
5074 
5075 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5076 		if ((devip->used) && (devip->channel == sdev->channel) &&
5077 		    (devip->target == sdev->id) &&
5078 		    (devip->lun == sdev->lun))
5079 			return devip;
5080 		else {
5081 			if ((!devip->used) && (!open_devip))
5082 				open_devip = devip;
5083 		}
5084 	}
5085 	if (!open_devip) { /* try and make a new one */
5086 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5087 		if (!open_devip) {
5088 			pr_err("out of memory at line %d\n", __LINE__);
5089 			return NULL;
5090 		}
5091 	}
5092 
5093 	open_devip->channel = sdev->channel;
5094 	open_devip->target = sdev->id;
5095 	open_devip->lun = sdev->lun;
5096 	open_devip->sdbg_host = sdbg_host;
5097 	atomic_set(&open_devip->num_in_q, 0);
5098 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5099 	open_devip->used = true;
5100 	return open_devip;
5101 }
5102 
5103 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5104 {
5105 	if (sdebug_verbose)
5106 		pr_info("slave_alloc <%u %u %u %llu>\n",
5107 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5108 	return 0;
5109 }
5110 
5111 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5112 {
5113 	struct sdebug_dev_info *devip =
5114 			(struct sdebug_dev_info *)sdp->hostdata;
5115 
5116 	if (sdebug_verbose)
5117 		pr_info("slave_configure <%u %u %u %llu>\n",
5118 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5119 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5120 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5121 	if (devip == NULL) {
5122 		devip = find_build_dev_info(sdp);
5123 		if (devip == NULL)
5124 			return 1;  /* no resources, will be marked offline */
5125 	}
5126 	sdp->hostdata = devip;
5127 	if (sdebug_no_uld)
5128 		sdp->no_uld_attach = 1;
5129 	config_cdb_len(sdp);
5130 	return 0;
5131 }
5132 
5133 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5134 {
5135 	struct sdebug_dev_info *devip =
5136 		(struct sdebug_dev_info *)sdp->hostdata;
5137 
5138 	if (sdebug_verbose)
5139 		pr_info("slave_destroy <%u %u %u %llu>\n",
5140 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5141 	if (devip) {
5142 		/* make this slot available for re-use */
5143 		devip->used = false;
5144 		sdp->hostdata = NULL;
5145 	}
5146 }
5147 
5148 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5149 			   enum sdeb_defer_type defer_t)
5150 {
5151 	if (!sd_dp)
5152 		return;
5153 	if (defer_t == SDEB_DEFER_HRT)
5154 		hrtimer_cancel(&sd_dp->hrt);
5155 	else if (defer_t == SDEB_DEFER_WQ)
5156 		cancel_work_sync(&sd_dp->ew.work);
5157 }
5158 
5159 /* If @cmnd is found, deletes its timer or work queue and returns true;
5160    else returns false. */
5161 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5162 {
5163 	unsigned long iflags;
5164 	int j, k, qmax, r_qmax;
5165 	enum sdeb_defer_type l_defer_t;
5166 	struct sdebug_queue *sqp;
5167 	struct sdebug_queued_cmd *sqcp;
5168 	struct sdebug_dev_info *devip;
5169 	struct sdebug_defer *sd_dp;
5170 
5171 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5172 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5173 		qmax = sdebug_max_queue;
5174 		r_qmax = atomic_read(&retired_max_queue);
5175 		if (r_qmax > qmax)
5176 			qmax = r_qmax;
5177 		for (k = 0; k < qmax; ++k) {
5178 			if (test_bit(k, sqp->in_use_bm)) {
5179 				sqcp = &sqp->qc_arr[k];
5180 				if (cmnd != sqcp->a_cmnd)
5181 					continue;
5182 				/* found */
5183 				devip = (struct sdebug_dev_info *)
5184 						cmnd->device->hostdata;
5185 				if (devip)
5186 					atomic_dec(&devip->num_in_q);
5187 				sqcp->a_cmnd = NULL;
5188 				sd_dp = sqcp->sd_dp;
5189 				if (sd_dp) {
5190 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5191 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5192 				} else
5193 					l_defer_t = SDEB_DEFER_NONE;
5194 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5195 				stop_qc_helper(sd_dp, l_defer_t);
5196 				clear_bit(k, sqp->in_use_bm);
5197 				return true;
5198 			}
5199 		}
5200 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5201 	}
5202 	return false;
5203 }
5204 
5205 /* Deletes (stops) timers or work queues of all queued commands */
5206 static void stop_all_queued(void)
5207 {
5208 	unsigned long iflags;
5209 	int j, k;
5210 	enum sdeb_defer_type l_defer_t;
5211 	struct sdebug_queue *sqp;
5212 	struct sdebug_queued_cmd *sqcp;
5213 	struct sdebug_dev_info *devip;
5214 	struct sdebug_defer *sd_dp;
5215 
5216 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5217 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5218 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5219 			if (test_bit(k, sqp->in_use_bm)) {
5220 				sqcp = &sqp->qc_arr[k];
5221 				if (sqcp->a_cmnd == NULL)
5222 					continue;
5223 				devip = (struct sdebug_dev_info *)
5224 					sqcp->a_cmnd->device->hostdata;
5225 				if (devip)
5226 					atomic_dec(&devip->num_in_q);
5227 				sqcp->a_cmnd = NULL;
5228 				sd_dp = sqcp->sd_dp;
5229 				if (sd_dp) {
5230 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5231 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5232 				} else
5233 					l_defer_t = SDEB_DEFER_NONE;
5234 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5235 				stop_qc_helper(sd_dp, l_defer_t);
5236 				clear_bit(k, sqp->in_use_bm);
5237 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5238 			}
5239 		}
5240 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5241 	}
5242 }
5243 
5244 /* Free queued command memory on heap */
5245 static void free_all_queued(void)
5246 {
5247 	int j, k;
5248 	struct sdebug_queue *sqp;
5249 	struct sdebug_queued_cmd *sqcp;
5250 
5251 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5252 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5253 			sqcp = &sqp->qc_arr[k];
5254 			kfree(sqcp->sd_dp);
5255 			sqcp->sd_dp = NULL;
5256 		}
5257 	}
5258 }
5259 
5260 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5261 {
5262 	bool ok;
5263 
5264 	++num_aborts;
5265 	if (SCpnt) {
5266 		ok = stop_queued_cmnd(SCpnt);
5267 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5268 			sdev_printk(KERN_INFO, SCpnt->device,
5269 				    "%s: command%s found\n", __func__,
5270 				    ok ? "" : " not");
5271 	}
5272 	return SUCCESS;
5273 }
5274 
5275 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5276 {
5277 	++num_dev_resets;
5278 	if (SCpnt && SCpnt->device) {
5279 		struct scsi_device *sdp = SCpnt->device;
5280 		struct sdebug_dev_info *devip =
5281 				(struct sdebug_dev_info *)sdp->hostdata;
5282 
5283 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5284 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5285 		if (devip)
5286 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5287 	}
5288 	return SUCCESS;
5289 }
5290 
5291 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5292 {
5293 	struct sdebug_host_info *sdbg_host;
5294 	struct sdebug_dev_info *devip;
5295 	struct scsi_device *sdp;
5296 	struct Scsi_Host *hp;
5297 	int k = 0;
5298 
5299 	++num_target_resets;
5300 	if (!SCpnt)
5301 		goto lie;
5302 	sdp = SCpnt->device;
5303 	if (!sdp)
5304 		goto lie;
5305 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5306 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5307 	hp = sdp->host;
5308 	if (!hp)
5309 		goto lie;
5310 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5311 	if (sdbg_host) {
5312 		list_for_each_entry(devip,
5313 				    &sdbg_host->dev_info_list,
5314 				    dev_list)
5315 			if (devip->target == sdp->id) {
5316 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5317 				++k;
5318 			}
5319 	}
5320 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5321 		sdev_printk(KERN_INFO, sdp,
5322 			    "%s: %d device(s) found in target\n", __func__, k);
5323 lie:
5324 	return SUCCESS;
5325 }
5326 
5327 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5328 {
5329 	struct sdebug_host_info *sdbg_host;
5330 	struct sdebug_dev_info *devip;
5331 	struct scsi_device *sdp;
5332 	struct Scsi_Host *hp;
5333 	int k = 0;
5334 
5335 	++num_bus_resets;
5336 	if (!(SCpnt && SCpnt->device))
5337 		goto lie;
5338 	sdp = SCpnt->device;
5339 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5340 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5341 	hp = sdp->host;
5342 	if (hp) {
5343 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5344 		if (sdbg_host) {
5345 			list_for_each_entry(devip,
5346 					    &sdbg_host->dev_info_list,
5347 					    dev_list) {
5348 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5349 				++k;
5350 			}
5351 		}
5352 	}
5353 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5354 		sdev_printk(KERN_INFO, sdp,
5355 			    "%s: %d device(s) found in host\n", __func__, k);
5356 lie:
5357 	return SUCCESS;
5358 }
5359 
5360 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5361 {
5362 	struct sdebug_host_info *sdbg_host;
5363 	struct sdebug_dev_info *devip;
5364 	int k = 0;
5365 
5366 	++num_host_resets;
5367 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5368 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5369 	spin_lock(&sdebug_host_list_lock);
5370 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5371 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5372 				    dev_list) {
5373 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5374 			++k;
5375 		}
5376 	}
5377 	spin_unlock(&sdebug_host_list_lock);
5378 	stop_all_queued();
5379 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5380 		sdev_printk(KERN_INFO, SCpnt->device,
5381 			    "%s: %d device(s) found\n", __func__, k);
5382 	return SUCCESS;
5383 }
5384 
5385 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5386 {
5387 	struct msdos_partition *pp;
5388 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5389 	int sectors_per_part, num_sectors, k;
5390 	int heads_by_sects, start_sec, end_sec;
5391 
5392 	/* assume partition table already zeroed */
5393 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5394 		return;
5395 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5396 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5397 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5398 	}
5399 	num_sectors = (int)get_sdebug_capacity();
5400 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5401 			   / sdebug_num_parts;
5402 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5403 	starts[0] = sdebug_sectors_per;
5404 	max_part_secs = sectors_per_part;
5405 	for (k = 1; k < sdebug_num_parts; ++k) {
5406 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5407 			    * heads_by_sects;
5408 		if (starts[k] - starts[k - 1] < max_part_secs)
5409 			max_part_secs = starts[k] - starts[k - 1];
5410 	}
5411 	starts[sdebug_num_parts] = num_sectors;
5412 	starts[sdebug_num_parts + 1] = 0;
5413 
5414 	ramp[510] = 0x55;	/* magic partition markings */
5415 	ramp[511] = 0xAA;
5416 	pp = (struct msdos_partition *)(ramp + 0x1be);
5417 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5418 		start_sec = starts[k];
5419 		end_sec = starts[k] + max_part_secs - 1;
5420 		pp->boot_ind = 0;
5421 
5422 		pp->cyl = start_sec / heads_by_sects;
5423 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5424 			   / sdebug_sectors_per;
5425 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5426 
5427 		pp->end_cyl = end_sec / heads_by_sects;
5428 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5429 			       / sdebug_sectors_per;
5430 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5431 
5432 		pp->start_sect = cpu_to_le32(start_sec);
5433 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5434 		pp->sys_ind = 0x83;	/* plain Linux partition */
5435 	}
5436 }
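
/*
 * Worked example for sdebug_build_parts() above, with illustrative
 * geometry (heads=32, sectors_per=32, so heads_by_sects=1024): a
 * partition starting at sector 2048 is encoded as cylinder 2048/1024 = 2,
 * head (2048 - 2*1024)/32 = 0 and, since CHS sector numbers are 1-based,
 * sector 2048%32 + 1 = 1.
 */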
5437 
5438 static void block_unblock_all_queues(bool block)
5439 {
5440 	int j;
5441 	struct sdebug_queue *sqp;
5442 
5443 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5444 		atomic_set(&sqp->blocked, (int)block);
5445 }
5446 
5447 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5448  * commands will be processed normally before triggers occur.
5449  */
5450 static void tweak_cmnd_count(void)
5451 {
5452 	int count, modulo;
5453 
5454 	modulo = abs(sdebug_every_nth);
5455 	if (modulo < 2)
5456 		return;
5457 	block_unblock_all_queues(true);
5458 	count = atomic_read(&sdebug_cmnd_count);
5459 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5460 	block_unblock_all_queues(false);
5461 }
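
/*
 * Worked example for tweak_cmnd_count() above: with every_nth=100 and
 * sdebug_cmnd_count at 437, the count is rounded down to 400, so per the
 * note above the trigger next fires when the count reaches 500, i.e.
 * after 99 more normally processed commands.
 */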
5462 
5463 static void clear_queue_stats(void)
5464 {
5465 	atomic_set(&sdebug_cmnd_count, 0);
5466 	atomic_set(&sdebug_completions, 0);
5467 	atomic_set(&sdebug_miss_cpus, 0);
5468 	atomic_set(&sdebug_a_tsf, 0);
5469 }
5470 
5471 static bool inject_on_this_cmd(void)
5472 {
5473 	if (sdebug_every_nth == 0)
5474 		return false;
5475 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5476 }
5477 
5478 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5479 
5480 /* Complete the processing of the thread that queued a SCSI command to this
5481  * driver. It either completes the command by calling scsi_done() or
5482  * schedules an hrtimer or work queue item and then returns 0. Returns
5483  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5484  */
5485 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5486 			 int scsi_result,
5487 			 int (*pfp)(struct scsi_cmnd *,
5488 				    struct sdebug_dev_info *),
5489 			 int delta_jiff, int ndelay)
5490 {
5491 	bool new_sd_dp;
5492 	bool inject = false;
5493 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5494 	int k, num_in_q, qdepth;
5495 	unsigned long iflags;
5496 	u64 ns_from_boot = 0;
5497 	struct sdebug_queue *sqp;
5498 	struct sdebug_queued_cmd *sqcp;
5499 	struct scsi_device *sdp;
5500 	struct sdebug_defer *sd_dp;
5501 
5502 	if (unlikely(devip == NULL)) {
5503 		if (scsi_result == 0)
5504 			scsi_result = DID_NO_CONNECT << 16;
5505 		goto respond_in_thread;
5506 	}
5507 	sdp = cmnd->device;
5508 
5509 	if (delta_jiff == 0)
5510 		goto respond_in_thread;
5511 
5512 	sqp = get_queue(cmnd);
5513 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5514 	if (unlikely(atomic_read(&sqp->blocked))) {
5515 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5516 		return SCSI_MLQUEUE_HOST_BUSY;
5517 	}
5518 	num_in_q = atomic_read(&devip->num_in_q);
5519 	qdepth = cmnd->device->queue_depth;
5520 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5521 		if (scsi_result) {
5522 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5523 			goto respond_in_thread;
5524 		} else
5525 			scsi_result = device_qfull_result;
5526 	} else if (unlikely(sdebug_every_nth &&
5527 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5528 			    (scsi_result == 0))) {
5529 		if ((num_in_q == (qdepth - 1)) &&
5530 		    (atomic_inc_return(&sdebug_a_tsf) >=
5531 		     abs(sdebug_every_nth))) {
5532 			atomic_set(&sdebug_a_tsf, 0);
5533 			inject = true;
5534 			scsi_result = device_qfull_result;
5535 		}
5536 	}
5537 
5538 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5539 	if (unlikely(k >= sdebug_max_queue)) {
5540 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5541 		if (scsi_result)
5542 			goto respond_in_thread;
5543 		scsi_result = device_qfull_result;
5544 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5545 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5546 				    __func__, sdebug_max_queue);
5547 		goto respond_in_thread;
5548 	}
5549 	set_bit(k, sqp->in_use_bm);
5550 	atomic_inc(&devip->num_in_q);
5551 	sqcp = &sqp->qc_arr[k];
5552 	sqcp->a_cmnd = cmnd;
5553 	cmnd->host_scribble = (unsigned char *)sqcp;
5554 	sd_dp = sqcp->sd_dp;
5555 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5556 
5557 	if (!sd_dp) {
5558 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5559 		if (!sd_dp) {
5560 			atomic_dec(&devip->num_in_q);
5561 			clear_bit(k, sqp->in_use_bm);
5562 			return SCSI_MLQUEUE_HOST_BUSY;
5563 		}
5564 		new_sd_dp = true;
5565 	} else {
5566 		new_sd_dp = false;
5567 	}
5568 
5569 	/* Set the hostwide tag */
5570 	if (sdebug_host_max_queue)
5571 		sd_dp->hc_idx = get_tag(cmnd);
5572 
5573 	if (polled)
5574 		ns_from_boot = ktime_get_boottime_ns();
5575 
5576 	/* one of the resp_*() response functions is called here */
5577 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5578 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5579 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5580 		delta_jiff = ndelay = 0;
5581 	}
5582 	if (cmnd->result == 0 && scsi_result != 0)
5583 		cmnd->result = scsi_result;
5584 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5585 		if (atomic_read(&sdeb_inject_pending)) {
5586 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5587 			atomic_set(&sdeb_inject_pending, 0);
5588 			cmnd->result = check_condition_result;
5589 		}
5590 	}
5591 
5592 	if (unlikely(sdebug_verbose && cmnd->result))
5593 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5594 			    __func__, cmnd->result);
5595 
5596 	if (delta_jiff > 0 || ndelay > 0) {
5597 		ktime_t kt;
5598 
5599 		if (delta_jiff > 0) {
5600 			u64 ns = jiffies_to_nsecs(delta_jiff);
5601 
5602 			if (sdebug_random && ns < U32_MAX) {
5603 				ns = prandom_u32_max((u32)ns);
5604 			} else if (sdebug_random) {
5605 				ns >>= 12;	/* scale to 4 usec precision */
5606 				if (ns < U32_MAX)	/* over 4 hours max */
5607 					ns = prandom_u32_max((u32)ns);
5608 				ns <<= 12;
5609 			}
5610 			kt = ns_to_ktime(ns);
5611 		} else {	/* ndelay has a 4.2 second max */
5612 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5613 					     (u32)ndelay;
5614 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5615 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5616 
5617 				if (kt <= d) {	/* elapsed duration >= kt */
5618 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5619 					sqcp->a_cmnd = NULL;
5620 					atomic_dec(&devip->num_in_q);
5621 					clear_bit(k, sqp->in_use_bm);
5622 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5623 					if (new_sd_dp)
5624 						kfree(sd_dp);
5625 					/* call scsi_done() from this thread */
5626 					scsi_done(cmnd);
5627 					return 0;
5628 				}
5629 				/* otherwise reduce kt by elapsed time */
5630 				kt -= d;
5631 			}
5632 		}
5633 		if (polled) {
5634 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5635 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5636 			if (!sd_dp->init_poll) {
5637 				sd_dp->init_poll = true;
5638 				sqcp->sd_dp = sd_dp;
5639 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5640 				sd_dp->qc_idx = k;
5641 			}
5642 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5643 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5644 		} else {
5645 			if (!sd_dp->init_hrt) {
5646 				sd_dp->init_hrt = true;
5647 				sqcp->sd_dp = sd_dp;
5648 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5649 					     HRTIMER_MODE_REL_PINNED);
5650 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5651 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5652 				sd_dp->qc_idx = k;
5653 			}
5654 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5655 			/* schedule the invocation of scsi_done() for a later time */
5656 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5657 		}
5658 		if (sdebug_statistics)
5659 			sd_dp->issuing_cpu = raw_smp_processor_id();
5660 	} else {	/* delta_jiff < 0, use work queue */
5661 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5662 			     atomic_read(&sdeb_inject_pending)))
5663 			sd_dp->aborted = true;
5664 		if (polled) {
5665 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5666 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5667 			if (!sd_dp->init_poll) {
5668 				sd_dp->init_poll = true;
5669 				sqcp->sd_dp = sd_dp;
5670 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5671 				sd_dp->qc_idx = k;
5672 			}
5673 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5674 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5675 		} else {
5676 			if (!sd_dp->init_wq) {
5677 				sd_dp->init_wq = true;
5678 				sqcp->sd_dp = sd_dp;
5679 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5680 				sd_dp->qc_idx = k;
5681 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5682 			}
5683 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5684 			schedule_work(&sd_dp->ew.work);
5685 		}
5686 		if (sdebug_statistics)
5687 			sd_dp->issuing_cpu = raw_smp_processor_id();
5688 		if (unlikely(sd_dp->aborted)) {
5689 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5690 				    scsi_cmd_to_rq(cmnd)->tag);
5691 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5692 			atomic_set(&sdeb_inject_pending, 0);
5693 			sd_dp->aborted = false;
5694 		}
5695 	}
5696 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5697 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5698 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5699 	return 0;
5700 
5701 respond_in_thread:	/* call back to mid-layer using invocation thread */
5702 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5703 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5704 	if (cmnd->result == 0 && scsi_result != 0)
5705 		cmnd->result = scsi_result;
5706 	scsi_done(cmnd);
5707 	return 0;
5708 }
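
/*
 * Reading aid for schedule_resp() above (a summary, not normative):
 *   delta_jiff == 0                respond in the submitting thread
 *   delta_jiff > 0 or ndelay > 0   respond later from a pinned hrtimer
 *   delta_jiff < 0 and ndelay <= 0 respond later from a work queue
 * REQ_POLLED requests are instead parked for mq_poll in the latter two
 * cases. For example, delay=2 with HZ=250 gives jiffies_to_nsecs(2) =
 * 8000000 ns, so completions arrive roughly 8 ms after submission.
 */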
5709 
5710 /* Note: The following macros create attribute files in the
5711    /sys/module/scsi_debug/parameters directory. Unfortunately this
5712    driver is unaware of changes made via those files and cannot trigger
5713    the auxiliary actions it performs when the corresponding attribute in
5714    the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5715  */
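/*
 * Illustrative usage (parameter values are arbitrary examples):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *   cat /sys/module/scsi_debug/parameters/opts
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * The last line changes opts via the driver attribute, which (unlike
 * writing the module parameter file) also runs the store callback's
 * auxiliary actions.
 */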
5716 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5717 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5718 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5719 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5720 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5721 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5722 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5723 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5724 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5725 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5726 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5727 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5728 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5729 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5730 module_param_string(inq_product, sdebug_inq_product_id,
5731 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5732 module_param_string(inq_rev, sdebug_inq_product_rev,
5733 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5734 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5735 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5736 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5737 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5738 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5739 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5740 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5741 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5742 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5743 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5744 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5745 		   S_IRUGO | S_IWUSR);
5746 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5747 		   S_IRUGO | S_IWUSR);
5748 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5749 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5750 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5751 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5752 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5753 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5754 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5755 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5756 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5757 module_param_named(per_host_store, sdebug_per_host_store, bool,
5758 		   S_IRUGO | S_IWUSR);
5759 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5760 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5761 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5762 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5763 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5764 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5765 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5766 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5767 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5768 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5769 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5770 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5771 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5772 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5773 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5774 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5775 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5776 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5777 		   S_IRUGO | S_IWUSR);
5778 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5779 module_param_named(write_same_length, sdebug_write_same_length, int,
5780 		   S_IRUGO | S_IWUSR);
5781 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5782 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5783 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5784 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5785 
5786 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5787 MODULE_DESCRIPTION("SCSI debug adapter driver");
5788 MODULE_LICENSE("GPL");
5789 MODULE_VERSION(SDEBUG_VERSION);
5790 
5791 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5792 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5793 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5794 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5795 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5796 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5797 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5798 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5799 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5800 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5801 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5802 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5803 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5804 MODULE_PARM_DESC(host_max_queue,
5805 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5806 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5807 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5808 		 SDEBUG_VERSION "\")");
5809 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5810 MODULE_PARM_DESC(lbprz,
5811 		 "unmapped LBs read as 0 when 1 (def) or as 0xff when 2");
5812 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5813 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5814 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5815 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5816 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5817 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5818 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5819 MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return a MEDIUM error");
5820 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5821 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5822 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5823 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5824 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5825 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5826 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5827 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5828 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5829 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5830 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5831 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5832 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5833 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5834 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5835 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5836 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5837 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5838 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5839 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5840 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5841 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5842 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5843 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5844 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5845 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5846 MODULE_PARM_DESC(uuid_ctl,
5847 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5848 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5849 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5850 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5851 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5852 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5853 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5854 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5855 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
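
/*
 * Example combining the parameters above for error injection (values are
 * illustrative, not recommendations):
 *
 *   modprobe scsi_debug every_nth=100 opts=2
 *
 * makes roughly every 100th command report a medium error (opts value 2
 * -> medium_err, per the opts description above).
 */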
5856 
5857 #define SDEBUG_INFO_LEN 256
5858 static char sdebug_info[SDEBUG_INFO_LEN];
5859 
5860 static const char *scsi_debug_info(struct Scsi_Host *shp)
5861 {
5862 	int k;
5863 
5864 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5865 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5866 	if (k >= (SDEBUG_INFO_LEN - 1))
5867 		return sdebug_info;
5868 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5869 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5870 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5871 		  "statistics", (int)sdebug_statistics);
5872 	return sdebug_info;
5873 }
5874 
5875 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5876 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5877 				 int length)
5878 {
5879 	char arr[16];
5880 	int opts;
5881 	int min_len = length > 15 ? 15 : length;
5882 
5883 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5884 		return -EACCES;
5885 	memcpy(arr, buffer, min_len);
5886 	arr[min_len] = '\0';
5887 	if (1 != sscanf(arr, "%d", &opts))
5888 		return -EINVAL;
5889 	sdebug_opts = opts;
5890 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5891 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5892 	if (sdebug_every_nth != 0)
5893 		tweak_cmnd_count();
5894 	return length;
5895 }
5896 
5897 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5898  * same for each scsi_debug host (if more than one). Some of the counters
5899  * in the output are not atomic so may be inaccurate on a busy system. */
5900 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5901 {
5902 	int f, j, l;
5903 	struct sdebug_queue *sqp;
5904 	struct sdebug_host_info *sdhp;
5905 
5906 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5907 		   SDEBUG_VERSION, sdebug_version_date);
5908 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5909 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5910 		   sdebug_opts, sdebug_every_nth);
5911 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5912 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5913 		   sdebug_sector_size, "bytes");
5914 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5915 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5916 		   num_aborts);
5917 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5918 		   num_dev_resets, num_target_resets, num_bus_resets,
5919 		   num_host_resets);
5920 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5921 		   dix_reads, dix_writes, dif_errors);
5922 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5923 		   sdebug_statistics);
5924 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5925 		   atomic_read(&sdebug_cmnd_count),
5926 		   atomic_read(&sdebug_completions),
5927 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5928 		   atomic_read(&sdebug_a_tsf),
5929 		   atomic_read(&sdeb_mq_poll_count));
5930 
5931 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5932 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5933 		seq_printf(m, "  queue %d:\n", j);
5934 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5935 		if (f != sdebug_max_queue) {
5936 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5937 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5938 				   "first,last bits", f, l);
5939 		}
5940 	}
5941 
5942 	seq_printf(m, "this host_no=%d\n", host->host_no);
5943 	if (!xa_empty(per_store_ap)) {
5944 		bool niu;
5945 		int idx;
5946 		unsigned long l_idx;
5947 		struct sdeb_store_info *sip;
5948 
5949 		seq_puts(m, "\nhost list:\n");
5950 		j = 0;
5951 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5952 			idx = sdhp->si_idx;
5953 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5954 				   sdhp->shost->host_no, idx);
5955 			++j;
5956 		}
5957 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5958 			   sdeb_most_recent_idx);
5959 		j = 0;
5960 		xa_for_each(per_store_ap, l_idx, sip) {
5961 			niu = xa_get_mark(per_store_ap, l_idx,
5962 					  SDEB_XA_NOT_IN_USE);
5963 			idx = (int)l_idx;
5964 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5965 				   (niu ? "  not_in_use" : ""));
5966 			++j;
5967 		}
5968 	}
5969 	return 0;
5970 }
5971 
5972 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5973 {
5974 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5975 }
5976 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5977  * of delay is jiffies.
5978  */
5979 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5980 			   size_t count)
5981 {
5982 	int jdelay, res;
5983 
5984 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5985 		res = count;
5986 		if (sdebug_jdelay != jdelay) {
5987 			int j, k;
5988 			struct sdebug_queue *sqp;
5989 
5990 			block_unblock_all_queues(true);
5991 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5992 			     ++j, ++sqp) {
5993 				k = find_first_bit(sqp->in_use_bm,
5994 						   sdebug_max_queue);
5995 				if (k != sdebug_max_queue) {
5996 					res = -EBUSY;   /* queued commands */
5997 					break;
5998 				}
5999 			}
6000 			if (res > 0) {
6001 				sdebug_jdelay = jdelay;
6002 				sdebug_ndelay = 0;
6003 			}
6004 			block_unblock_all_queues(false);
6005 		}
6006 		return res;
6007 	}
6008 	return -EINVAL;
6009 }
6010 static DRIVER_ATTR_RW(delay);
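
/*
 * Example: 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' switches
 * to immediate (in-thread) responses; per the note above, the write
 * fails with -EBUSY while any commands are still queued.
 */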
6011 
6012 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6013 {
6014 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6015 }
6016 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6017 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6018 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6019 			    size_t count)
6020 {
6021 	int ndelay, res;
6022 
6023 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6024 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6025 		res = count;
6026 		if (sdebug_ndelay != ndelay) {
6027 			int j, k;
6028 			struct sdebug_queue *sqp;
6029 
6030 			block_unblock_all_queues(true);
6031 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6032 			     ++j, ++sqp) {
6033 				k = find_first_bit(sqp->in_use_bm,
6034 						   sdebug_max_queue);
6035 				if (k != sdebug_max_queue) {
6036 					res = -EBUSY;   /* queued commands */
6037 					break;
6038 				}
6039 			}
6040 			if (res > 0) {
6041 				sdebug_ndelay = ndelay;
6042 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6043 							: DEF_JDELAY;
6044 			}
6045 			block_unblock_all_queues(false);
6046 		}
6047 		return res;
6048 	}
6049 	return -EINVAL;
6050 }
6051 static DRIVER_ATTR_RW(ndelay);
6052 
6053 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6054 {
6055 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6056 }
6057 
6058 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6059 			  size_t count)
6060 {
6061 	int opts;
6062 	char work[20];
6063 
6064 	if (sscanf(buf, "%10s", work) == 1) {
6065 		if (strncasecmp(work, "0x", 2) == 0) {
6066 			if (kstrtoint(work + 2, 16, &opts) == 0)
6067 				goto opts_done;
6068 		} else {
6069 			if (kstrtoint(work, 10, &opts) == 0)
6070 				goto opts_done;
6071 		}
6072 	}
6073 	return -EINVAL;
6074 opts_done:
6075 	sdebug_opts = opts;
6076 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6077 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6078 	tweak_cmnd_count();
6079 	return count;
6080 }
6081 static DRIVER_ATTR_RW(opts);
6082 
6083 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6084 {
6085 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6086 }
6087 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6088 			   size_t count)
6089 {
6090 	int n;
6091 
6092 	/* Cannot change from or to TYPE_ZBC with sysfs */
6093 	if (sdebug_ptype == TYPE_ZBC)
6094 		return -EINVAL;
6095 
6096 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6097 		if (n == TYPE_ZBC)
6098 			return -EINVAL;
6099 		sdebug_ptype = n;
6100 		return count;
6101 	}
6102 	return -EINVAL;
6103 }
6104 static DRIVER_ATTR_RW(ptype);
6105 
6106 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6107 {
6108 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6109 }
6110 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6111 			    size_t count)
6112 {
6113 	int n;
6114 
6115 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6116 		sdebug_dsense = n;
6117 		return count;
6118 	}
6119 	return -EINVAL;
6120 }
6121 static DRIVER_ATTR_RW(dsense);
6122 
6123 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6124 {
6125 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6126 }
6127 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6128 			     size_t count)
6129 {
6130 	int n, idx;
6131 
6132 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6133 		bool want_store = (n == 0);
6134 		struct sdebug_host_info *sdhp;
6135 
6136 		n = (n > 0);
6137 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6138 		if (sdebug_fake_rw == n)
6139 			return count;	/* not transitioning so do nothing */
6140 
6141 		if (want_store) {	/* 1 --> 0 transition, set up store */
6142 			if (sdeb_first_idx < 0) {
6143 				idx = sdebug_add_store();
6144 				if (idx < 0)
6145 					return idx;
6146 			} else {
6147 				idx = sdeb_first_idx;
6148 				xa_clear_mark(per_store_ap, idx,
6149 					      SDEB_XA_NOT_IN_USE);
6150 			}
6151 			/* make all hosts use same store */
6152 			list_for_each_entry(sdhp, &sdebug_host_list,
6153 					    host_list) {
6154 				if (sdhp->si_idx != idx) {
6155 					xa_set_mark(per_store_ap, sdhp->si_idx,
6156 						    SDEB_XA_NOT_IN_USE);
6157 					sdhp->si_idx = idx;
6158 				}
6159 			}
6160 			sdeb_most_recent_idx = idx;
6161 		} else {	/* 0 --> 1 transition is trigger for shrink */
6162 			sdebug_erase_all_stores(true /* apart from first */);
6163 		}
6164 		sdebug_fake_rw = n;
6165 		return count;
6166 	}
6167 	return -EINVAL;
6168 }
6169 static DRIVER_ATTR_RW(fake_rw);
6170 
6171 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6172 {
6173 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6174 }
6175 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6176 			      size_t count)
6177 {
6178 	int n;
6179 
6180 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6181 		sdebug_no_lun_0 = n;
6182 		return count;
6183 	}
6184 	return -EINVAL;
6185 }
6186 static DRIVER_ATTR_RW(no_lun_0);
6187 
6188 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6189 {
6190 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6191 }
6192 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6193 			      size_t count)
6194 {
6195 	int n;
6196 
6197 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6198 		sdebug_num_tgts = n;
6199 		sdebug_max_tgts_luns();
6200 		return count;
6201 	}
6202 	return -EINVAL;
6203 }
6204 static DRIVER_ATTR_RW(num_tgts);
6205 
6206 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6207 {
6208 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6209 }
6210 static DRIVER_ATTR_RO(dev_size_mb);
6211 
6212 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6213 {
6214 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6215 }
6216 
6217 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6218 				    size_t count)
6219 {
6220 	bool v;
6221 
6222 	if (kstrtobool(buf, &v))
6223 		return -EINVAL;
6224 
6225 	sdebug_per_host_store = v;
6226 	return count;
6227 }
6228 static DRIVER_ATTR_RW(per_host_store);
6229 
6230 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6231 {
6232 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6233 }
6234 static DRIVER_ATTR_RO(num_parts);
6235 
6236 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6237 {
6238 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6239 }
6240 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6241 			       size_t count)
6242 {
6243 	int nth;
6244 	char work[20];
6245 
6246 	if (sscanf(buf, "%10s", work) == 1) {
6247 		if (strncasecmp(work, "0x", 2) == 0) {
6248 			if (kstrtoint(work + 2, 16, &nth) == 0)
6249 				goto every_nth_done;
6250 		} else {
6251 			if (kstrtoint(work, 10, &nth) == 0)
6252 				goto every_nth_done;
6253 		}
6254 	}
6255 	return -EINVAL;
6256 
6257 every_nth_done:
6258 	sdebug_every_nth = nth;
6259 	if (nth && !sdebug_statistics) {
6260 		pr_info("every_nth needs statistics=1, set it\n");
6261 		sdebug_statistics = true;
6262 	}
6263 	tweak_cmnd_count();
6264 	return count;
6265 }
6266 static DRIVER_ATTR_RW(every_nth);
6267 
6268 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6269 {
6270 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6271 }
6272 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6273 				size_t count)
6274 {
6275 	int n;
6276 	bool changed;
6277 
6278 	if (kstrtoint(buf, 0, &n))
6279 		return -EINVAL;
6280 	if (n >= 0) {
6281 		if (n > (int)SAM_LUN_AM_FLAT) {
6282 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6283 			return -EINVAL;
6284 		}
6285 		changed = ((int)sdebug_lun_am != n);
6286 		sdebug_lun_am = n;
6287 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6288 			struct sdebug_host_info *sdhp;
6289 			struct sdebug_dev_info *dp;
6290 
6291 			spin_lock(&sdebug_host_list_lock);
6292 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6293 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6294 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6295 				}
6296 			}
6297 			spin_unlock(&sdebug_host_list_lock);
6298 		}
6299 		return count;
6300 	}
6301 	return -EINVAL;
6302 }
6303 static DRIVER_ATTR_RW(lun_format);
6304 
6305 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6306 {
6307 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6308 }
6309 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6310 			      size_t count)
6311 {
6312 	int n;
6313 	bool changed;
6314 
6315 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6316 		if (n > 256) {
6317 			pr_warn("max_luns can be no more than 256\n");
6318 			return -EINVAL;
6319 		}
6320 		changed = (sdebug_max_luns != n);
6321 		sdebug_max_luns = n;
6322 		sdebug_max_tgts_luns();
6323 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6324 			struct sdebug_host_info *sdhp;
6325 			struct sdebug_dev_info *dp;
6326 
6327 			spin_lock(&sdebug_host_list_lock);
6328 			list_for_each_entry(sdhp, &sdebug_host_list,
6329 					    host_list) {
6330 				list_for_each_entry(dp, &sdhp->dev_info_list,
6331 						    dev_list) {
6332 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6333 						dp->uas_bm);
6334 				}
6335 			}
6336 			spin_unlock(&sdebug_host_list_lock);
6337 		}
6338 		return count;
6339 	}
6340 	return -EINVAL;
6341 }
6342 static DRIVER_ATTR_RW(max_luns);
6343 
6344 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6345 {
6346 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6347 }
6348 /* N.B. max_queue can be changed while there are queued commands. In-flight
6349  * commands beyond the new max_queue will still be completed. */
6350 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6351 			       size_t count)
6352 {
6353 	int j, n, k, a;
6354 	struct sdebug_queue *sqp;
6355 
6356 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6357 	    (n <= SDEBUG_CANQUEUE) &&
6358 	    (sdebug_host_max_queue == 0)) {
6359 		block_unblock_all_queues(true);
6360 		k = 0;
6361 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6362 		     ++j, ++sqp) {
6363 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6364 			if (a > k)
6365 				k = a;
6366 		}
6367 		sdebug_max_queue = n;
6368 		if (k == SDEBUG_CANQUEUE)
6369 			atomic_set(&retired_max_queue, 0);
6370 		else if (k >= n)
6371 			atomic_set(&retired_max_queue, k + 1);
6372 		else
6373 			atomic_set(&retired_max_queue, 0);
6374 		block_unblock_all_queues(false);
6375 		return count;
6376 	}
6377 	return -EINVAL;
6378 }
6379 static DRIVER_ATTR_RW(max_queue);
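
/*
 * Worked example of the retirement logic above: shrinking max_queue from
 * 64 to 16 while tags 20 and 35 are still in flight sets
 * retired_max_queue to 36. As those commands complete,
 * sdebug_q_cmd_complete() steps the value down, reaching 0 once no tag
 * at or above the new limit remains in use.
 */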
6380 
6381 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6382 {
6383 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6384 }
6385 
6386 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6387 {
6388 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6389 }
6390 
6391 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6392 {
6393 	bool v;
6394 
6395 	if (kstrtobool(buf, &v))
6396 		return -EINVAL;
6397 
6398 	sdebug_no_rwlock = v;
6399 	return count;
6400 }
6401 static DRIVER_ATTR_RW(no_rwlock);
6402 
6403 /*
6404  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6405  * in range [0, sdebug_host_max_queue), we can't change it.
6406  */
6407 static DRIVER_ATTR_RO(host_max_queue);
6408 
6409 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6410 {
6411 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6412 }
6413 static DRIVER_ATTR_RO(no_uld);
6414 
6415 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6416 {
6417 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6418 }
6419 static DRIVER_ATTR_RO(scsi_level);
6420 
6421 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6422 {
6423 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6424 }
6425 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6426 				size_t count)
6427 {
6428 	int n;
6429 	bool changed;
6430 
6431 	/* Ignore capacity change for ZBC drives for now */
6432 	if (sdeb_zbc_in_use)
6433 		return -ENOTSUPP;
6434 
6435 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6436 		changed = (sdebug_virtual_gb != n);
6437 		sdebug_virtual_gb = n;
6438 		sdebug_capacity = get_sdebug_capacity();
6439 		if (changed) {
6440 			struct sdebug_host_info *sdhp;
6441 			struct sdebug_dev_info *dp;
6442 
6443 			spin_lock(&sdebug_host_list_lock);
6444 			list_for_each_entry(sdhp, &sdebug_host_list,
6445 					    host_list) {
6446 				list_for_each_entry(dp, &sdhp->dev_info_list,
6447 						    dev_list) {
6448 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6449 						dp->uas_bm);
6450 				}
6451 			}
6452 			spin_unlock(&sdebug_host_list_lock);
6453 		}
6454 		return count;
6455 	}
6456 	return -EINVAL;
6457 }
6458 static DRIVER_ATTR_RW(virtual_gb);
6459 
6460 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6461 {
6462 	/* absolute number of hosts currently active is what is shown */
6463 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6464 }
6465 
6466 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6467 			      size_t count)
6468 {
6469 	bool found;
6470 	unsigned long idx;
6471 	struct sdeb_store_info *sip;
6472 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6473 	int delta_hosts;
6474 
6475 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6476 		return -EINVAL;
6477 	if (delta_hosts > 0) {
6478 		do {
6479 			found = false;
6480 			if (want_phs) {
6481 				xa_for_each_marked(per_store_ap, idx, sip,
6482 						   SDEB_XA_NOT_IN_USE) {
6483 					sdeb_most_recent_idx = (int)idx;
6484 					found = true;
6485 					break;
6486 				}
6487 				if (found)	/* re-use case */
6488 					sdebug_add_host_helper((int)idx);
6489 				else
6490 					sdebug_do_add_host(true);
6491 			} else {
6492 				sdebug_do_add_host(false);
6493 			}
6494 		} while (--delta_hosts);
6495 	} else if (delta_hosts < 0) {
6496 		do {
6497 			sdebug_do_remove_host(false);
6498 		} while (++delta_hosts);
6499 	}
6500 	return count;
6501 }
6502 static DRIVER_ATTR_RW(add_host);
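
/*
 * Usage sketch for the attribute above: a positive value adds that many
 * pseudo hosts, a negative value removes that many, e.g.:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host	# remove one
 *
 * When fake_rw=0 and per_host_store=1, an added host first tries to re-use
 * a store marked SDEB_XA_NOT_IN_USE; only if none exists is a new store
 * allocated via sdebug_do_add_host(true).
 */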
6503 
6504 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6505 {
6506 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6507 }
6508 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6509 				    size_t count)
6510 {
6511 	int n;
6512 
6513 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6514 		sdebug_vpd_use_hostno = n;
6515 		return count;
6516 	}
6517 	return -EINVAL;
6518 }
6519 static DRIVER_ATTR_RW(vpd_use_hostno);
6520 
6521 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6522 {
6523 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6524 }
6525 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6526 				size_t count)
6527 {
6528 	int n;
6529 
6530 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6531 		if (n > 0)
6532 			sdebug_statistics = true;
6533 		else {
6534 			clear_queue_stats();
6535 			sdebug_statistics = false;
6536 		}
6537 		return count;
6538 	}
6539 	return -EINVAL;
6540 }
6541 static DRIVER_ATTR_RW(statistics);
6542 
6543 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6544 {
6545 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6546 }
6547 static DRIVER_ATTR_RO(sector_size);
6548 
6549 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6550 {
6551 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6552 }
6553 static DRIVER_ATTR_RO(submit_queues);
6554 
6555 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6556 {
6557 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6558 }
6559 static DRIVER_ATTR_RO(dix);
6560 
6561 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6562 {
6563 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6564 }
6565 static DRIVER_ATTR_RO(dif);
6566 
6567 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6568 {
6569 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6570 }
6571 static DRIVER_ATTR_RO(guard);
6572 
6573 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6574 {
6575 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6576 }
6577 static DRIVER_ATTR_RO(ato);
6578 
6579 static ssize_t map_show(struct device_driver *ddp, char *buf)
6580 {
6581 	ssize_t count = 0;
6582 
6583 	if (!scsi_debug_lbp())
6584 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6585 				 sdebug_store_sectors);
6586 
6587 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6588 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6589 
6590 		if (sip)
6591 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6592 					  (int)map_size, sip->map_storep);
6593 	}
6594 	buf[count++] = '\n';
6595 	buf[count] = '\0';
6596 
6597 	return count;
6598 }
6599 static DRIVER_ATTR_RO(map);
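
/*
 * The "%*pbl" above prints the provisioning bitmap as a block-range list,
 * so reading the attribute gives the currently mapped regions. A purely
 * illustrative read (actual ranges depend on prior writes and unmaps):
 *
 *   cat /sys/bus/pseudo/drivers/scsi_debug/map
 *   0-1,32-47,128-131
 */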
6600 
6601 static ssize_t random_show(struct device_driver *ddp, char *buf)
6602 {
6603 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6604 }
6605 
6606 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6607 			    size_t count)
6608 {
6609 	bool v;
6610 
6611 	if (kstrtobool(buf, &v))
6612 		return -EINVAL;
6613 
6614 	sdebug_random = v;
6615 	return count;
6616 }
6617 static DRIVER_ATTR_RW(random);
6618 
6619 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6620 {
6621 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6622 }
6623 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6624 			       size_t count)
6625 {
6626 	int n;
6627 
6628 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6629 		sdebug_removable = (n > 0);
6630 		return count;
6631 	}
6632 	return -EINVAL;
6633 }
6634 static DRIVER_ATTR_RW(removable);
6635 
6636 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6637 {
6638 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6639 }
6640 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6641 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6642 			       size_t count)
6643 {
6644 	int n;
6645 
6646 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6647 		sdebug_host_lock = (n > 0);
6648 		return count;
6649 	}
6650 	return -EINVAL;
6651 }
6652 static DRIVER_ATTR_RW(host_lock);
6653 
6654 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6655 {
6656 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6657 }
6658 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6659 			    size_t count)
6660 {
6661 	int n;
6662 
6663 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6664 		sdebug_strict = (n > 0);
6665 		return count;
6666 	}
6667 	return -EINVAL;
6668 }
6669 static DRIVER_ATTR_RW(strict);
6670 
6671 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6672 {
6673 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6674 }
6675 static DRIVER_ATTR_RO(uuid_ctl);
6676 
6677 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6678 {
6679 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6680 }
6681 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6682 			     size_t count)
6683 {
6684 	int ret, n;
6685 
6686 	ret = kstrtoint(buf, 0, &n);
6687 	if (ret)
6688 		return ret;
6689 	sdebug_cdb_len = n;
6690 	all_config_cdb_len();
6691 	return count;
6692 }
6693 static DRIVER_ATTR_RW(cdb_len);
6694 
6695 static const char * const zbc_model_strs_a[] = {
6696 	[BLK_ZONED_NONE] = "none",
6697 	[BLK_ZONED_HA]   = "host-aware",
6698 	[BLK_ZONED_HM]   = "host-managed",
6699 };
6700 
6701 static const char * const zbc_model_strs_b[] = {
6702 	[BLK_ZONED_NONE] = "no",
6703 	[BLK_ZONED_HA]   = "aware",
6704 	[BLK_ZONED_HM]   = "managed",
6705 };
6706 
6707 static const char * const zbc_model_strs_c[] = {
6708 	[BLK_ZONED_NONE] = "0",
6709 	[BLK_ZONED_HA]   = "1",
6710 	[BLK_ZONED_HM]   = "2",
6711 };
6712 
6713 static int sdeb_zbc_model_str(const char *cp)
6714 {
6715 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6716 
6717 	if (res < 0) {
6718 		res = sysfs_match_string(zbc_model_strs_b, cp);
6719 		if (res < 0) {
6720 			res = sysfs_match_string(zbc_model_strs_c, cp);
6721 			if (res < 0)
6722 				return -EINVAL;
6723 		}
6724 	}
6725 	return res;
6726 }
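
/*
 * Each ZBC model is therefore accepted in three spellings; for example
 * "host-managed", "managed" and "2" all resolve to BLK_ZONED_HM, so the
 * module options zbc=managed and zbc=2 are equivalent.
 */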
6727 
6728 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6729 {
6730 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6731 			 zbc_model_strs_a[sdeb_zbc_model]);
6732 }
6733 static DRIVER_ATTR_RO(zbc);
6734 
6735 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6736 {
6737 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6738 }
6739 static DRIVER_ATTR_RO(tur_ms_to_ready);
6740 
6741 /* Note: The following array creates attribute files in the
6742    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6743    files (over those found in the /sys/module/scsi_debug/parameters
6744    directory) is that auxiliary actions can be triggered when an attribute
6745    is changed. For example, see add_host_store() above.
6746  */
6747 
6748 static struct attribute *sdebug_drv_attrs[] = {
6749 	&driver_attr_delay.attr,
6750 	&driver_attr_opts.attr,
6751 	&driver_attr_ptype.attr,
6752 	&driver_attr_dsense.attr,
6753 	&driver_attr_fake_rw.attr,
6754 	&driver_attr_host_max_queue.attr,
6755 	&driver_attr_no_lun_0.attr,
6756 	&driver_attr_num_tgts.attr,
6757 	&driver_attr_dev_size_mb.attr,
6758 	&driver_attr_num_parts.attr,
6759 	&driver_attr_every_nth.attr,
6760 	&driver_attr_lun_format.attr,
6761 	&driver_attr_max_luns.attr,
6762 	&driver_attr_max_queue.attr,
6763 	&driver_attr_no_rwlock.attr,
6764 	&driver_attr_no_uld.attr,
6765 	&driver_attr_scsi_level.attr,
6766 	&driver_attr_virtual_gb.attr,
6767 	&driver_attr_add_host.attr,
6768 	&driver_attr_per_host_store.attr,
6769 	&driver_attr_vpd_use_hostno.attr,
6770 	&driver_attr_sector_size.attr,
6771 	&driver_attr_statistics.attr,
6772 	&driver_attr_submit_queues.attr,
6773 	&driver_attr_dix.attr,
6774 	&driver_attr_dif.attr,
6775 	&driver_attr_guard.attr,
6776 	&driver_attr_ato.attr,
6777 	&driver_attr_map.attr,
6778 	&driver_attr_random.attr,
6779 	&driver_attr_removable.attr,
6780 	&driver_attr_host_lock.attr,
6781 	&driver_attr_ndelay.attr,
6782 	&driver_attr_strict.attr,
6783 	&driver_attr_uuid_ctl.attr,
6784 	&driver_attr_cdb_len.attr,
6785 	&driver_attr_tur_ms_to_ready.attr,
6786 	&driver_attr_zbc.attr,
6787 	NULL,
6788 };
6789 ATTRIBUTE_GROUPS(sdebug_drv);
6790 
6791 static struct device *pseudo_primary;
6792 
6793 static int __init scsi_debug_init(void)
6794 {
6795 	bool want_store = (sdebug_fake_rw == 0);
6796 	unsigned long sz;
6797 	int k, ret, hosts_to_add;
6798 	int idx = -1;
6799 
6800 	ramdisk_lck_a[0] = &atomic_rw;
6801 	ramdisk_lck_a[1] = &atomic_rw2;
6802 	atomic_set(&retired_max_queue, 0);
6803 
6804 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6805 		pr_warn("ndelay must be less than 1 second, ignored\n");
6806 		sdebug_ndelay = 0;
6807 	} else if (sdebug_ndelay > 0)
6808 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6809 
6810 	switch (sdebug_sector_size) {
6811 	case  512:
6812 	case 1024:
6813 	case 2048:
6814 	case 4096:
6815 		break;
6816 	default:
6817 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6818 		return -EINVAL;
6819 	}
6820 
6821 	switch (sdebug_dif) {
6822 	case T10_PI_TYPE0_PROTECTION:
6823 		break;
6824 	case T10_PI_TYPE1_PROTECTION:
6825 	case T10_PI_TYPE2_PROTECTION:
6826 	case T10_PI_TYPE3_PROTECTION:
6827 		have_dif_prot = true;
6828 		break;
6829 
6830 	default:
6831 		pr_err("dif must be 0, 1, 2 or 3\n");
6832 		return -EINVAL;
6833 	}
6834 
6835 	if (sdebug_num_tgts < 0) {
6836 		pr_err("num_tgts must be >= 0\n");
6837 		return -EINVAL;
6838 	}
6839 
6840 	if (sdebug_guard > 1) {
6841 		pr_err("guard must be 0 or 1\n");
6842 		return -EINVAL;
6843 	}
6844 
6845 	if (sdebug_ato > 1) {
6846 		pr_err("ato must be 0 or 1\n");
6847 		return -EINVAL;
6848 	}
6849 
6850 	if (sdebug_physblk_exp > 15) {
6851 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6852 		return -EINVAL;
6853 	}
6854 
6855 	sdebug_lun_am = sdebug_lun_am_i;
6856 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6857 		pr_warn("Invalid LUN format %d, using default\n", (int)sdebug_lun_am);
6858 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6859 	}
6860 
6861 	if (sdebug_max_luns > 256) {
6862 		if (sdebug_max_luns > 16384) {
6863 			pr_warn("max_luns can be no more than 16384, using default\n");
6864 			sdebug_max_luns = DEF_MAX_LUNS;
6865 		}
6866 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6867 	}
6868 
6869 	if (sdebug_lowest_aligned > 0x3fff) {
6870 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6871 		return -EINVAL;
6872 	}
6873 
6874 	if (submit_queues < 1) {
6875 		pr_err("submit_queues must be 1 or more\n");
6876 		return -EINVAL;
6877 	}
6878 
6879 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6880 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6881 		return -EINVAL;
6882 	}
6883 
6884 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6885 	    (sdebug_host_max_queue < 0)) {
6886 		pr_err("host_max_queue must be in range [0, %d]\n",
6887 		       SDEBUG_CANQUEUE);
6888 		return -EINVAL;
6889 	}
6890 
6891 	if (sdebug_host_max_queue &&
6892 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6893 		sdebug_max_queue = sdebug_host_max_queue;
6894 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6895 			sdebug_max_queue);
6896 	}
6897 
6898 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6899 			       GFP_KERNEL);
6900 	if (sdebug_q_arr == NULL)
6901 		return -ENOMEM;
6902 	for (k = 0; k < submit_queues; ++k)
6903 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6904 
6905 	/*
6906 	 * Check for a host-managed zoned block device specified with
6907 	 * ptype=0x14 or zbc=XXX.
6908 	 */
6909 	if (sdebug_ptype == TYPE_ZBC) {
6910 		sdeb_zbc_model = BLK_ZONED_HM;
6911 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6912 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6913 		if (k < 0) {
6914 			ret = k;
6915 			goto free_q_arr;
6916 		}
6917 		sdeb_zbc_model = k;
6918 		switch (sdeb_zbc_model) {
6919 		case BLK_ZONED_NONE:
6920 		case BLK_ZONED_HA:
6921 			sdebug_ptype = TYPE_DISK;
6922 			break;
6923 		case BLK_ZONED_HM:
6924 			sdebug_ptype = TYPE_ZBC;
6925 			break;
6926 		default:
6927 			pr_err("Invalid ZBC model\n");
6928 			ret = -EINVAL;
6929 			goto free_q_arr;
6930 		}
6931 	}
6932 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6933 		sdeb_zbc_in_use = true;
6934 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6935 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6936 	}
6937 
6938 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6939 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6940 	if (sdebug_dev_size_mb < 1)
6941 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6942 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6943 	sdebug_store_sectors = sz / sdebug_sector_size;
6944 	sdebug_capacity = get_sdebug_capacity();
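
	/*
	 * Worked example: with dev_size_mb=8 and sector_size=512,
	 * sz = 8 * 1048576 = 8388608 bytes and
	 * sdebug_store_sectors = 8388608 / 512 = 16384.
	 */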
6945 
6946 	/* play around with geometry, don't waste too much on track 0 */
6947 	sdebug_heads = 8;
6948 	sdebug_sectors_per = 32;
6949 	if (sdebug_dev_size_mb >= 256)
6950 		sdebug_heads = 64;
6951 	else if (sdebug_dev_size_mb >= 16)
6952 		sdebug_heads = 32;
6953 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6954 			       (sdebug_sectors_per * sdebug_heads);
6955 	if (sdebug_cylinders_per >= 1024) {
6956 		/* other LLDs do this; implies >= 1GB ram disk ... */
6957 		sdebug_heads = 255;
6958 		sdebug_sectors_per = 63;
6959 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6960 			       (sdebug_sectors_per * sdebug_heads);
6961 	}
6962 	if (scsi_debug_lbp()) {
6963 		sdebug_unmap_max_blocks =
6964 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6965 
6966 		sdebug_unmap_max_desc =
6967 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6968 
6969 		sdebug_unmap_granularity =
6970 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6971 
6972 		if (sdebug_unmap_alignment &&
6973 		    sdebug_unmap_granularity <=
6974 		    sdebug_unmap_alignment) {
6975 			pr_err("unmap_granularity <= unmap_alignment\n");
6976 			ret = -EINVAL;
6977 			goto free_q_arr;
6978 		}
6979 	}
6980 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6981 	if (want_store) {
6982 		idx = sdebug_add_store();
6983 		if (idx < 0) {
6984 			ret = idx;
6985 			goto free_q_arr;
6986 		}
6987 	}
6988 
6989 	pseudo_primary = root_device_register("pseudo_0");
6990 	if (IS_ERR(pseudo_primary)) {
6991 		pr_warn("root_device_register() error\n");
6992 		ret = PTR_ERR(pseudo_primary);
6993 		goto free_vm;
6994 	}
6995 	ret = bus_register(&pseudo_lld_bus);
6996 	if (ret < 0) {
6997 		pr_warn("bus_register error: %d\n", ret);
6998 		goto dev_unreg;
6999 	}
7000 	ret = driver_register(&sdebug_driverfs_driver);
7001 	if (ret < 0) {
7002 		pr_warn("driver_register error: %d\n", ret);
7003 		goto bus_unreg;
7004 	}
7005 
7006 	hosts_to_add = sdebug_add_host;
7007 	sdebug_add_host = 0;
7008 
7009 	for (k = 0; k < hosts_to_add; k++) {
7010 		if (want_store && k == 0) {
7011 			ret = sdebug_add_host_helper(idx);
7012 			if (ret < 0) {
7013 				pr_err("add_host_helper k=%d, error=%d\n",
7014 				       k, -ret);
7015 				break;
7016 			}
7017 		} else {
7018 			ret = sdebug_do_add_host(want_store &&
7019 						 sdebug_per_host_store);
7020 			if (ret < 0) {
7021 				pr_err("add_host k=%d error=%d\n", k, -ret);
7022 				break;
7023 			}
7024 		}
7025 	}
7026 	if (sdebug_verbose)
7027 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7028 
7029 	return 0;
7030 
7031 bus_unreg:
7032 	bus_unregister(&pseudo_lld_bus);
7033 dev_unreg:
7034 	root_device_unregister(pseudo_primary);
7035 free_vm:
7036 	sdebug_erase_store(idx, NULL);
7037 free_q_arr:
7038 	kfree(sdebug_q_arr);
7039 	return ret;
7040 }
7041 
7042 static void __exit scsi_debug_exit(void)
7043 {
7044 	int k = sdebug_num_hosts;
7045 
7046 	stop_all_queued();
7047 	for (; k; k--)
7048 		sdebug_do_remove_host(true);
7049 	free_all_queued();
7050 	driver_unregister(&sdebug_driverfs_driver);
7051 	bus_unregister(&pseudo_lld_bus);
7052 	root_device_unregister(pseudo_primary);
7053 
7054 	sdebug_erase_all_stores(false);
7055 	xa_destroy(per_store_ap);
7056 	kfree(sdebug_q_arr);
7057 }
7058 
7059 device_initcall(scsi_debug_init);
7060 module_exit(scsi_debug_exit);
7061 
7062 static void sdebug_release_adapter(struct device *dev)
7063 {
7064 	struct sdebug_host_info *sdbg_host;
7065 
7066 	sdbg_host = to_sdebug_host(dev);
7067 	kfree(sdbg_host);
7068 }
7069 
7070 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7071 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7072 {
7073 	if (idx < 0)
7074 		return;
7075 	if (!sip) {
7076 		if (xa_empty(per_store_ap))
7077 			return;
7078 		sip = xa_load(per_store_ap, idx);
7079 		if (!sip)
7080 			return;
7081 	}
7082 	vfree(sip->map_storep);
7083 	vfree(sip->dif_storep);
7084 	vfree(sip->storep);
7085 	xa_erase(per_store_ap, idx);
7086 	kfree(sip);
7087 }
7088 
7089 /* Assume apart_from_first==false only in shutdown case. */
7090 static void sdebug_erase_all_stores(bool apart_from_first)
7091 {
7092 	unsigned long idx;
7093 	struct sdeb_store_info *sip = NULL;
7094 
7095 	xa_for_each(per_store_ap, idx, sip) {
7096 		if (apart_from_first)
7097 			apart_from_first = false;
7098 		else
7099 			sdebug_erase_store(idx, sip);
7100 	}
7101 	if (apart_from_first)
7102 		sdeb_most_recent_idx = sdeb_first_idx;
7103 }
7104 
7105 /*
7106  * Returns store xarray new element index (idx) if >=0 else negated errno.
7107  * Limit the number of stores to 65536.
7108  */
7109 static int sdebug_add_store(void)
7110 {
7111 	int res;
7112 	u32 n_idx;
7113 	unsigned long iflags;
7114 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7115 	struct sdeb_store_info *sip = NULL;
7116 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7117 
7118 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7119 	if (!sip)
7120 		return -ENOMEM;
7121 
7122 	xa_lock_irqsave(per_store_ap, iflags);
7123 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7124 	if (unlikely(res < 0)) {
7125 		xa_unlock_irqrestore(per_store_ap, iflags);
7126 		kfree(sip);
7127 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7128 		return res;
7129 	}
7130 	sdeb_most_recent_idx = n_idx;
7131 	if (sdeb_first_idx < 0)
7132 		sdeb_first_idx = n_idx;
7133 	xa_unlock_irqrestore(per_store_ap, iflags);
7134 
7135 	res = -ENOMEM;
7136 	sip->storep = vzalloc(sz);
7137 	if (!sip->storep) {
7138 		pr_err("user data oom\n");
7139 		goto err;
7140 	}
7141 	if (sdebug_num_parts > 0)
7142 		sdebug_build_parts(sip->storep, sz);
7143 
7144 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7145 	if (sdebug_dix) {
7146 		int dif_size;
7147 
7148 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7149 		sip->dif_storep = vmalloc(dif_size);
7150 
7151 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7152 			sip->dif_storep);
7153 
7154 		if (!sip->dif_storep) {
7155 			pr_err("DIX oom\n");
7156 			goto err;
7157 		}
7158 		memset(sip->dif_storep, 0xff, dif_size);
7159 	}
7160 	/* Logical Block Provisioning */
7161 	if (scsi_debug_lbp()) {
7162 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7163 		sip->map_storep = vmalloc(array_size(sizeof(long),
7164 						     BITS_TO_LONGS(map_size)));
7165 
7166 		pr_info("%lu provisioning blocks\n", map_size);
7167 
7168 		if (!sip->map_storep) {
7169 			pr_err("LBP map oom\n");
7170 			goto err;
7171 		}
7172 
7173 		bitmap_zero(sip->map_storep, map_size);
7174 
7175 		/* Map first 1KB for partition table */
7176 		if (sdebug_num_parts)
7177 			map_region(sip, 0, 2);
7178 	}
7179 
7180 	rwlock_init(&sip->macc_lck);
7181 	return (int)n_idx;
7182 err:
7183 	sdebug_erase_store((int)n_idx, sip);
7184 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7185 	return res;
7186 }
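
/*
 * Rough per-store memory sketch: the ramdisk itself is dev_size_mb MiB of
 * vzalloc()ed memory; with dix set, protection information adds
 * sdebug_store_sectors * sizeof(struct t10_pi_tuple) bytes (8 bytes per
 * sector, i.e. 1/64th of a 512-byte-sector store); with LBP enabled the
 * map adds roughly one bit per unmap_granularity-sized region.
 */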
7187 
7188 static int sdebug_add_host_helper(int per_host_idx)
7189 {
7190 	int k, devs_per_host, idx;
7191 	int error = -ENOMEM;
7192 	struct sdebug_host_info *sdbg_host;
7193 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7194 
7195 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7196 	if (!sdbg_host)
7197 		return -ENOMEM;
7198 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7199 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7200 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7201 	sdbg_host->si_idx = idx;
7202 
7203 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7204 
7205 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7206 	for (k = 0; k < devs_per_host; k++) {
7207 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7208 		if (!sdbg_devinfo)
7209 			goto clean;
7210 	}
7211 
7212 	spin_lock(&sdebug_host_list_lock);
7213 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7214 	spin_unlock(&sdebug_host_list_lock);
7215 
7216 	sdbg_host->dev.bus = &pseudo_lld_bus;
7217 	sdbg_host->dev.parent = pseudo_primary;
7218 	sdbg_host->dev.release = &sdebug_release_adapter;
7219 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7220 
7221 	error = device_register(&sdbg_host->dev);
7222 	if (error)
7223 		goto clean;
7224 
7225 	++sdebug_num_hosts;
7226 	return 0;
7227 
7228 clean:
7229 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7230 				 dev_list) {
7231 		list_del(&sdbg_devinfo->dev_list);
7232 		kfree(sdbg_devinfo->zstate);
7233 		kfree(sdbg_devinfo);
7234 	}
7235 	kfree(sdbg_host);
7236 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7237 	return error;
7238 }
7239 
7240 static int sdebug_do_add_host(bool mk_new_store)
7241 {
7242 	int ph_idx = sdeb_most_recent_idx;
7243 
7244 	if (mk_new_store) {
7245 		ph_idx = sdebug_add_store();
7246 		if (ph_idx < 0)
7247 			return ph_idx;
7248 	}
7249 	return sdebug_add_host_helper(ph_idx);
7250 }
7251 
7252 static void sdebug_do_remove_host(bool the_end)
7253 {
7254 	int idx = -1;
7255 	struct sdebug_host_info *sdbg_host = NULL;
7256 	struct sdebug_host_info *sdbg_host2;
7257 
7258 	spin_lock(&sdebug_host_list_lock);
7259 	if (!list_empty(&sdebug_host_list)) {
7260 		sdbg_host = list_entry(sdebug_host_list.prev,
7261 				       struct sdebug_host_info, host_list);
7262 		idx = sdbg_host->si_idx;
7263 	}
7264 	if (!the_end && idx >= 0) {
7265 		bool unique = true;
7266 
7267 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7268 			if (sdbg_host2 == sdbg_host)
7269 				continue;
7270 			if (idx == sdbg_host2->si_idx) {
7271 				unique = false;
7272 				break;
7273 			}
7274 		}
7275 		if (unique) {
7276 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7277 			if (idx == sdeb_most_recent_idx)
7278 				--sdeb_most_recent_idx;
7279 		}
7280 	}
7281 	if (sdbg_host)
7282 		list_del(&sdbg_host->host_list);
7283 	spin_unlock(&sdebug_host_list_lock);
7284 
7285 	if (!sdbg_host)
7286 		return;
7287 
7288 	device_unregister(&sdbg_host->dev);
7289 	--sdebug_num_hosts;
7290 }
7291 
7292 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7293 {
7294 	int num_in_q = 0;
7295 	struct sdebug_dev_info *devip;
7296 
7297 	block_unblock_all_queues(true);
7298 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7299 	if (NULL == devip) {
7300 		block_unblock_all_queues(false);
7301 		return	-ENODEV;
7302 	}
7303 	num_in_q = atomic_read(&devip->num_in_q);
7304 
7305 	if (qdepth > SDEBUG_CANQUEUE) {
7306 		qdepth = SDEBUG_CANQUEUE;
7307 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trimming\n", __func__,
7308 			qdepth, SDEBUG_CANQUEUE);
7309 	}
7310 	if (qdepth < 1)
7311 		qdepth = 1;
7312 	if (qdepth != sdev->queue_depth)
7313 		scsi_change_queue_depth(sdev, qdepth);
7314 
7315 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7316 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7317 			    __func__, qdepth, num_in_q);
7318 	}
7319 	block_unblock_all_queues(false);
7320 	return sdev->queue_depth;
7321 }
7322 
7323 static bool fake_timeout(struct scsi_cmnd *scp)
7324 {
7325 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7326 		if (sdebug_every_nth < -1)
7327 			sdebug_every_nth = -1;
7328 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7329 			return true; /* ignore command causing timeout */
7330 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7331 			 scsi_medium_access_command(scp))
7332 			return true; /* time out reads and writes */
7333 	}
7334 	return false;
7335 }
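
/*
 * Illustration: with every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts,
 * every 100th command is silently ignored so that the mid-layer's command
 * timer expires; with SDEBUG_OPT_MAC_TIMEOUT only medium access commands
 * (e.g. reads and writes) are timed out this way.
 */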
7336 
7337 /* Response to TUR or media access command when device stopped */
7338 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7339 {
7340 	int stopped_state;
7341 	u64 diff_ns = 0;
7342 	ktime_t now_ts = ktime_get_boottime();
7343 	struct scsi_device *sdp = scp->device;
7344 
7345 	stopped_state = atomic_read(&devip->stopped);
7346 	if (stopped_state == 2) {
7347 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7348 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7349 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7350 				/* tur_ms_to_ready timer extinguished */
7351 				atomic_set(&devip->stopped, 0);
7352 				return 0;
7353 			}
7354 		}
7355 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7356 		if (sdebug_verbose)
7357 			sdev_printk(KERN_INFO, sdp,
7358 				    "%s: Not ready: in process of becoming ready\n", my_name);
7359 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7360 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7361 
7362 			if (diff_ns <= tur_nanosecs_to_ready)
7363 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7364 			else
7365 				diff_ns = tur_nanosecs_to_ready;
7366 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7367 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7368 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7369 						   diff_ns);
7370 			return check_condition_result;
7371 		}
7372 	}
7373 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7374 	if (sdebug_verbose)
7375 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7376 			    my_name);
7377 	return check_condition_result;
7378 }
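
/*
 * In sense-data terms: while tur_ms_to_ready has not yet elapsed the
 * device reports NOT READY, ASC/ASCQ 0x04/0x01 (becoming ready), and for
 * TEST UNIT READY the remaining time in milliseconds is placed in the
 * sense INFORMATION field (per 20-061r2); once stopped via START STOP
 * UNIT it reports 0x04/0x02 (initializing command required).
 */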
7379 
7380 static int sdebug_map_queues(struct Scsi_Host *shost)
7381 {
7382 	int i, qoff;
7383 
7384 	if (shost->nr_hw_queues == 1)
7385 		return 0;
7386 
7387 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7388 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7389 
7390 		map->nr_queues  = 0;
7391 
7392 		if (i == HCTX_TYPE_DEFAULT)
7393 			map->nr_queues = submit_queues - poll_queues;
7394 		else if (i == HCTX_TYPE_POLL)
7395 			map->nr_queues = poll_queues;
7396 
7397 		if (!map->nr_queues) {
7398 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7399 			continue;
7400 		}
7401 
7402 		map->queue_offset = qoff;
7403 		blk_mq_map_queues(map);
7404 
7405 		qoff += map->nr_queues;
7406 	}
7407 
7408 	return 0;
7409 
7410 }
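
/*
 * Mapping sketch: with submit_queues=8 and poll_queues=2 the loop above
 * gives HCTX_TYPE_DEFAULT 6 hardware queues at queue_offset 0 and
 * HCTX_TYPE_POLL the remaining 2 at queue_offset 6; HCTX_TYPE_READ gets
 * no queues and is skipped.
 */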
7411 
7412 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7413 {
7414 	bool first;
7415 	bool retiring = false;
7416 	int num_entries = 0;
7417 	unsigned int qc_idx = 0;
7418 	unsigned long iflags;
7419 	ktime_t kt_from_boot = ktime_get_boottime();
7420 	struct sdebug_queue *sqp;
7421 	struct sdebug_queued_cmd *sqcp;
7422 	struct scsi_cmnd *scp;
7423 	struct sdebug_dev_info *devip;
7424 	struct sdebug_defer *sd_dp;
7425 
7426 	sqp = sdebug_q_arr + queue_num;
7427 
7428 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7429 
7430 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7431 	if (qc_idx >= sdebug_max_queue)
7432 		goto unlock;
7433 
7434 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7435 		if (first) {
7436 			first = false;
7437 			if (!test_bit(qc_idx, sqp->in_use_bm))
7438 				continue;
7439 		} else {
7440 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7441 		}
7442 		if (qc_idx >= sdebug_max_queue)
7443 			break;
7444 
7445 		sqcp = &sqp->qc_arr[qc_idx];
7446 		sd_dp = sqcp->sd_dp;
7447 		if (unlikely(!sd_dp))
7448 			continue;
7449 		scp = sqcp->a_cmnd;
7450 		if (unlikely(scp == NULL)) {
7451 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7452 			       queue_num, qc_idx, __func__);
7453 			break;
7454 		}
7455 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7456 			if (kt_from_boot < sd_dp->cmpl_ts)
7457 				continue;
7458 
7459 		} else		/* ignoring non REQ_POLLED requests */
7460 			continue;
7461 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7462 		if (likely(devip))
7463 			atomic_dec(&devip->num_in_q);
7464 		else
7465 			pr_err("devip=NULL from %s\n", __func__);
7466 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7467 			retiring = true;
7468 
7469 		sqcp->a_cmnd = NULL;
7470 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7471 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7472 				sqp, queue_num, qc_idx, __func__);
7473 			break;
7474 		}
7475 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7476 			int k, retval;
7477 
7478 			retval = atomic_read(&retired_max_queue);
7479 			if (qc_idx >= retval) {
7480 				pr_err("index %u too large (>= %d)\n", qc_idx, retval);
7481 				break;
7482 			}
7483 			k = find_last_bit(sqp->in_use_bm, retval);
7484 			if ((k < sdebug_max_queue) || (k == retval))
7485 				atomic_set(&retired_max_queue, 0);
7486 			else
7487 				atomic_set(&retired_max_queue, k + 1);
7488 		}
7489 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7490 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7491 		scsi_done(scp); /* callback to mid level */
7492 		num_entries++;
7493 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7494 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7495 			break;
7496 	}
7497 
7498 unlock:
7499 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7500 
7501 	if (num_entries > 0)
7502 		atomic_add(num_entries, &sdeb_mq_poll_count);
7503 	return num_entries;
7504 }
7505 
7506 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7507 				   struct scsi_cmnd *scp)
7508 {
7509 	u8 sdeb_i;
7510 	struct scsi_device *sdp = scp->device;
7511 	const struct opcode_info_t *oip;
7512 	const struct opcode_info_t *r_oip;
7513 	struct sdebug_dev_info *devip;
7514 	u8 *cmd = scp->cmnd;
7515 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7516 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7517 	int k, na;
7518 	int errsts = 0;
7519 	u64 lun_index = sdp->lun & 0x3FFF;
7520 	u32 flags;
7521 	u16 sa;
7522 	u8 opcode = cmd[0];
7523 	bool has_wlun_rl;
7524 	bool inject_now;
7525 
7526 	scsi_set_resid(scp, 0);
7527 	if (sdebug_statistics) {
7528 		atomic_inc(&sdebug_cmnd_count);
7529 		inject_now = inject_on_this_cmd();
7530 	} else {
7531 		inject_now = false;
7532 	}
7533 	if (unlikely(sdebug_verbose &&
7534 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7535 		char b[120];
7536 		int n, len, sb;
7537 
7538 		len = scp->cmd_len;
7539 		sb = (int)sizeof(b);
7540 		if (len > 32)
7541 			strcpy(b, "too long, over 32 bytes");
7542 		else {
7543 			for (k = 0, n = 0; k < len && n < sb; ++k)
7544 				n += scnprintf(b + n, sb - n, "%02x ",
7545 					       (u32)cmd[k]);
7546 		}
7547 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7548 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7549 	}
7550 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7551 		return SCSI_MLQUEUE_HOST_BUSY;
7552 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7553 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7554 		goto err_out;
7555 
7556 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7557 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7558 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7559 	if (unlikely(!devip)) {
7560 		devip = find_build_dev_info(sdp);
7561 		if (NULL == devip)
7562 			goto err_out;
7563 	}
7564 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7565 		atomic_set(&sdeb_inject_pending, 1);
7566 
7567 	na = oip->num_attached;
7568 	r_pfp = oip->pfp;
7569 	if (na) {	/* multiple commands with this opcode */
7570 		r_oip = oip;
7571 		if (FF_SA & r_oip->flags) {
7572 			if (F_SA_LOW & oip->flags)
7573 				sa = 0x1f & cmd[1];
7574 			else
7575 				sa = get_unaligned_be16(cmd + 8);
7576 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7577 				if (opcode == oip->opcode && sa == oip->sa)
7578 					break;
7579 			}
7580 		} else {   /* since no service action only check opcode */
7581 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7582 				if (opcode == oip->opcode)
7583 					break;
7584 			}
7585 		}
7586 		if (k > na) {
7587 			if (F_SA_LOW & r_oip->flags)
7588 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7589 			else if (F_SA_HIGH & r_oip->flags)
7590 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7591 			else
7592 				mk_sense_invalid_opcode(scp);
7593 			goto check_cond;
7594 		}
7595 	}	/* else (when na==0) we assume the oip is a match */
7596 	flags = oip->flags;
7597 	if (unlikely(F_INV_OP & flags)) {
7598 		mk_sense_invalid_opcode(scp);
7599 		goto check_cond;
7600 	}
7601 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7602 		if (sdebug_verbose)
7603 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7604 				    my_name, opcode, " supported for wlun");
7605 		mk_sense_invalid_opcode(scp);
7606 		goto check_cond;
7607 	}
7608 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7609 		u8 rem;
7610 		int j;
7611 
7612 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7613 			rem = ~oip->len_mask[k] & cmd[k];
7614 			if (rem) {
7615 				for (j = 7; j >= 0; --j, rem <<= 1) {
7616 					if (0x80 & rem)
7617 						break;
7618 				}
7619 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7620 				goto check_cond;
7621 			}
7622 		}
7623 	}
7624 	if (unlikely(!(F_SKIP_UA & flags) &&
7625 		     find_first_bit(devip->uas_bm,
7626 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7627 		errsts = make_ua(scp, devip);
7628 		if (errsts)
7629 			goto check_cond;
7630 	}
7631 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7632 		     atomic_read(&devip->stopped))) {
7633 		errsts = resp_not_ready(scp, devip);
7634 		if (errsts)
7635 			goto fini;
7636 	}
7637 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7638 		goto fini;
7639 	if (unlikely(sdebug_every_nth)) {
7640 		if (fake_timeout(scp))
7641 			return 0;	/* ignore command: make trouble */
7642 	}
7643 	if (likely(oip->pfp))
7644 		pfp = oip->pfp;	/* calls a resp_* function */
7645 	else
7646 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7647 
7648 fini:
7649 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7650 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7651 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7652 					    sdebug_ndelay > 10000)) {
7653 		/*
7654 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise,
7655 		 * for Start Stop Unit (SSU) we want at least a 1 second delay;
7656 		 * if sdebug_jdelay > 1 we want a long delay of that many seconds.
7657 		 * For Synchronize Cache we want 1/20 of the SSU's delay.
7658 		 */
7659 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7660 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7661 
7662 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
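
		/*
		 * For example (assuming USER_HZ=100): jdelay=2 with HZ=1000
		 * yields mult_frac(200, 1000, 100) = 2000 jiffies (2 s) for
		 * SSU, or mult_frac(200, 1000, 2000) = 100 jiffies when
		 * F_SYNC_DELAY makes denom 20.
		 */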
7663 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7664 	} else
7665 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7666 				     sdebug_ndelay);
7667 check_cond:
7668 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7669 err_out:
7670 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7671 }
7672 
7673 static struct scsi_host_template sdebug_driver_template = {
7674 	.show_info =		scsi_debug_show_info,
7675 	.write_info =		scsi_debug_write_info,
7676 	.proc_name =		sdebug_proc_name,
7677 	.name =			"SCSI DEBUG",
7678 	.info =			scsi_debug_info,
7679 	.slave_alloc =		scsi_debug_slave_alloc,
7680 	.slave_configure =	scsi_debug_slave_configure,
7681 	.slave_destroy =	scsi_debug_slave_destroy,
7682 	.ioctl =		scsi_debug_ioctl,
7683 	.queuecommand =		scsi_debug_queuecommand,
7684 	.change_queue_depth =	sdebug_change_qdepth,
7685 	.map_queues =		sdebug_map_queues,
7686 	.mq_poll =		sdebug_blk_mq_poll,
7687 	.eh_abort_handler =	scsi_debug_abort,
7688 	.eh_device_reset_handler = scsi_debug_device_reset,
7689 	.eh_target_reset_handler = scsi_debug_target_reset,
7690 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7691 	.eh_host_reset_handler = scsi_debug_host_reset,
7692 	.can_queue =		SDEBUG_CANQUEUE,
7693 	.this_id =		7,
7694 	.sg_tablesize =		SG_MAX_SEGMENTS,
7695 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7696 	.max_sectors =		-1U,
7697 	.max_segment_size =	-1U,
7698 	.module =		THIS_MODULE,
7699 	.track_queue_depth =	1,
7700 };
7701 
7702 static int sdebug_driver_probe(struct device *dev)
7703 {
7704 	int error = 0;
7705 	struct sdebug_host_info *sdbg_host;
7706 	struct Scsi_Host *hpnt;
7707 	int hprot;
7708 
7709 	sdbg_host = to_sdebug_host(dev);
7710 
7711 	sdebug_driver_template.can_queue = sdebug_max_queue;
7712 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7713 	if (!sdebug_clustering)
7714 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7715 
7716 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7717 	if (NULL == hpnt) {
7718 		pr_err("scsi_host_alloc failed\n");
7719 		error = -ENODEV;
7720 		return error;
7721 	}
7722 	if (submit_queues > nr_cpu_ids) {
7723 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7724 			my_name, submit_queues, nr_cpu_ids);
7725 		submit_queues = nr_cpu_ids;
7726 	}
7727 	/*
7728 	 * Decide whether to tell the SCSI subsystem that we want mq. The
7729 	 * following should give the same answer for each host.
7730 	 */
7731 	hpnt->nr_hw_queues = submit_queues;
7732 	if (sdebug_host_max_queue)
7733 		hpnt->host_tagset = 1;
7734 
7735 	/* poll queues are only possible when nr_hw_queues > 1 */
7736 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7737 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7738 			 my_name, poll_queues, hpnt->nr_hw_queues);
7739 		poll_queues = 0;
7740 	}
7741 
7742 	/*
7743 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7744 	 * left over for non-polled I/O.
7745 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7746 	 */
7747 	if (poll_queues >= submit_queues) {
7748 		if (submit_queues < 3)
7749 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7750 		else
7751 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7752 				my_name, submit_queues - 1);
7753 		poll_queues = 1;
7754 	}
7755 	if (poll_queues)
7756 		hpnt->nr_maps = 3;
7757 
7758 	sdbg_host->shost = hpnt;
7759 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7760 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7761 		hpnt->max_id = sdebug_num_tgts + 1;
7762 	else
7763 		hpnt->max_id = sdebug_num_tgts;
7764 	/* = sdebug_max_luns; */
7765 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7766 
7767 	hprot = 0;
7768 
7769 	switch (sdebug_dif) {
7770 
7771 	case T10_PI_TYPE1_PROTECTION:
7772 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7773 		if (sdebug_dix)
7774 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7775 		break;
7776 
7777 	case T10_PI_TYPE2_PROTECTION:
7778 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7779 		if (sdebug_dix)
7780 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7781 		break;
7782 
7783 	case T10_PI_TYPE3_PROTECTION:
7784 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7785 		if (sdebug_dix)
7786 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7787 		break;
7788 
7789 	default:
7790 		if (sdebug_dix)
7791 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7792 		break;
7793 	}
7794 
7795 	scsi_host_set_prot(hpnt, hprot);
7796 
7797 	if (have_dif_prot || sdebug_dix)
7798 		pr_info("host protection%s%s%s%s%s%s%s\n",
7799 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7800 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7801 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7802 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7803 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7804 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7805 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7806 
7807 	if (sdebug_guard == 1)
7808 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7809 	else
7810 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7811 
7812 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7813 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7814 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7815 		sdebug_statistics = true;
7816 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7817 	if (error) {
7818 		pr_err("scsi_add_host failed\n");
7819 		error = -ENODEV;
7820 		scsi_host_put(hpnt);
7821 	} else {
7822 		scsi_scan_host(hpnt);
7823 	}
7824 
7825 	return error;
7826 }
7827 
7828 static void sdebug_driver_remove(struct device *dev)
7829 {
7830 	struct sdebug_host_info *sdbg_host;
7831 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7832 
7833 	sdbg_host = to_sdebug_host(dev);
7834 
7835 	scsi_remove_host(sdbg_host->shost);
7836 
7837 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7838 				 dev_list) {
7839 		list_del(&sdbg_devinfo->dev_list);
7840 		kfree(sdbg_devinfo->zstate);
7841 		kfree(sdbg_devinfo);
7842 	}
7843 
7844 	scsi_host_put(sdbg_host->shost);
7845 }
7846 
7847 static int pseudo_lld_bus_match(struct device *dev,
7848 				struct device_driver *dev_driver)
7849 {
7850 	return 1;
7851 }
7852 
7853 static struct bus_type pseudo_lld_bus = {
7854 	.name = "pseudo",
7855 	.match = pseudo_lld_bus_match,
7856 	.probe = sdebug_driver_probe,
7857 	.remove = sdebug_driver_remove,
7858 	.drv_groups = sdebug_drv_groups,
7859 };
7860