xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision e802ca75)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/mutex.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20210520";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define POWER_ON_OCCURRED_ASCQ 0x1
87 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
88 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
89 #define CAPACITY_CHANGED_ASCQ 0x9
90 #define SAVING_PARAMS_UNSUP 0x39
91 #define TRANSPORT_PROBLEM 0x4b
92 #define THRESHOLD_EXCEEDED 0x5d
93 #define LOW_POWER_COND_ON 0x5e
94 #define MISCOMPARE_VERIFY_ASC 0x1d
95 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
96 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
97 #define WRITE_ERROR_ASC 0xc
98 #define UNALIGNED_WRITE_ASCQ 0x4
99 #define WRITE_BOUNDARY_ASCQ 0x5
100 #define READ_INVDATA_ASCQ 0x6
101 #define READ_BOUNDARY_ASCQ 0x7
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here, lower numbers have higher
197  * priority. The UA numbers should form a sequence starting from 0, with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208 
209 /* when SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
210  * simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE.
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a "word" here is one long (BITS_PER_LONG bits) */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
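/*
 * Illustrative example (not part of the driver; the <h:c:t:l> address is an
 * assumption): with the defaults above a single device appears, and its
 * queue depth can be lowered at runtime via the sysfs path noted in the
 * comment above, e.g.:
 *
 *	echo 32 > /sys/class/scsi_device/2:0:0:0/device/queue_depth
 *
 * The written value cannot exceed SDEBUG_CANQUEUE.
 */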
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZONE_TYPE_CNV	= 0x1,
256 	ZBC_ZONE_TYPE_SWR	= 0x2,
257 	ZBC_ZONE_TYPE_SWP	= 0x3,
258 };
259 
260 /* enumeration names taken from table 26, zbcr05 */
261 enum sdebug_z_cond {
262 	ZBC_NOT_WRITE_POINTER	= 0x0,
263 	ZC1_EMPTY		= 0x1,
264 	ZC2_IMPLICIT_OPEN	= 0x2,
265 	ZC3_EXPLICIT_OPEN	= 0x3,
266 	ZC4_CLOSED		= 0x4,
267 	ZC6_READ_ONLY		= 0xd,
268 	ZC5_FULL		= 0xe,
269 	ZC7_OFFLINE		= 0xf,
270 };
271 
272 struct sdeb_zone_state {	/* ZBC: per zone state */
273 	enum sdebug_z_type z_type;
274 	enum sdebug_z_cond z_cond;
275 	bool z_non_seq_resource;
276 	unsigned int z_size;
277 	sector_t z_start;
278 	sector_t z_wp;
279 };
280 
281 struct sdebug_dev_info {
282 	struct list_head dev_list;
283 	unsigned int channel;
284 	unsigned int target;
285 	u64 lun;
286 	uuid_t lu_name;
287 	struct sdebug_host_info *sdbg_host;
288 	unsigned long uas_bm[1];
289 	atomic_t num_in_q;
290 	atomic_t stopped;	/* 1: by SSU, 2: device start */
291 	bool used;
292 
293 	/* For ZBC devices */
294 	enum blk_zoned_model zmodel;
295 	unsigned int zsize;
296 	unsigned int zsize_shift;
297 	unsigned int nr_zones;
298 	unsigned int nr_conv_zones;
299 	unsigned int nr_imp_open;
300 	unsigned int nr_exp_open;
301 	unsigned int nr_closed;
302 	unsigned int max_open;
303 	ktime_t create_ts;	/* time since bootup that this device was created */
304 	struct sdeb_zone_state *zstate;
305 };
306 
307 struct sdebug_host_info {
308 	struct list_head host_list;
309 	int si_idx;	/* sdeb_store_info (per host) xarray index */
310 	struct Scsi_Host *shost;
311 	struct device dev;
312 	struct list_head dev_info_list;
313 };
314 
315 /* There is an xarray of pointers to this struct's objects, one per host */
316 struct sdeb_store_info {
317 	rwlock_t macc_lck;	/* for atomic media access on this store */
318 	u8 *storep;		/* user data storage (ram) */
319 	struct t10_pi_tuple *dif_storep; /* protection info */
320 	void *map_storep;	/* provisioning map */
321 };
322 
323 #define to_sdebug_host(d)	\
324 	container_of(d, struct sdebug_host_info, dev)
325 
326 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
327 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
328 
329 struct sdebug_defer {
330 	struct hrtimer hrt;
331 	struct execute_work ew;
332 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
333 	int sqa_idx;	/* index of sdebug_queue array */
334 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
335 	int hc_idx;	/* hostwide tag index */
336 	int issuing_cpu;
337 	bool init_hrt;
338 	bool init_wq;
339 	bool init_poll;
340 	bool aborted;	/* true when blk_abort_request() already called */
341 	enum sdeb_defer_type defer_t;
342 };
343 
344 struct sdebug_queued_cmd {
345 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
346 	 * instance indicates this slot is in use.
347 	 */
348 	struct sdebug_defer *sd_dp;
349 	struct scsi_cmnd *a_cmnd;
350 };
351 
352 struct sdebug_queue {
353 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
354 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
355 	spinlock_t qc_lock;
356 	atomic_t blocked;	/* to temporarily stop more being queued */
357 };
358 
359 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
360 static atomic_t sdebug_completions;  /* count of deferred completions */
361 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
362 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
363 static atomic_t sdeb_inject_pending;
364 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
365 
366 struct opcode_info_t {
367 	u8 num_attached;	/* 0 if this entry is a leaf; use 0xff */
368 				/* for the terminating element */
369 	u8 opcode;		/* if num_attached > 0, preferred */
370 	u16 sa;			/* service action */
371 	u32 flags;		/* OR-ed set of the F_* flags above */
372 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
373 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
374 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
375 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
376 };
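/*
 * Illustrative sketch of how len_mask[] is consumed (an assumed helper,
 * simplified from the strict-mode cdb check elsewhere in this driver):
 * each cdb byte after the opcode is compared against its mask, and a set
 * bit outside the mask indicates a reserved/invalid field.
 *
 *	static bool cdb_fields_ok(const u8 *cmd, const struct opcode_info_t *oip)
 *	{
 *		int k, cdb_len = oip->len_mask[0];
 *
 *		for (k = 1; k < cdb_len && k < 16; ++k)
 *			if (cmd[k] & ~oip->len_mask[k])
 *				return false;	// reserved bit set in cdb[k]
 *		return true;
 *	}
 */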
377 
378 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
379 enum sdeb_opcode_index {
380 	SDEB_I_INVALID_OPCODE =	0,
381 	SDEB_I_INQUIRY = 1,
382 	SDEB_I_REPORT_LUNS = 2,
383 	SDEB_I_REQUEST_SENSE = 3,
384 	SDEB_I_TEST_UNIT_READY = 4,
385 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
386 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
387 	SDEB_I_LOG_SENSE = 7,
388 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
389 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
390 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
391 	SDEB_I_START_STOP = 11,
392 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
393 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
394 	SDEB_I_MAINT_IN = 14,
395 	SDEB_I_MAINT_OUT = 15,
396 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
397 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
398 	SDEB_I_RESERVE = 18,		/* 6, 10 */
399 	SDEB_I_RELEASE = 19,		/* 6, 10 */
400 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
401 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
402 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
403 	SDEB_I_SEND_DIAG = 23,
404 	SDEB_I_UNMAP = 24,
405 	SDEB_I_WRITE_BUFFER = 25,
406 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
407 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
408 	SDEB_I_COMP_WRITE = 28,
409 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
410 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
411 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
412 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
413 };
414 
415 
416 static const unsigned char opcode_ind_arr[256] = {
417 /* 0x0; 0x0->0x1f: 6 byte cdbs */
418 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
419 	    0, 0, 0, 0,
420 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
421 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
422 	    SDEB_I_RELEASE,
423 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
424 	    SDEB_I_ALLOW_REMOVAL, 0,
425 /* 0x20; 0x20->0x3f: 10 byte cdbs */
426 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
427 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
428 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
429 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
430 /* 0x40; 0x40->0x5f: 10 byte cdbs */
431 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
432 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
433 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
434 	    SDEB_I_RELEASE,
435 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
436 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
437 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
438 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
439 	0, SDEB_I_VARIABLE_LEN,
440 /* 0x80; 0x80->0x9f: 16 byte cdbs */
441 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
442 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
443 	0, 0, 0, SDEB_I_VERIFY,
444 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
445 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
446 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
447 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
448 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
449 	     SDEB_I_MAINT_OUT, 0, 0, 0,
450 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
451 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
452 	0, 0, 0, 0, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0,
454 /* 0xc0; 0xc0->0xff: vendor specific */
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 };
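/*
 * Example (illustrative only): the first cdb byte indexes this table to
 * select a slot in opcode_info_arr[] below. For instance READ(10),
 * opcode 0x28, maps to SDEB_I_READ:
 *
 *	int idx = opcode_ind_arr[cmd[0]];	// cmd[0] == 0x28 --> SDEB_I_READ
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 */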
460 
461 /*
462  * The following "response" functions return the SCSI mid-level's 4 byte
463  * tuple-in-an-int. To handle commands with an IMMED bit, requesting faster
464  * command completion, they can OR their return value with
465  * SDEG_RES_IMMED_MASK.
466  */
467 #define SDEG_RES_IMMED_MASK 0x40000000
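/*
 * Example (sketch): a response function honoring an IMMED bit in its cdb
 * can request early completion by OR-ing in the mask before returning:
 *
 *	return res | SDEG_RES_IMMED_MASK;	// complete without full delay
 */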
468 
469 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 
499 static int sdebug_do_add_host(bool mk_new_store);
500 static int sdebug_add_host_helper(int per_host_idx);
501 static void sdebug_do_remove_host(bool the_end);
502 static int sdebug_add_store(void);
503 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
504 static void sdebug_erase_all_stores(bool apart_from_first);
505 
506 /*
507  * The following are overflow arrays for cdbs that "hit" the same index in
508  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
509  * should be placed in opcode_info_arr[], the others should be placed here.
510  */
511 static const struct opcode_info_t msense_iarr[] = {
512 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
513 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 };
515 
516 static const struct opcode_info_t mselect_iarr[] = {
517 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
518 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 };
520 
521 static const struct opcode_info_t read_iarr[] = {
522 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
523 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
524 	     0, 0, 0, 0} },
525 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
526 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
527 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
528 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
529 	     0xc7, 0, 0, 0, 0} },
530 };
531 
532 static const struct opcode_info_t write_iarr[] = {
533 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
534 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
535 		   0, 0, 0, 0, 0, 0} },
536 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
537 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
538 		   0, 0, 0} },
539 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
540 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
541 		   0xbf, 0xc7, 0, 0, 0, 0} },
542 };
543 
544 static const struct opcode_info_t verify_iarr[] = {
545 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
546 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
547 		   0, 0, 0, 0, 0, 0} },
548 };
549 
550 static const struct opcode_info_t sa_in_16_iarr[] = {
551 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
552 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
553 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
554 };
555 
556 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
557 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
558 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
559 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
560 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
561 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
562 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
563 };
564 
565 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
566 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
567 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
568 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
569 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
570 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
571 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
572 };
573 
574 static const struct opcode_info_t write_same_iarr[] = {
575 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
576 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
577 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
578 };
579 
580 static const struct opcode_info_t reserve_iarr[] = {
581 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
582 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 };
584 
585 static const struct opcode_info_t release_iarr[] = {
586 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
587 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 };
589 
590 static const struct opcode_info_t sync_cache_iarr[] = {
591 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
592 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
593 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
594 };
595 
596 static const struct opcode_info_t pre_fetch_iarr[] = {
597 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
598 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
600 };
601 
602 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
603 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
604 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
606 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
607 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
609 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
610 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
612 };
613 
614 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
615 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
616 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
617 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
618 };
619 
620 
621 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
622  * plus a terminating element, for logic that scans this table (such as
623  * REPORT SUPPORTED OPERATION CODES). */
624 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
625 /* 0 */
626 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
627 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
629 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
630 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
631 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
632 	     0, 0} },					/* REPORT LUNS */
633 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
634 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
636 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
637 /* 5 */
638 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
639 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
640 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
641 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
642 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
643 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
644 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
645 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
646 	     0, 0, 0} },
647 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
648 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
649 	     0, 0} },
650 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
651 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
652 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
653 /* 10 */
654 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
655 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
656 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
657 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
659 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
660 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
661 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
662 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
663 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
664 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
665 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
666 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
667 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
668 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
669 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
670 				0xff, 0, 0xc7, 0, 0, 0, 0} },
671 /* 15 */
672 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
673 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
674 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
675 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
676 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
677 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
678 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
679 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
680 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
681 	     0xff, 0xff} },
682 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
683 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
684 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
685 	     0} },
686 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
687 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
688 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 	     0} },
690 /* 20 */
691 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
692 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
694 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
696 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
698 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
700 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
701 /* 25 */
702 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
703 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
704 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
705 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
706 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
707 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
708 		 0, 0, 0, 0, 0} },
709 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
710 	    resp_sync_cache, sync_cache_iarr,
711 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
712 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
713 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
714 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
715 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
716 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
717 	    resp_pre_fetch, pre_fetch_iarr,
718 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
719 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
720 
721 /* 30 */
722 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
723 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
724 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
725 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
726 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
727 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
728 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
730 /* sentinel */
731 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
732 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
733 };
734 
735 static atomic_t sdebug_num_hosts;
736 static DEFINE_MUTEX(add_host_mutex);
737 
738 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
739 static int sdebug_ato = DEF_ATO;
740 static int sdebug_cdb_len = DEF_CDB_LEN;
741 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
742 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
743 static int sdebug_dif = DEF_DIF;
744 static int sdebug_dix = DEF_DIX;
745 static int sdebug_dsense = DEF_D_SENSE;
746 static int sdebug_every_nth = DEF_EVERY_NTH;
747 static int sdebug_fake_rw = DEF_FAKE_RW;
748 static unsigned int sdebug_guard = DEF_GUARD;
749 static int sdebug_host_max_queue;	/* per host */
750 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
751 static int sdebug_max_luns = DEF_MAX_LUNS;
752 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
753 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
754 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
755 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
756 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
757 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
758 static int sdebug_no_uld;
759 static int sdebug_num_parts = DEF_NUM_PARTS;
760 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
761 static int sdebug_opt_blks = DEF_OPT_BLKS;
762 static int sdebug_opts = DEF_OPTS;
763 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
764 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
765 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
766 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
767 static int sdebug_sector_size = DEF_SECTOR_SIZE;
768 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
769 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
770 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
771 static unsigned int sdebug_lbpu = DEF_LBPU;
772 static unsigned int sdebug_lbpws = DEF_LBPWS;
773 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
774 static unsigned int sdebug_lbprz = DEF_LBPRZ;
775 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
776 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
777 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
778 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
779 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
780 static int sdebug_uuid_ctl = DEF_UUID_CTL;
781 static bool sdebug_random = DEF_RANDOM;
782 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
783 static bool sdebug_removable = DEF_REMOVABLE;
784 static bool sdebug_deflect_incoming;
785 static bool sdebug_clustering;
786 static bool sdebug_host_lock = DEF_HOST_LOCK;
787 static bool sdebug_strict = DEF_STRICT;
788 static bool sdebug_any_injecting_opt;
789 static bool sdebug_no_rwlock;
790 static bool sdebug_verbose;
791 static bool have_dif_prot;
792 static bool write_since_sync;
793 static bool sdebug_statistics = DEF_STATISTICS;
794 static bool sdebug_wp;
795 /* Following enum: 0: no zbc (default); 1: host aware; 2: host managed */
796 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
797 static char *sdeb_zbc_model_s;
798 
799 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
800 			  SAM_LUN_AM_FLAT = 0x1,
801 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
802 			  SAM_LUN_AM_EXTENDED = 0x3};
803 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
804 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
805 
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity;	/* in sectors */
808 
809 /* old BIOS stuff; the kernel may get rid of these but some mode sense
810    pages may still need them */
811 static int sdebug_heads;		/* heads per disk */
812 static int sdebug_cylinders_per;	/* cylinders per surface */
813 static int sdebug_sectors_per;		/* sectors per cylinder */
814 
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_SPINLOCK(sdebug_host_list_lock);
817 
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
823 
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
833 
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_size_mb;
837 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
838 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
839 
840 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
841 static int poll_queues; /* io_uring iopoll interface */
842 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
843 
844 static DEFINE_RWLOCK(atomic_rw);
845 static DEFINE_RWLOCK(atomic_rw2);
846 
847 static rwlock_t *ramdisk_lck_a[2];
848 
849 static char sdebug_proc_name[] = MY_NAME;
850 static const char *my_name = MY_NAME;
851 
852 static struct bus_type pseudo_lld_bus;
853 
854 static struct device_driver sdebug_driverfs_driver = {
855 	.name 		= sdebug_proc_name,
856 	.bus		= &pseudo_lld_bus,
857 };
858 
859 static const int check_condition_result =
860 	SAM_STAT_CHECK_CONDITION;
861 
862 static const int illegal_condition_result =
863 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
864 
865 static const int device_qfull_result =
866 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
867 
868 static const int condition_met_result = SAM_STAT_CONDITION_MET;
869 
870 
871 /* Only do the extra work involved in logical block provisioning if one or
872  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
873  * real reads and writes (i.e. not skipping them for speed).
874  */
875 static inline bool scsi_debug_lbp(void)
876 {
877 	return 0 == sdebug_fake_rw &&
878 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
879 }
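/*
 * Example (hedged; lbpu, lbpws and fake_rw are module parameters of this
 * driver, defined further down this file): to exercise the logical block
 * provisioning paths, load with one of the lbp* parameters and real
 * (non-fake) reads and writes, e.g.:
 *
 *	modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 */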
880 
881 static void *lba2fake_store(struct sdeb_store_info *sip,
882 			    unsigned long long lba)
883 {
884 	struct sdeb_store_info *lsip = sip;
885 
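	/* do_div() divides its first argument in place and returns the
	 * remainder; the assignment below leaves lba as the sector offset
	 * within the (virtually wrapped) store.
	 */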
886 	lba = do_div(lba, sdebug_store_sectors);
887 	if (!sip || !sip->storep) {
888 		WARN_ON_ONCE(true);
889 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
890 	}
891 	return lsip->storep + lba * sdebug_sector_size;
892 }
893 
894 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
895 				      sector_t sector)
896 {
897 	sector = sector_div(sector, sdebug_store_sectors);
898 
899 	return sip->dif_storep + sector;
900 }
901 
902 static void sdebug_max_tgts_luns(void)
903 {
904 	struct sdebug_host_info *sdbg_host;
905 	struct Scsi_Host *hpnt;
906 
907 	spin_lock(&sdebug_host_list_lock);
908 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
909 		hpnt = sdbg_host->shost;
910 		if ((hpnt->this_id >= 0) &&
911 		    (sdebug_num_tgts > hpnt->this_id))
912 			hpnt->max_id = sdebug_num_tgts + 1;
913 		else
914 			hpnt->max_id = sdebug_num_tgts;
915 		/* sdebug_max_luns; */
916 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
917 	}
918 	spin_unlock(&sdebug_host_list_lock);
919 }
920 
921 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
922 
923 /* Set in_bit to -1 when no bit position within the invalid field applies */
924 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
925 				 enum sdeb_cmd_data c_d,
926 				 int in_byte, int in_bit)
927 {
928 	unsigned char *sbuff;
929 	u8 sks[4];
930 	int sl, asc;
931 
932 	sbuff = scp->sense_buffer;
933 	if (!sbuff) {
934 		sdev_printk(KERN_ERR, scp->device,
935 			    "%s: sense_buffer is NULL\n", __func__);
936 		return;
937 	}
938 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
939 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
940 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
941 	memset(sks, 0, sizeof(sks));
942 	sks[0] = 0x80;
943 	if (c_d)
944 		sks[0] |= 0x40;
945 	if (in_bit >= 0) {
946 		sks[0] |= 0x8;
947 		sks[0] |= 0x7 & in_bit;
948 	}
949 	put_unaligned_be16(in_byte, sks + 1);
950 	if (sdebug_dsense) {
951 		sl = sbuff[7] + 8;
952 		sbuff[7] = sl;
953 		sbuff[sl] = 0x2;
954 		sbuff[sl + 1] = 0x6;
955 		memcpy(sbuff + sl + 4, sks, 3);
956 	} else
957 		memcpy(sbuff + 15, sks, 3);
958 	if (sdebug_verbose)
959 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
960 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
961 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
962 }
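/*
 * Usage example (illustrative): flagging bit 4 of cdb byte 1 as the
 * offending field would be written as:
 *
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
 *
 * which builds an ILLEGAL REQUEST sense with INVALID FIELD IN CDB plus
 * sense-key specific bytes pointing at that byte and bit.
 */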
963 
964 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
965 {
966 	if (!scp->sense_buffer) {
967 		sdev_printk(KERN_ERR, scp->device,
968 			    "%s: sense_buffer is NULL\n", __func__);
969 		return;
970 	}
971 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
972 
973 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
974 
975 	if (sdebug_verbose)
976 		sdev_printk(KERN_INFO, scp->device,
977 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
978 			    my_name, key, asc, asq);
979 }
980 
981 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
982 {
983 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
984 }
985 
986 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
987 			    void __user *arg)
988 {
989 	if (sdebug_verbose) {
990 		if (0x1261 == cmd)
991 			sdev_printk(KERN_INFO, dev,
992 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
993 		else if (0x5331 == cmd)
994 			sdev_printk(KERN_INFO, dev,
995 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
996 				    __func__);
997 		else
998 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
999 				    __func__, cmd);
1000 	}
1001 	return -EINVAL;
1002 	/* return -ENOTTY; // correct return but upsets fdisk */
1003 }
1004 
1005 static void config_cdb_len(struct scsi_device *sdev)
1006 {
1007 	switch (sdebug_cdb_len) {
1008 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1009 		sdev->use_10_for_rw = false;
1010 		sdev->use_16_for_rw = false;
1011 		sdev->use_10_for_ms = false;
1012 		break;
1013 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1014 		sdev->use_10_for_rw = true;
1015 		sdev->use_16_for_rw = false;
1016 		sdev->use_10_for_ms = false;
1017 		break;
1018 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1019 		sdev->use_10_for_rw = true;
1020 		sdev->use_16_for_rw = false;
1021 		sdev->use_10_for_ms = true;
1022 		break;
1023 	case 16:
1024 		sdev->use_10_for_rw = false;
1025 		sdev->use_16_for_rw = true;
1026 		sdev->use_10_for_ms = true;
1027 		break;
1028 	case 32: /* No knobs to suggest this so same as 16 for now */
1029 		sdev->use_10_for_rw = false;
1030 		sdev->use_16_for_rw = true;
1031 		sdev->use_10_for_ms = true;
1032 		break;
1033 	default:
1034 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1035 			sdebug_cdb_len);
1036 		sdev->use_10_for_rw = true;
1037 		sdev->use_16_for_rw = false;
1038 		sdev->use_10_for_ms = false;
1039 		sdebug_cdb_len = 10;
1040 		break;
1041 	}
1042 }
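/*
 * Example (hedged; cdb_len is a module parameter of this driver, defined
 * later in this file): the suggested cdb sizes above can be chosen at load
 * time, e.g.:
 *
 *	modprobe scsi_debug cdb_len=16
 */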
1043 
1044 static void all_config_cdb_len(void)
1045 {
1046 	struct sdebug_host_info *sdbg_host;
1047 	struct Scsi_Host *shost;
1048 	struct scsi_device *sdev;
1049 
1050 	spin_lock(&sdebug_host_list_lock);
1051 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1052 		shost = sdbg_host->shost;
1053 		shost_for_each_device(sdev, shost) {
1054 			config_cdb_len(sdev);
1055 		}
1056 	}
1057 	spin_unlock(&sdebug_host_list_lock);
1058 }
1059 
1060 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1061 {
1062 	struct sdebug_host_info *sdhp;
1063 	struct sdebug_dev_info *dp;
1064 
1065 	spin_lock(&sdebug_host_list_lock);
1066 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1067 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1068 			if ((devip->sdbg_host == dp->sdbg_host) &&
1069 			    (devip->target == dp->target))
1070 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1071 		}
1072 	}
1073 	spin_unlock(&sdebug_host_list_lock);
1074 }
1075 
1076 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1077 {
1078 	int k;
1079 
1080 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1081 	if (k != SDEBUG_NUM_UAS) {
1082 		const char *cp = NULL;
1083 
1084 		switch (k) {
1085 		case SDEBUG_UA_POR:
1086 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1087 					POWER_ON_RESET_ASCQ);
1088 			if (sdebug_verbose)
1089 				cp = "power on reset";
1090 			break;
1091 		case SDEBUG_UA_POOCCUR:
1092 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1093 					POWER_ON_OCCURRED_ASCQ);
1094 			if (sdebug_verbose)
1095 				cp = "power on occurred";
1096 			break;
1097 		case SDEBUG_UA_BUS_RESET:
1098 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1099 					BUS_RESET_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "bus reset";
1102 			break;
1103 		case SDEBUG_UA_MODE_CHANGED:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1105 					MODE_CHANGED_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "mode parameters changed";
1108 			break;
1109 		case SDEBUG_UA_CAPACITY_CHANGED:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1111 					CAPACITY_CHANGED_ASCQ);
1112 			if (sdebug_verbose)
1113 				cp = "capacity data changed";
1114 			break;
1115 		case SDEBUG_UA_MICROCODE_CHANGED:
1116 			mk_sense_buffer(scp, UNIT_ATTENTION,
1117 					TARGET_CHANGED_ASC,
1118 					MICROCODE_CHANGED_ASCQ);
1119 			if (sdebug_verbose)
1120 				cp = "microcode has been changed";
1121 			break;
1122 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1123 			mk_sense_buffer(scp, UNIT_ATTENTION,
1124 					TARGET_CHANGED_ASC,
1125 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1126 			if (sdebug_verbose)
1127 				cp = "microcode has been changed without reset";
1128 			break;
1129 		case SDEBUG_UA_LUNS_CHANGED:
1130 			/*
1131 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1132 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1133 			 * on the target, until a REPORT LUNS command is
1134 			 * received.  SPC-4 behavior is to report it only once.
1135 			 * NOTE:  sdebug_scsi_level does not use the same
1136 			 * values as struct scsi_device->scsi_level.
1137 			 */
1138 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1139 				clear_luns_changed_on_target(devip);
1140 			mk_sense_buffer(scp, UNIT_ATTENTION,
1141 					TARGET_CHANGED_ASC,
1142 					LUNS_CHANGED_ASCQ);
1143 			if (sdebug_verbose)
1144 				cp = "reported luns data has changed";
1145 			break;
1146 		default:
1147 			pr_warn("unexpected unit attention code=%d\n", k);
1148 			if (sdebug_verbose)
1149 				cp = "unknown";
1150 			break;
1151 		}
1152 		clear_bit(k, devip->uas_bm);
1153 		if (sdebug_verbose)
1154 			sdev_printk(KERN_INFO, scp->device,
1155 				   "%s reports: Unit attention: %s\n",
1156 				   my_name, cp);
1157 		return check_condition_result;
1158 	}
1159 	return 0;
1160 }
1161 
1162 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1163 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1164 				int arr_len)
1165 {
1166 	int act_len;
1167 	struct scsi_data_buffer *sdb = &scp->sdb;
1168 
1169 	if (!sdb->length)
1170 		return 0;
1171 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1172 		return DID_ERROR << 16;
1173 
1174 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1175 				      arr, arr_len);
1176 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1177 
1178 	return 0;
1179 }
1180 
1181 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1182  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1183  * calls, not required to write in ascending offset order. Assumes resid
1184  * set to scsi_bufflen() prior to any calls.
1185  */
1186 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1187 				  int arr_len, unsigned int off_dst)
1188 {
1189 	unsigned int act_len, n;
1190 	struct scsi_data_buffer *sdb = &scp->sdb;
1191 	off_t skip = off_dst;
1192 
1193 	if (sdb->length <= off_dst)
1194 		return 0;
1195 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1196 		return DID_ERROR << 16;
1197 
1198 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1199 				       arr, arr_len, skip);
1200 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1201 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1202 		 scsi_get_resid(scp));
1203 	n = scsi_bufflen(scp) - (off_dst + act_len);
1204 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1205 	return 0;
1206 }
1207 
1208 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1209  * 'arr' or -1 if error.
1210  */
1211 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1212 			       int arr_len)
1213 {
1214 	if (!scsi_bufflen(scp))
1215 		return 0;
1216 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1217 		return -1;
1218 
1219 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1220 }
1221 
1222 
1223 static char sdebug_inq_vendor_id[9] = "Linux   ";
1224 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1225 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1226 /* Use some locally assigned NAAs for SAS addresses. */
1227 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1228 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1229 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1230 
1231 /* Device identification VPD page. Returns number of bytes placed in arr */
1232 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1233 			  int target_dev_id, int dev_id_num,
1234 			  const char *dev_id_str, int dev_id_str_len,
1235 			  const uuid_t *lu_name)
1236 {
1237 	int num, port_a;
1238 	char b[32];
1239 
1240 	port_a = target_dev_id + 1;
1241 	/* T10 vendor identifier field format (faked) */
1242 	arr[0] = 0x2;	/* ASCII */
1243 	arr[1] = 0x1;
1244 	arr[2] = 0x0;
1245 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1246 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1247 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1248 	num = 8 + 16 + dev_id_str_len;
1249 	arr[3] = num;
1250 	num += 4;
1251 	if (dev_id_num >= 0) {
1252 		if (sdebug_uuid_ctl) {
1253 			/* Locally assigned UUID */
1254 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1255 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1256 			arr[num++] = 0x0;
1257 			arr[num++] = 0x12;
1258 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1259 			arr[num++] = 0x0;
1260 			memcpy(arr + num, lu_name, 16);
1261 			num += 16;
1262 		} else {
1263 			/* NAA-3, Logical unit identifier (binary) */
1264 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1265 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1266 			arr[num++] = 0x0;
1267 			arr[num++] = 0x8;
1268 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1269 			num += 8;
1270 		}
1271 		/* Target relative port number */
1272 		arr[num++] = 0x61;	/* proto=sas, binary */
1273 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1274 		arr[num++] = 0x0;	/* reserved */
1275 		arr[num++] = 0x4;	/* length */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x0;
1279 		arr[num++] = 0x1;	/* relative port A */
1280 	}
1281 	/* NAA-3, Target port identifier */
1282 	arr[num++] = 0x61;	/* proto=sas, binary */
1283 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1284 	arr[num++] = 0x0;
1285 	arr[num++] = 0x8;
1286 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1287 	num += 8;
1288 	/* NAA-3, Target port group identifier */
1289 	arr[num++] = 0x61;	/* proto=sas, binary */
1290 	arr[num++] = 0x95;	/* piv=1, target port group id */
1291 	arr[num++] = 0x0;
1292 	arr[num++] = 0x4;
1293 	arr[num++] = 0;
1294 	arr[num++] = 0;
1295 	put_unaligned_be16(port_group_id, arr + num);
1296 	num += 2;
1297 	/* NAA-3, Target device identifier */
1298 	arr[num++] = 0x61;	/* proto=sas, binary */
1299 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1300 	arr[num++] = 0x0;
1301 	arr[num++] = 0x8;
1302 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1303 	num += 8;
1304 	/* SCSI name string: Target device identifier */
1305 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1306 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1307 	arr[num++] = 0x0;
1308 	arr[num++] = 24;
1309 	memcpy(arr + num, "naa.32222220", 12);
1310 	num += 12;
1311 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1312 	memcpy(arr + num, b, 8);
1313 	num += 8;
1314 	memset(arr + num, 0, 4);
1315 	num += 4;
1316 	return num;
1317 }
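/*
 * Example (assumes the sg3_utils package is installed): the descriptors
 * built above can be inspected from userspace with, e.g.:
 *
 *	sg_vpd --page=di /dev/sdX
 *
 * where /dev/sdX is a scsi_debug disk and "di" selects the device
 * identification (0x83) VPD page.
 */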
1318 
1319 static unsigned char vpd84_data[] = {
1320 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1321     0x22,0x22,0x22,0x0,0xbb,0x1,
1322     0x22,0x22,0x22,0x0,0xbb,0x2,
1323 };
1324 
1325 /*  Software interface identification VPD page */
1326 static int inquiry_vpd_84(unsigned char *arr)
1327 {
1328 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1329 	return sizeof(vpd84_data);
1330 }
1331 
1332 /* Management network addresses VPD page */
1333 static int inquiry_vpd_85(unsigned char *arr)
1334 {
1335 	int num = 0;
1336 	const char *na1 = "https://www.kernel.org/config";
1337 	const char *na2 = "http://www.kernel.org/log";
1338 	int plen, olen;
1339 
1340 	arr[num++] = 0x1;	/* lu, storage config */
1341 	arr[num++] = 0x0;	/* reserved */
1342 	arr[num++] = 0x0;
1343 	olen = strlen(na1);
1344 	plen = olen + 1;
1345 	if (plen % 4)
1346 		plen = ((plen / 4) + 1) * 4;
1347 	arr[num++] = plen;	/* length, null terminated, padded */
1348 	memcpy(arr + num, na1, olen);
1349 	memset(arr + num + olen, 0, plen - olen);
1350 	num += plen;
1351 
1352 	arr[num++] = 0x4;	/* lu, logging */
1353 	arr[num++] = 0x0;	/* reserved */
1354 	arr[num++] = 0x0;
1355 	olen = strlen(na2);
1356 	plen = olen + 1;
1357 	if (plen % 4)
1358 		plen = ((plen / 4) + 1) * 4;
1359 	arr[num++] = plen;	/* length, null terminated, padded */
1360 	memcpy(arr + num, na2, olen);
1361 	memset(arr + num + olen, 0, plen - olen);
1362 	num += plen;
1363 
1364 	return num;
1365 }
1366 
1367 /* SCSI ports VPD page */
1368 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1369 {
1370 	int num = 0;
1371 	int port_a, port_b;
1372 
1373 	port_a = target_dev_id + 1;
1374 	port_b = port_a + 1;
1375 	arr[num++] = 0x0;	/* reserved */
1376 	arr[num++] = 0x0;	/* reserved */
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1379 	memset(arr + num, 0, 6);
1380 	num += 6;
1381 	arr[num++] = 0x0;
1382 	arr[num++] = 12;	/* length tp descriptor */
1383 	/* naa-5 target port identifier (A) */
1384 	arr[num++] = 0x61;	/* proto=sas, binary */
1385 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1386 	arr[num++] = 0x0;	/* reserved */
1387 	arr[num++] = 0x8;	/* length */
1388 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1389 	num += 8;
1390 	arr[num++] = 0x0;	/* reserved */
1391 	arr[num++] = 0x0;	/* reserved */
1392 	arr[num++] = 0x0;
1393 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1394 	memset(arr + num, 0, 6);
1395 	num += 6;
1396 	arr[num++] = 0x0;
1397 	arr[num++] = 12;	/* length tp descriptor */
1398 	/* naa-5 target port identifier (B) */
1399 	arr[num++] = 0x61;	/* proto=sas, binary */
1400 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1401 	arr[num++] = 0x0;	/* reserved */
1402 	arr[num++] = 0x8;	/* length */
1403 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1404 	num += 8;
1405 
1406 	return num;
1407 }
1408 
1409 
1410 static unsigned char vpd89_data[] = {
1411 /* from 4th byte */ 0,0,0,0,
1412 'l','i','n','u','x',' ',' ',' ',
1413 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1414 '1','2','3','4',
1415 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1416 0xec,0,0,0,
1417 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1418 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1420 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1421 0x53,0x41,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1423 0x20,0x20,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x10,0x80,
1426 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1427 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1428 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1430 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1431 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1432 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1437 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1438 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1439 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1452 };
1453 
1454 /* ATA Information VPD page */
1455 static int inquiry_vpd_89(unsigned char *arr)
1456 {
1457 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1458 	return sizeof(vpd89_data);
1459 }
1460 
1461 
1462 static unsigned char vpdb0_data[] = {
1463 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1464 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1465 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 };
1468 
1469 /* Block limits VPD page (SBC-3) */
1470 static int inquiry_vpd_b0(unsigned char *arr)
1471 {
1472 	unsigned int gran;
1473 
1474 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1475 
1476 	/* Optimal transfer length granularity */
1477 	if (sdebug_opt_xferlen_exp != 0 &&
1478 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1479 		gran = 1 << sdebug_opt_xferlen_exp;
1480 	else
1481 		gran = 1 << sdebug_physblk_exp;
1482 	put_unaligned_be16(gran, arr + 2);
1483 
1484 	/* Maximum Transfer Length */
1485 	if (sdebug_store_sectors > 0x400)
1486 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1487 
1488 	/* Optimal Transfer Length */
1489 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1490 
1491 	if (sdebug_lbpu) {
1492 		/* Maximum Unmap LBA Count */
1493 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1494 
1495 		/* Maximum Unmap Block Descriptor Count */
1496 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1497 	}
1498 
1499 	/* Unmap Granularity Alignment */
1500 	if (sdebug_unmap_alignment) {
1501 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1502 		arr[28] |= 0x80; /* UGAVALID */
1503 	}
1504 
1505 	/* Optimal Unmap Granularity */
1506 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1507 
1508 	/* Maximum WRITE SAME Length */
1509 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1510 
1511 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1514 }
1515 
1516 /* Block device characteristics VPD page (SBC-3) */
1517 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1518 {
1519 	memset(arr, 0, 0x3c);
1520 	arr[0] = 0;
1521 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1522 	arr[2] = 0;
1523 	arr[3] = 5;	/* less than 1.8" */
1524 	if (devip->zmodel == BLK_ZONED_HA)
1525 		arr[4] = 1 << 4;	/* zoned field = 01b */
1526 
1527 	return 0x3c;
1528 }
1529 
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x4);
1534 	arr[0] = 0;			/* threshold exponent */
1535 	if (sdebug_lbpu)
1536 		arr[1] = 1 << 7;
1537 	if (sdebug_lbpws)
1538 		arr[1] |= 1 << 6;
1539 	if (sdebug_lbpws10)
1540 		arr[1] |= 1 << 5;
1541 	if (sdebug_lbprz && scsi_debug_lbp())
1542 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1543 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 	/* threshold_percentage=0 */
1546 	return 0x4;
1547 }
1548 
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1551 {
1552 	memset(arr, 0, 0x3c);
1553 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1554 	/*
1555 	 * Set Optimal number of open sequential write preferred zones and
1556 	 * Optimal number of non-sequentially written sequential write
1557 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1558 	 * fields set to zero, apart from Max. number of open swrz_s field.
1559 	 */
1560 	put_unaligned_be32(0xffffffff, &arr[4]);
1561 	put_unaligned_be32(0xffffffff, &arr[8]);
1562 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 		put_unaligned_be32(devip->max_open, &arr[12]);
1564 	else
1565 		put_unaligned_be32(0xffffffff, &arr[12]);
1566 	return 0x3c;
1567 }
1568 
1569 #define SDEBUG_LONG_INQ_SZ 96
1570 #define SDEBUG_MAX_INQ_ARR_SZ 584
1571 
1572 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1573 {
1574 	unsigned char pq_pdt;
1575 	unsigned char *arr;
1576 	unsigned char *cmd = scp->cmnd;
1577 	u32 alloc_len, n;
1578 	int ret;
1579 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1580 
1581 	alloc_len = get_unaligned_be16(cmd + 3);
1582 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1583 	if (!arr)
1584 		return DID_REQUEUE << 16;
1585 	is_disk = (sdebug_ptype == TYPE_DISK);
1586 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1587 	is_disk_zbc = (is_disk || is_zbc);
1588 	have_wlun = scsi_is_wlun(scp->device->lun);
1589 	if (have_wlun)
1590 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1591 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1592 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1593 	else
1594 		pq_pdt = (sdebug_ptype & 0x1f);
1595 	arr[0] = pq_pdt;
1596 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1597 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1598 		kfree(arr);
1599 		return check_condition_result;
1600 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1601 		int lu_id_num, port_group_id, target_dev_id;
1602 		u32 len;
1603 		char lu_id_str[6];
1604 		int host_no = devip->sdbg_host->shost->host_no;
1605 
1606 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1607 		    (devip->channel & 0x7f);
1608 		if (sdebug_vpd_use_hostno == 0)
1609 			host_no = 0;
1610 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1611 			    (devip->target * 1000) + devip->lun);
1612 		target_dev_id = ((host_no + 1) * 2000) +
1613 				 (devip->target * 1000) - 3;
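		/* the "- 3" keeps target_dev_id distinct from every lu_id_num */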
1614 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1615 		if (0 == cmd[2]) { /* supported vital product data pages */
1616 			arr[1] = cmd[2];	/*sanity */
1617 			n = 4;
1618 			arr[n++] = 0x0;   /* this page */
1619 			arr[n++] = 0x80;  /* unit serial number */
1620 			arr[n++] = 0x83;  /* device identification */
1621 			arr[n++] = 0x84;  /* software interface ident. */
1622 			arr[n++] = 0x85;  /* management network addresses */
1623 			arr[n++] = 0x86;  /* extended inquiry */
1624 			arr[n++] = 0x87;  /* mode page policy */
1625 			arr[n++] = 0x88;  /* SCSI ports */
1626 			if (is_disk_zbc) {	  /* SBC or ZBC */
1627 				arr[n++] = 0x89;  /* ATA information */
1628 				arr[n++] = 0xb0;  /* Block limits */
1629 				arr[n++] = 0xb1;  /* Block characteristics */
1630 				if (is_disk)
1631 					arr[n++] = 0xb2;  /* LB Provisioning */
1632 				if (is_zbc)
1633 					arr[n++] = 0xb6;  /* ZB dev. char. */
1634 			}
1635 			arr[3] = n - 4;	  /* number of supported VPD pages */
1636 		} else if (0x80 == cmd[2]) { /* unit serial number */
1637 			arr[1] = cmd[2];	/*sanity */
1638 			arr[3] = len;
1639 			memcpy(&arr[4], lu_id_str, len);
1640 		} else if (0x83 == cmd[2]) { /* device identification */
1641 			arr[1] = cmd[2];	/*sanity */
1642 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1643 						target_dev_id, lu_id_num,
1644 						lu_id_str, len,
1645 						&devip->lu_name);
1646 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1647 			arr[1] = cmd[2];	/*sanity */
1648 			arr[3] = inquiry_vpd_84(&arr[4]);
1649 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1650 			arr[1] = cmd[2];	/*sanity */
1651 			arr[3] = inquiry_vpd_85(&arr[4]);
1652 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1653 			arr[1] = cmd[2];	/*sanity */
1654 			arr[3] = 0x3c;	/* number of following entries */
1655 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1656 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1657 			else if (have_dif_prot)
1658 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1659 			else
1660 				arr[4] = 0x0;   /* no protection stuff */
1661 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1662 		} else if (0x87 == cmd[2]) { /* mode page policy */
1663 			arr[1] = cmd[2];	/*sanity */
1664 			arr[3] = 0x8;	/* number of following entries */
1665 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1666 			arr[6] = 0x80;	/* mlus, shared */
1667 			arr[8] = 0x18;	 /* protocol specific lu */
1668 			arr[10] = 0x82;	 /* mlus, per initiator port */
1669 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1670 			arr[1] = cmd[2];	/*sanity */
1671 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1672 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1673 			arr[1] = cmd[2];        /*sanity */
1674 			n = inquiry_vpd_89(&arr[4]);
1675 			put_unaligned_be16(n, arr + 2);
1676 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1677 			arr[1] = cmd[2];        /*sanity */
1678 			arr[3] = inquiry_vpd_b0(&arr[4]);
1679 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1680 			arr[1] = cmd[2];        /*sanity */
1681 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1682 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1683 			arr[1] = cmd[2];        /*sanity */
1684 			arr[3] = inquiry_vpd_b2(&arr[4]);
1685 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1686 			arr[1] = cmd[2];        /*sanity */
1687 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1688 		} else {
1689 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1690 			kfree(arr);
1691 			return check_condition_result;
1692 		}
1693 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1694 		ret = fill_from_dev_buffer(scp, arr,
1695 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1696 		kfree(arr);
1697 		return ret;
1698 	}
1699 	/* drops through here for a standard inquiry */
1700 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1701 	arr[2] = sdebug_scsi_level;
1702 	arr[3] = 2;    /* response_data_format==2 */
1703 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1704 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1705 	if (sdebug_vpd_use_hostno == 0)
1706 		arr[5] |= 0x10; /* claim: implicit TPGS */
1707 	arr[6] = 0x10; /* claim: MultiP */
1708 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1709 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1710 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1711 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1712 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1713 	/* Use Vendor Specific area to place driver date in ASCII hex */
1714 	/* Use Vendor Specific area to place driver date in ASCII */
1715 	/* version descriptors (2 bytes each) follow */
1716 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1717 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1718 	n = 62;
1719 	if (is_disk) {		/* SBC-4 no version claimed */
1720 		put_unaligned_be16(0x600, arr + n);
1721 		n += 2;
1722 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1723 		put_unaligned_be16(0x525, arr + n);
1724 		n += 2;
1725 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1726 		put_unaligned_be16(0x624, arr + n);
1727 		n += 2;
1728 	}
1729 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1730 	ret = fill_from_dev_buffer(scp, arr,
1731 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1732 	kfree(arr);
1733 	return ret;
1734 }
1735 
1736 /* See resp_iec_m_pg() for how this data is manipulated */
1737 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1738 				   0, 0, 0x0, 0x0};
1739 
1740 static int resp_requests(struct scsi_cmnd *scp,
1741 			 struct sdebug_dev_info *devip)
1742 {
1743 	unsigned char *cmd = scp->cmnd;
1744 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1745 	bool dsense = !!(cmd[1] & 1);
1746 	u32 alloc_len = cmd[4];
1747 	u32 len = 18;
1748 	int stopped_state = atomic_read(&devip->stopped);
1749 
1750 	memset(arr, 0, sizeof(arr));
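	/* response code 0x72 below selects descriptor format sense data, 0x70 fixed format */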
1751 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1752 		if (dsense) {
1753 			arr[0] = 0x72;
1754 			arr[1] = NOT_READY;
1755 			arr[2] = LOGICAL_UNIT_NOT_READY;
1756 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1757 			len = 8;
1758 		} else {
1759 			arr[0] = 0x70;
1760 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1761 			arr[7] = 0xa;			/* 18 byte sense buffer */
1762 			arr[12] = LOGICAL_UNIT_NOT_READY;
1763 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1764 		}
1765 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1766 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1767 		if (dsense) {
1768 			arr[0] = 0x72;
1769 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1770 			arr[2] = THRESHOLD_EXCEEDED;
1771 			arr[3] = 0xff;		/* Failure prediction(false) */
1772 			len = 8;
1773 		} else {
1774 			arr[0] = 0x70;
1775 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1776 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1777 			arr[12] = THRESHOLD_EXCEEDED;
1778 			arr[13] = 0xff;		/* Failure prediction(false) */
1779 		}
1780 	} else {	/* nothing to report */
1781 		if (dsense) {
1782 			len = 8;
1783 			memset(arr, 0, len);
1784 			arr[0] = 0x72;
1785 		} else {
1786 			memset(arr, 0, len);
1787 			arr[0] = 0x70;
1788 			arr[7] = 0xa;
1789 		}
1790 	}
1791 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1792 }
1793 
1794 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1795 {
1796 	unsigned char *cmd = scp->cmnd;
1797 	int power_cond, want_stop, stopped_state;
1798 	bool changing;
1799 
1800 	power_cond = (cmd[4] & 0xf0) >> 4;
1801 	if (power_cond) {
1802 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1803 		return check_condition_result;
1804 	}
1805 	want_stop = !(cmd[4] & 1);
1806 	stopped_state = atomic_read(&devip->stopped);
1807 	if (stopped_state == 2) {
1808 		ktime_t now_ts = ktime_get_boottime();
1809 
1810 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1811 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1812 
1813 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1814 				/* tur_ms_to_ready timer expired */
1815 				atomic_set(&devip->stopped, 0);
1816 				stopped_state = 0;
1817 			}
1818 		}
1819 		if (stopped_state == 2) {
1820 			if (want_stop) {
1821 				stopped_state = 1;	/* dummy up success */
1822 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1823 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1824 				return check_condition_result;
1825 			}
1826 		}
1827 	}
1828 	changing = (stopped_state != want_stop);
1829 	if (changing)
1830 		atomic_xchg(&devip->stopped, want_stop);
1831 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1832 		return SDEG_RES_IMMED_MASK;
1833 	else
1834 		return 0;
1835 }
1836 
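/*
 * If virtual_gb is set, the reported capacity can exceed the backing store
 * size; do_device_access() later wraps such accesses back to the store start.
 */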
1837 static sector_t get_sdebug_capacity(void)
1838 {
1839 	static const unsigned int gibibyte = 1073741824;
1840 
1841 	if (sdebug_virtual_gb > 0)
1842 		return (sector_t)sdebug_virtual_gb *
1843 			(gibibyte / sdebug_sector_size);
1844 	else
1845 		return sdebug_store_sectors;
1846 }
1847 
1848 #define SDEBUG_READCAP_ARR_SZ 8
1849 static int resp_readcap(struct scsi_cmnd *scp,
1850 			struct sdebug_dev_info *devip)
1851 {
1852 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1853 	unsigned int capac;
1854 
1855 	/* following just in case virtual_gb changed */
1856 	sdebug_capacity = get_sdebug_capacity();
1857 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1858 	if (sdebug_capacity < 0xffffffff) {
1859 		capac = (unsigned int)sdebug_capacity - 1;
1860 		put_unaligned_be32(capac, arr + 0);
1861 	} else
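		/* per SBC, 0xffffffff directs the initiator to READ CAPACITY(16) */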
1862 		put_unaligned_be32(0xffffffff, arr + 0);
1863 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1864 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1865 }
1866 
1867 #define SDEBUG_READCAP16_ARR_SZ 32
1868 static int resp_readcap16(struct scsi_cmnd *scp,
1869 			  struct sdebug_dev_info *devip)
1870 {
1871 	unsigned char *cmd = scp->cmnd;
1872 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1873 	u32 alloc_len;
1874 
1875 	alloc_len = get_unaligned_be32(cmd + 10);
1876 	/* following just in case virtual_gb changed */
1877 	sdebug_capacity = get_sdebug_capacity();
1878 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1879 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1880 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1881 	arr[13] = sdebug_physblk_exp & 0xf;
1882 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1883 
1884 	if (scsi_debug_lbp()) {
1885 		arr[14] |= 0x80; /* LBPME */
1886 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1887 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1888 		 * in the wider field maps to 0 in this field.
1889 		 */
1890 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1891 			arr[14] |= 0x40;
1892 	}
1893 
1894 	arr[15] = sdebug_lowest_aligned & 0xff;
1895 
1896 	if (have_dif_prot) {
1897 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1898 		arr[12] |= 1; /* PROT_EN */
1899 	}
1900 
1901 	return fill_from_dev_buffer(scp, arr,
1902 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1903 }
1904 
1905 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1906 
1907 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1908 			      struct sdebug_dev_info *devip)
1909 {
1910 	unsigned char *cmd = scp->cmnd;
1911 	unsigned char *arr;
1912 	int host_no = devip->sdbg_host->shost->host_no;
1913 	int port_group_a, port_group_b, port_a, port_b;
1914 	u32 alen, n, rlen;
1915 	int ret;
1916 
1917 	alen = get_unaligned_be32(cmd + 6);
1918 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1919 	if (!arr)
1920 		return DID_REQUEUE << 16;
1921 	/*
1922 	 * EVPD page 0x88 states we have two ports, one
1923 	 * real and a fake port with no device connected.
1924 	 * So we create two port groups with one port each
1925 	 * and set the group with port B to unavailable.
1926 	 */
1927 	port_a = 0x1; /* relative port A */
1928 	port_b = 0x2; /* relative port B */
1929 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1930 			(devip->channel & 0x7f);
1931 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1932 			(devip->channel & 0x7f) + 0x80;
1933 
1934 	/*
1935 	 * The asymmetric access state is cycled according to the host_id.
1936 	 */
1937 	n = 4;
1938 	if (sdebug_vpd_use_hostno == 0) {
1939 		arr[n++] = host_no % 3; /* Asymm access state */
1940 		arr[n++] = 0x0F; /* claim: all states are supported */
1941 	} else {
1942 		arr[n++] = 0x0; /* Active/Optimized path */
1943 		arr[n++] = 0x01; /* only support active/optimized paths */
1944 	}
1945 	put_unaligned_be16(port_group_a, arr + n);
1946 	n += 2;
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Status code */
1949 	arr[n++] = 0;    /* Vendor unique */
1950 	arr[n++] = 0x1;  /* One port per group */
1951 	arr[n++] = 0;    /* Reserved */
1952 	arr[n++] = 0;    /* Reserved */
1953 	put_unaligned_be16(port_a, arr + n);
1954 	n += 2;
1955 	arr[n++] = 3;    /* Port unavailable */
1956 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1957 	put_unaligned_be16(port_group_b, arr + n);
1958 	n += 2;
1959 	arr[n++] = 0;    /* Reserved */
1960 	arr[n++] = 0;    /* Status code */
1961 	arr[n++] = 0;    /* Vendor unique */
1962 	arr[n++] = 0x1;  /* One port per group */
1963 	arr[n++] = 0;    /* Reserved */
1964 	arr[n++] = 0;    /* Reserved */
1965 	put_unaligned_be16(port_b, arr + n);
1966 	n += 2;
1967 
1968 	rlen = n - 4;
1969 	put_unaligned_be32(rlen, arr + 0);
1970 
1971 	/*
1972 	 * Return the smallest of:
1973 	 * - The allocated length
1974 	 * - The constructed response length
1975 	 * - The maximum array size
1976 	 */
1977 	rlen = min(alen, n);
1978 	ret = fill_from_dev_buffer(scp, arr,
1979 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1980 	kfree(arr);
1981 	return ret;
1982 }
1983 
1984 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1985 			     struct sdebug_dev_info *devip)
1986 {
1987 	bool rctd;
1988 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1989 	u16 req_sa, u;
1990 	u32 alloc_len, a_len;
1991 	int k, offset, len, errsts, count, bump, na;
1992 	const struct opcode_info_t *oip;
1993 	const struct opcode_info_t *r_oip;
1994 	u8 *arr;
1995 	u8 *cmd = scp->cmnd;
1996 
1997 	rctd = !!(cmd[2] & 0x80);
1998 	reporting_opts = cmd[2] & 0x7;
1999 	req_opcode = cmd[3];
2000 	req_sa = get_unaligned_be16(cmd + 4);
2001 	alloc_len = get_unaligned_be32(cmd + 6);
2002 	if (alloc_len < 4 || alloc_len > 0xffff) {
2003 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2004 		return check_condition_result;
2005 	}
2006 	if (alloc_len > 8192)
2007 		a_len = 8192;
2008 	else
2009 		a_len = alloc_len;
2010 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2011 	if (NULL == arr) {
2012 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2013 				INSUFF_RES_ASCQ);
2014 		return check_condition_result;
2015 	}
2016 	switch (reporting_opts) {
2017 	case 0:	/* all commands */
2018 		/* count number of commands */
2019 		for (count = 0, oip = opcode_info_arr;
2020 		     oip->num_attached != 0xff; ++oip) {
2021 			if (F_INV_OP & oip->flags)
2022 				continue;
2023 			count += (oip->num_attached + 1);
2024 		}
2025 		bump = rctd ? 20 : 8;
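		/* 8 byte command descriptor, plus a 12 byte timeouts descriptor when RCTD is set */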
2026 		put_unaligned_be32(count * bump, arr);
2027 		for (offset = 4, oip = opcode_info_arr;
2028 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2029 			if (F_INV_OP & oip->flags)
2030 				continue;
2031 			na = oip->num_attached;
2032 			arr[offset] = oip->opcode;
2033 			put_unaligned_be16(oip->sa, arr + offset + 2);
2034 			if (rctd)
2035 				arr[offset + 5] |= 0x2;
2036 			if (FF_SA & oip->flags)
2037 				arr[offset + 5] |= 0x1;
2038 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2039 			if (rctd)
2040 				put_unaligned_be16(0xa, arr + offset + 8);
2041 			r_oip = oip;
2042 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2043 				if (F_INV_OP & oip->flags)
2044 					continue;
2045 				offset += bump;
2046 				arr[offset] = oip->opcode;
2047 				put_unaligned_be16(oip->sa, arr + offset + 2);
2048 				if (rctd)
2049 					arr[offset + 5] |= 0x2;
2050 				if (FF_SA & oip->flags)
2051 					arr[offset + 5] |= 0x1;
2052 				put_unaligned_be16(oip->len_mask[0],
2053 						   arr + offset + 6);
2054 				if (rctd)
2055 					put_unaligned_be16(0xa,
2056 							   arr + offset + 8);
2057 			}
2058 			oip = r_oip;
2059 			offset += bump;
2060 		}
2061 		break;
2062 	case 1:	/* one command: opcode only */
2063 	case 2:	/* one command: opcode plus service action */
2064 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2065 		sdeb_i = opcode_ind_arr[req_opcode];
2066 		oip = &opcode_info_arr[sdeb_i];
2067 		if (F_INV_OP & oip->flags) {
2068 			supp = 1;
2069 			offset = 4;
2070 		} else {
2071 			if (1 == reporting_opts) {
2072 				if (FF_SA & oip->flags) {
2073 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2074 							     2, 2);
2075 					kfree(arr);
2076 					return check_condition_result;
2077 				}
2078 				req_sa = 0;
2079 			} else if (2 == reporting_opts &&
2080 				   0 == (FF_SA & oip->flags)) {
2081 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2082 				kfree(arr);
2083 				return check_condition_result;
2084 			}
2085 			if (0 == (FF_SA & oip->flags) &&
2086 			    req_opcode == oip->opcode)
2087 				supp = 3;
2088 			else if (0 == (FF_SA & oip->flags)) {
2089 				na = oip->num_attached;
2090 				for (k = 0, oip = oip->arrp; k < na;
2091 				     ++k, ++oip) {
2092 					if (req_opcode == oip->opcode)
2093 						break;
2094 				}
2095 				supp = (k >= na) ? 1 : 3;
2096 			} else if (req_sa != oip->sa) {
2097 				na = oip->num_attached;
2098 				for (k = 0, oip = oip->arrp; k < na;
2099 				     ++k, ++oip) {
2100 					if (req_sa == oip->sa)
2101 						break;
2102 				}
2103 				supp = (k >= na) ? 1 : 3;
2104 			} else
2105 				supp = 3;
2106 			if (3 == supp) {
2107 				u = oip->len_mask[0];
2108 				put_unaligned_be16(u, arr + 2);
2109 				arr[4] = oip->opcode;
2110 				for (k = 1; k < u; ++k)
2111 					arr[4 + k] = (k < 16) ?
2112 						 oip->len_mask[k] : 0xff;
2113 				offset = 4 + u;
2114 			} else
2115 				offset = 4;
2116 		}
2117 		arr[1] = (rctd ? 0x80 : 0) | supp;
2118 		if (rctd) {
2119 			put_unaligned_be16(0xa, arr + offset);
2120 			offset += 12;
2121 		}
2122 		break;
2123 	default:
2124 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2125 		kfree(arr);
2126 		return check_condition_result;
2127 	}
2128 	offset = (offset < a_len) ? offset : a_len;
2129 	len = (offset < alloc_len) ? offset : alloc_len;
2130 	errsts = fill_from_dev_buffer(scp, arr, len);
2131 	kfree(arr);
2132 	return errsts;
2133 }
2134 
2135 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2136 			  struct sdebug_dev_info *devip)
2137 {
2138 	bool repd;
2139 	u32 alloc_len, len;
2140 	u8 arr[16];
2141 	u8 *cmd = scp->cmnd;
2142 
2143 	memset(arr, 0, sizeof(arr));
2144 	repd = !!(cmd[2] & 0x80);
2145 	alloc_len = get_unaligned_be32(cmd + 6);
2146 	if (alloc_len < 4) {
2147 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2148 		return check_condition_result;
2149 	}
2150 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2151 	arr[1] = 0x1;		/* ITNRS */
2152 	if (repd) {
2153 		arr[3] = 0xc;
2154 		len = 16;
2155 	} else
2156 		len = 4;
2157 
2158 	len = (len < alloc_len) ? len : alloc_len;
2159 	return fill_from_dev_buffer(scp, arr, len);
2160 }
2161 
2162 /* <<Following mode page info copied from ST318451LW>> */
2163 
2164 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2165 {	/* Read-Write Error Recovery page for mode_sense */
2166 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2167 					5, 0, 0xff, 0xff};
2168 
2169 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2170 	if (1 == pcontrol)
2171 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2172 	return sizeof(err_recov_pg);
2173 }
2174 
2175 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2176 { 	/* Disconnect-Reconnect page for mode_sense */
2177 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2178 					 0, 0, 0, 0, 0, 0, 0, 0};
2179 
2180 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2181 	if (1 == pcontrol)
2182 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2183 	return sizeof(disconnect_pg);
2184 }
2185 
2186 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2187 {       /* Format device page for mode_sense */
2188 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2189 				     0, 0, 0, 0, 0, 0, 0, 0,
2190 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2191 
2192 	memcpy(p, format_pg, sizeof(format_pg));
2193 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2194 	put_unaligned_be16(sdebug_sector_size, p + 12);
2195 	if (sdebug_removable)
2196 		p[20] |= 0x20; /* should agree with INQUIRY */
2197 	if (1 == pcontrol)
2198 		memset(p + 2, 0, sizeof(format_pg) - 2);
2199 	return sizeof(format_pg);
2200 }
2201 
2202 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2203 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2204 				     0, 0, 0, 0};
2205 
2206 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2207 { 	/* Caching page for mode_sense */
2208 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2209 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2210 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2211 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2212 
2213 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2214 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2215 	memcpy(p, caching_pg, sizeof(caching_pg));
2216 	if (1 == pcontrol)
2217 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2218 	else if (2 == pcontrol)
2219 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2220 	return sizeof(caching_pg);
2221 }
2222 
2223 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2224 				    0, 0, 0x2, 0x4b};
2225 
2226 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2227 { 	/* Control mode page for mode_sense */
2228 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2229 					0, 0, 0, 0};
2230 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2231 				     0, 0, 0x2, 0x4b};
2232 
2233 	if (sdebug_dsense)
2234 		ctrl_m_pg[2] |= 0x4;
2235 	else
2236 		ctrl_m_pg[2] &= ~0x4;
2237 
2238 	if (sdebug_ato)
2239 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2240 
2241 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2242 	if (1 == pcontrol)
2243 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2244 	else if (2 == pcontrol)
2245 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2246 	return sizeof(ctrl_m_pg);
2247 }
2248 
2249 
2250 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2251 {	/* Informational Exceptions control mode page for mode_sense */
2252 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2253 				       0, 0, 0x0, 0x0};
2254 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2255 				      0, 0, 0x0, 0x0};
2256 
2257 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2258 	if (1 == pcontrol)
2259 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2260 	else if (2 == pcontrol)
2261 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2262 	return sizeof(iec_m_pg);
2263 }
2264 
2265 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2266 {	/* SAS SSP mode page - short format for mode_sense */
2267 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2268 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2269 
2270 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2271 	if (1 == pcontrol)
2272 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2273 	return sizeof(sas_sf_m_pg);
2274 }
2275 
2276 
2277 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2278 			      int target_dev_id)
2279 {	/* SAS phy control and discover mode page for mode_sense */
2280 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2281 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2282 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2283 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2284 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2285 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2286 		    0, 0, 0, 0, 0, 0, 0, 0,
2287 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2288 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2289 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2290 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2291 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2292 		    0, 0, 0, 0, 0, 0, 0, 0,
2293 		};
2294 	int port_a, port_b;
2295 
2296 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2297 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2298 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2299 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2300 	port_a = target_dev_id + 1;
2301 	port_b = port_a + 1;
2302 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2303 	put_unaligned_be32(port_a, p + 20);
2304 	put_unaligned_be32(port_b, p + 48 + 20);
2305 	if (1 == pcontrol)
2306 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2307 	return sizeof(sas_pcd_m_pg);
2308 }
2309 
2310 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2311 {	/* SAS SSP shared protocol specific port mode subpage */
2312 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2313 		    0, 0, 0, 0, 0, 0, 0, 0,
2314 		};
2315 
2316 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2317 	if (1 == pcontrol)
2318 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2319 	return sizeof(sas_sha_m_pg);
2320 }
2321 
2322 #define SDEBUG_MAX_MSENSE_SZ 256
2323 
2324 static int resp_mode_sense(struct scsi_cmnd *scp,
2325 			   struct sdebug_dev_info *devip)
2326 {
2327 	int pcontrol, pcode, subpcode, bd_len;
2328 	unsigned char dev_spec;
2329 	u32 alloc_len, offset, len;
2330 	int target_dev_id;
2331 	int target = scp->device->id;
2332 	unsigned char *ap;
2333 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2334 	unsigned char *cmd = scp->cmnd;
2335 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2336 
2337 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2338 	pcontrol = (cmd[2] & 0xc0) >> 6;
2339 	pcode = cmd[2] & 0x3f;
2340 	subpcode = cmd[3];
2341 	msense_6 = (MODE_SENSE == cmd[0]);
2342 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2343 	is_disk = (sdebug_ptype == TYPE_DISK);
2344 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2345 	if ((is_disk || is_zbc) && !dbd)
2346 		bd_len = llbaa ? 16 : 8;
2347 	else
2348 		bd_len = 0;
2349 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2350 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2351 	if (0x3 == pcontrol) {  /* Saving values not supported */
2352 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2353 		return check_condition_result;
2354 	}
2355 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2356 			(devip->target * 1000) - 3;
2357 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2358 	if (is_disk || is_zbc) {
2359 		dev_spec = 0x10;	/* DPOFUA; becomes 0x90 when WP=1 (read-only) */
2360 		if (sdebug_wp)
2361 			dev_spec |= 0x80;
2362 	} else
2363 		dev_spec = 0x0;
2364 	if (msense_6) {
2365 		arr[2] = dev_spec;
2366 		arr[3] = bd_len;
2367 		offset = 4;
2368 	} else {
2369 		arr[3] = dev_spec;
2370 		if (16 == bd_len)
2371 			arr[4] = 0x1;	/* set LONGLBA bit */
2372 		arr[7] = bd_len;	/* assume 255 or less */
2373 		offset = 8;
2374 	}
2375 	ap = arr + offset;
2376 	if ((bd_len > 0) && (!sdebug_capacity))
2377 		sdebug_capacity = get_sdebug_capacity();
2378 
2379 	if (8 == bd_len) {
2380 		if (sdebug_capacity > 0xfffffffe)
2381 			put_unaligned_be32(0xffffffff, ap + 0);
2382 		else
2383 			put_unaligned_be32(sdebug_capacity, ap + 0);
2384 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2385 		offset += bd_len;
2386 		ap = arr + offset;
2387 	} else if (16 == bd_len) {
2388 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2389 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2390 		offset += bd_len;
2391 		ap = arr + offset;
2392 	}
2393 
2394 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2395 		/* TODO: Control Extension page */
2396 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2397 		return check_condition_result;
2398 	}
2399 	bad_pcode = false;
2400 
2401 	switch (pcode) {
2402 	case 0x1:	/* Read-Write error recovery page, direct access */
2403 		len = resp_err_recov_pg(ap, pcontrol, target);
2404 		offset += len;
2405 		break;
2406 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2407 		len = resp_disconnect_pg(ap, pcontrol, target);
2408 		offset += len;
2409 		break;
2410 	case 0x3:       /* Format device page, direct access */
2411 		if (is_disk) {
2412 			len = resp_format_pg(ap, pcontrol, target);
2413 			offset += len;
2414 		} else
2415 			bad_pcode = true;
2416 		break;
2417 	case 0x8:	/* Caching page, direct access */
2418 		if (is_disk || is_zbc) {
2419 			len = resp_caching_pg(ap, pcontrol, target);
2420 			offset += len;
2421 		} else
2422 			bad_pcode = true;
2423 		break;
2424 	case 0xa:	/* Control Mode page, all devices */
2425 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2426 		offset += len;
2427 		break;
2428 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2429 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2430 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2431 			return check_condition_result;
2432 		}
2433 		len = 0;
2434 		if ((0x0 == subpcode) || (0xff == subpcode))
2435 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2436 		if ((0x1 == subpcode) || (0xff == subpcode))
2437 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2438 						  target_dev_id);
2439 		if ((0x2 == subpcode) || (0xff == subpcode))
2440 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2441 		offset += len;
2442 		break;
2443 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2444 		len = resp_iec_m_pg(ap, pcontrol, target);
2445 		offset += len;
2446 		break;
2447 	case 0x3f:	/* Read all Mode pages */
2448 		if ((0 == subpcode) || (0xff == subpcode)) {
2449 			len = resp_err_recov_pg(ap, pcontrol, target);
2450 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2451 			if (is_disk) {
2452 				len += resp_format_pg(ap + len, pcontrol,
2453 						      target);
2454 				len += resp_caching_pg(ap + len, pcontrol,
2455 						       target);
2456 			} else if (is_zbc) {
2457 				len += resp_caching_pg(ap + len, pcontrol,
2458 						       target);
2459 			}
2460 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2461 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2462 			if (0xff == subpcode) {
2463 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2464 						  target, target_dev_id);
2465 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2466 			}
2467 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2468 			offset += len;
2469 		} else {
2470 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2471 			return check_condition_result;
2472 		}
2473 		break;
2474 	default:
2475 		bad_pcode = true;
2476 		break;
2477 	}
2478 	if (bad_pcode) {
2479 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2480 		return check_condition_result;
2481 	}
2482 	if (msense_6)
2483 		arr[0] = offset - 1;
2484 	else
2485 		put_unaligned_be16((offset - 2), arr + 0);
2486 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2487 }
2488 
2489 #define SDEBUG_MAX_MSELECT_SZ 512
2490 
2491 static int resp_mode_select(struct scsi_cmnd *scp,
2492 			    struct sdebug_dev_info *devip)
2493 {
2494 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2495 	int param_len, res, mpage;
2496 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2497 	unsigned char *cmd = scp->cmnd;
2498 	int mselect6 = (MODE_SELECT == cmd[0]);
2499 
2500 	memset(arr, 0, sizeof(arr));
2501 	pf = cmd[1] & 0x10;
2502 	sp = cmd[1] & 0x1;
2503 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2504 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2505 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2506 		return check_condition_result;
2507 	}
2508 	res = fetch_to_dev_buffer(scp, arr, param_len);
2509 	if (-1 == res)
2510 		return DID_ERROR << 16;
2511 	else if (sdebug_verbose && (res < param_len))
2512 		sdev_printk(KERN_INFO, scp->device,
2513 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2514 			    __func__, param_len, res);
2515 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2516 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2517 	off = bd_len + (mselect6 ? 4 : 8);
2518 	if (md_len > 2 || off >= res) {
2519 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2520 		return check_condition_result;
2521 	}
2522 	mpage = arr[off] & 0x3f;
2523 	ps = !!(arr[off] & 0x80);
2524 	if (ps) {
2525 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2526 		return check_condition_result;
2527 	}
2528 	spf = !!(arr[off] & 0x40);
2529 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2530 		       (arr[off + 1] + 2);
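	/* SPF=1: sub-page format with a 16-bit page length; SPF=0: 8-bit length */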
2531 	if ((pg_len + off) > param_len) {
2532 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2533 				PARAMETER_LIST_LENGTH_ERR, 0);
2534 		return check_condition_result;
2535 	}
2536 	switch (mpage) {
2537 	case 0x8:      /* Caching Mode page */
2538 		if (caching_pg[1] == arr[off + 1]) {
2539 			memcpy(caching_pg + 2, arr + off + 2,
2540 			       sizeof(caching_pg) - 2);
2541 			goto set_mode_changed_ua;
2542 		}
2543 		break;
2544 	case 0xa:      /* Control Mode page */
2545 		if (ctrl_m_pg[1] == arr[off + 1]) {
2546 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2547 			       sizeof(ctrl_m_pg) - 2);
2548 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2552 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2553 			goto set_mode_changed_ua;
2554 		}
2555 		break;
2556 	case 0x1c:      /* Informational Exceptions Mode page */
2557 		if (iec_m_pg[1] == arr[off + 1]) {
2558 			memcpy(iec_m_pg + 2, arr + off + 2,
2559 			       sizeof(iec_m_pg) - 2);
2560 			goto set_mode_changed_ua;
2561 		}
2562 		break;
2563 	default:
2564 		break;
2565 	}
2566 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2567 	return check_condition_result;
2568 set_mode_changed_ua:
2569 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2570 	return 0;
2571 }
2572 
2573 static int resp_temp_l_pg(unsigned char *arr)
2574 {
2575 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2576 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2577 		};
2578 
2579 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2580 	return sizeof(temp_l_pg);
2581 }
2582 
2583 static int resp_ie_l_pg(unsigned char *arr)
2584 {
2585 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2586 		};
2587 
2588 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2589 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2590 		arr[4] = THRESHOLD_EXCEEDED;
2591 		arr[5] = 0xff;
2592 	}
2593 	return sizeof(ie_l_pg);
2594 }
2595 
2596 static int resp_env_rep_l_spg(unsigned char *arr)
2597 {
2598 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2599 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2600 					 0x1, 0x0, 0x23, 0x8,
2601 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2602 		};
2603 
2604 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2605 	return sizeof(env_rep_l_spg);
2606 }
2607 
2608 #define SDEBUG_MAX_LSENSE_SZ 512
2609 
2610 static int resp_log_sense(struct scsi_cmnd *scp,
2611 			  struct sdebug_dev_info *devip)
2612 {
2613 	int ppc, sp, pcode, subpcode;
2614 	u32 alloc_len, len, n;
2615 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2616 	unsigned char *cmd = scp->cmnd;
2617 
2618 	memset(arr, 0, sizeof(arr));
2619 	ppc = cmd[1] & 0x2;
2620 	sp = cmd[1] & 0x1;
2621 	if (ppc || sp) {
2622 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2623 		return check_condition_result;
2624 	}
2625 	pcode = cmd[2] & 0x3f;
2626 	subpcode = cmd[3] & 0xff;
2627 	alloc_len = get_unaligned_be16(cmd + 7);
2628 	arr[0] = pcode;
2629 	if (0 == subpcode) {
2630 		switch (pcode) {
2631 		case 0x0:	/* Supported log pages log page */
2632 			n = 4;
2633 			arr[n++] = 0x0;		/* this page */
2634 			arr[n++] = 0xd;		/* Temperature */
2635 			arr[n++] = 0x2f;	/* Informational exceptions */
2636 			arr[3] = n - 4;
2637 			break;
2638 		case 0xd:	/* Temperature log page */
2639 			arr[3] = resp_temp_l_pg(arr + 4);
2640 			break;
2641 		case 0x2f:	/* Informational exceptions log page */
2642 			arr[3] = resp_ie_l_pg(arr + 4);
2643 			break;
2644 		default:
2645 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2646 			return check_condition_result;
2647 		}
2648 	} else if (0xff == subpcode) {
2649 		arr[0] |= 0x40;
2650 		arr[1] = subpcode;
2651 		switch (pcode) {
2652 		case 0x0:	/* Supported log pages and subpages log page */
2653 			n = 4;
2654 			arr[n++] = 0x0;
2655 			arr[n++] = 0x0;		/* 0,0 page */
2656 			arr[n++] = 0x0;
2657 			arr[n++] = 0xff;	/* this page */
2658 			arr[n++] = 0xd;
2659 			arr[n++] = 0x0;		/* Temperature */
2660 			arr[n++] = 0xd;
2661 			arr[n++] = 0x1;		/* Environment reporting */
2662 			arr[n++] = 0xd;
2663 			arr[n++] = 0xff;	/* all 0xd subpages */
2664 			arr[n++] = 0x2f;
2665 			arr[n++] = 0x0;	/* Informational exceptions */
2666 			arr[n++] = 0x2f;
2667 			arr[n++] = 0xff;	/* all 0x2f subpages */
2668 			arr[3] = n - 4;
2669 			break;
2670 		case 0xd:	/* Temperature subpages */
2671 			n = 4;
2672 			arr[n++] = 0xd;
2673 			arr[n++] = 0x0;		/* Temperature */
2674 			arr[n++] = 0xd;
2675 			arr[n++] = 0x1;		/* Environment reporting */
2676 			arr[n++] = 0xd;
2677 			arr[n++] = 0xff;	/* these subpages */
2678 			arr[3] = n - 4;
2679 			break;
2680 		case 0x2f:	/* Informational exceptions subpages */
2681 			n = 4;
2682 			arr[n++] = 0x2f;
2683 			arr[n++] = 0x0;		/* Informational exceptions */
2684 			arr[n++] = 0x2f;
2685 			arr[n++] = 0xff;	/* these subpages */
2686 			arr[3] = n - 4;
2687 			break;
2688 		default:
2689 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2690 			return check_condition_result;
2691 		}
2692 	} else if (subpcode > 0) {
2693 		arr[0] |= 0x40;
2694 		arr[1] = subpcode;
2695 		if (pcode == 0xd && subpcode == 1)
2696 			arr[3] = resp_env_rep_l_spg(arr + 4);
2697 		else {
2698 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2699 			return check_condition_result;
2700 		}
2701 	} else {
2702 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2703 		return check_condition_result;
2704 	}
2705 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2706 	return fill_from_dev_buffer(scp, arr,
2707 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2708 }
2709 
2710 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2711 {
2712 	return devip->nr_zones != 0;
2713 }
2714 
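/* Map an LBA to its zone; zone size is a power-of-two number of LBAs */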
2715 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2716 					unsigned long long lba)
2717 {
2718 	return &devip->zstate[lba >> devip->zsize_shift];
2719 }
2720 
2721 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2722 {
2723 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2724 }
2725 
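/*
 * Closing an open zone moves it back to EMPTY if its write pointer is still
 * at the zone start, otherwise to CLOSED, adjusting the open zone counters.
 */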
2726 static void zbc_close_zone(struct sdebug_dev_info *devip,
2727 			   struct sdeb_zone_state *zsp)
2728 {
2729 	enum sdebug_z_cond zc;
2730 
2731 	if (zbc_zone_is_conv(zsp))
2732 		return;
2733 
2734 	zc = zsp->z_cond;
2735 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2736 		return;
2737 
2738 	if (zc == ZC2_IMPLICIT_OPEN)
2739 		devip->nr_imp_open--;
2740 	else
2741 		devip->nr_exp_open--;
2742 
2743 	if (zsp->z_wp == zsp->z_start) {
2744 		zsp->z_cond = ZC1_EMPTY;
2745 	} else {
2746 		zsp->z_cond = ZC4_CLOSED;
2747 		devip->nr_closed++;
2748 	}
2749 }
2750 
2751 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2752 {
2753 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2754 	unsigned int i;
2755 
2756 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2757 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2758 			zbc_close_zone(devip, zsp);
2759 			return;
2760 		}
2761 	}
2762 }
2763 
2764 static void zbc_open_zone(struct sdebug_dev_info *devip,
2765 			  struct sdeb_zone_state *zsp, bool explicit)
2766 {
2767 	enum sdebug_z_cond zc;
2768 
2769 	if (zbc_zone_is_conv(zsp))
2770 		return;
2771 
2772 	zc = zsp->z_cond;
2773 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2774 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2775 		return;
2776 
2777 	/* Close an implicit open zone if necessary */
2778 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2779 		zbc_close_zone(devip, zsp);
2780 	else if (devip->max_open &&
2781 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2782 		zbc_close_imp_open_zone(devip);
2783 
2784 	if (zsp->z_cond == ZC4_CLOSED)
2785 		devip->nr_closed--;
2786 	if (explicit) {
2787 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2788 		devip->nr_exp_open++;
2789 	} else {
2790 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2791 		devip->nr_imp_open++;
2792 	}
2793 }
2794 
2795 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2796 		       unsigned long long lba, unsigned int num)
2797 {
2798 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2799 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2800 
2801 	if (zbc_zone_is_conv(zsp))
2802 		return;
2803 
2804 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2805 		zsp->z_wp += num;
2806 		if (zsp->z_wp >= zend)
2807 			zsp->z_cond = ZC5_FULL;
2808 		return;
2809 	}
2810 
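	/*
	 * Sequential write preferred zone: a write may start away from the
	 * write pointer and span zone boundaries, so advance the WP of each
	 * zone touched and note any non-sequential resource use.
	 */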
2811 	while (num) {
2812 		if (lba != zsp->z_wp)
2813 			zsp->z_non_seq_resource = true;
2814 
2815 		end = lba + num;
2816 		if (end >= zend) {
2817 			n = zend - lba;
2818 			zsp->z_wp = zend;
2819 		} else if (end > zsp->z_wp) {
2820 			n = num;
2821 			zsp->z_wp = end;
2822 		} else {
2823 			n = num;
2824 		}
2825 		if (zsp->z_wp >= zend)
2826 			zsp->z_cond = ZC5_FULL;
2827 
2828 		num -= n;
2829 		lba += n;
2830 		if (num) {
2831 			zsp++;
2832 			zend = zsp->z_start + zsp->z_size;
2833 		}
2834 	}
2835 }
2836 
2837 static int check_zbc_access_params(struct scsi_cmnd *scp,
2838 			unsigned long long lba, unsigned int num, bool write)
2839 {
2840 	struct scsi_device *sdp = scp->device;
2841 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2842 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2843 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2844 
2845 	if (!write) {
2846 		if (devip->zmodel == BLK_ZONED_HA)
2847 			return 0;
2848 		/* For host-managed, reads cannot cross zone type boundaries */
2849 		if (zsp_end != zsp &&
2850 		    zbc_zone_is_conv(zsp) &&
2851 		    !zbc_zone_is_conv(zsp_end)) {
2852 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2853 					LBA_OUT_OF_RANGE,
2854 					READ_INVDATA_ASCQ);
2855 			return check_condition_result;
2856 		}
2857 		return 0;
2858 	}
2859 
2860 	/* No restrictions for writes within conventional zones */
2861 	if (zbc_zone_is_conv(zsp)) {
2862 		if (!zbc_zone_is_conv(zsp_end)) {
2863 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2864 					LBA_OUT_OF_RANGE,
2865 					WRITE_BOUNDARY_ASCQ);
2866 			return check_condition_result;
2867 		}
2868 		return 0;
2869 	}
2870 
2871 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2872 		/* Writes cannot cross sequential zone boundaries */
2873 		if (zsp_end != zsp) {
2874 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2875 					LBA_OUT_OF_RANGE,
2876 					WRITE_BOUNDARY_ASCQ);
2877 			return check_condition_result;
2878 		}
2879 		/* Cannot write full zones */
2880 		if (zsp->z_cond == ZC5_FULL) {
2881 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2882 					INVALID_FIELD_IN_CDB, 0);
2883 			return check_condition_result;
2884 		}
2885 		/* Writes must be aligned to the zone WP */
2886 		if (lba != zsp->z_wp) {
2887 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2888 					LBA_OUT_OF_RANGE,
2889 					UNALIGNED_WRITE_ASCQ);
2890 			return check_condition_result;
2891 		}
2892 	}
2893 
2894 	/* Handle implicit open of closed and empty zones */
2895 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2896 		if (devip->max_open &&
2897 		    devip->nr_exp_open >= devip->max_open) {
2898 			mk_sense_buffer(scp, DATA_PROTECT,
2899 					INSUFF_RES_ASC,
2900 					INSUFF_ZONE_ASCQ);
2901 			return check_condition_result;
2902 		}
2903 		zbc_open_zone(devip, zsp, false);
2904 	}
2905 
2906 	return 0;
2907 }
2908 
2909 static inline int check_device_access_params
2910 			(struct scsi_cmnd *scp, unsigned long long lba,
2911 			 unsigned int num, bool write)
2912 {
2913 	struct scsi_device *sdp = scp->device;
2914 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2915 
2916 	if (lba + num > sdebug_capacity) {
2917 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2918 		return check_condition_result;
2919 	}
2920 	/* transfer length excessive (tie in to block limits VPD page) */
2921 	if (num > sdebug_store_sectors) {
2922 		/* needs work to find which cdb byte 'num' comes from */
2923 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2924 		return check_condition_result;
2925 	}
2926 	if (write && unlikely(sdebug_wp)) {
2927 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2928 		return check_condition_result;
2929 	}
2930 	if (sdebug_dev_is_zoned(devip))
2931 		return check_zbc_access_params(scp, lba, num, write);
2932 
2933 	return 0;
2934 }
2935 
2936 /*
2937  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2938  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2939  * that access any of the "stores" in struct sdeb_store_info should call this
2940  * function with bug_if_fake_rw set to true.
2941  */
2942 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2943 						bool bug_if_fake_rw)
2944 {
2945 	if (sdebug_fake_rw) {
2946 		BUG_ON(bug_if_fake_rw);	/* See note above */
2947 		return NULL;
2948 	}
2949 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2950 }
2951 
2952 /* Returns number of bytes copied or -1 if error. */
2953 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2954 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2955 {
2956 	int ret;
2957 	u64 block, rest = 0;
2958 	enum dma_data_direction dir;
2959 	struct scsi_data_buffer *sdb = &scp->sdb;
2960 	u8 *fsp;
2961 
2962 	if (do_write) {
2963 		dir = DMA_TO_DEVICE;
2964 		write_since_sync = true;
2965 	} else {
2966 		dir = DMA_FROM_DEVICE;
2967 	}
2968 
2969 	if (!sdb->length || !sip)
2970 		return 0;
2971 	if (scp->sc_data_direction != dir)
2972 		return -1;
2973 	fsp = sip->storep;
2974 
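	/* do_div() leaves the quotient in lba and returns the remainder */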
2975 	block = do_div(lba, sdebug_store_sectors);
2976 	if (block + num > sdebug_store_sectors)
2977 		rest = block + num - sdebug_store_sectors;
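	/* 'rest' sectors wrap around to the start of the store */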
2978 
2979 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2980 		   fsp + (block * sdebug_sector_size),
2981 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2982 	if (ret != (num - rest) * sdebug_sector_size)
2983 		return ret;
2984 
2985 	if (rest) {
2986 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2987 			    fsp, rest * sdebug_sector_size,
2988 			    sg_skip + ((num - rest) * sdebug_sector_size),
2989 			    do_write);
2990 	}
2991 
2992 	return ret;
2993 }
2994 
2995 /* Returns number of bytes copied or -1 if error. */
2996 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2997 {
2998 	struct scsi_data_buffer *sdb = &scp->sdb;
2999 
3000 	if (!sdb->length)
3001 		return 0;
3002 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3003 		return -1;
3004 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3005 			      num * sdebug_sector_size, 0, true);
3006 }
3007 
3008 /* If sip->storep+lba compares equal to arr(num), then copy the second half
3009  * of arr (the new data) into sip->storep+lba and return true. If the
3010  * comparison fails then return false. */
3011 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3012 			      const u8 *arr, bool compare_only)
3013 {
3014 	bool res;
3015 	u64 block, rest = 0;
3016 	u32 store_blks = sdebug_store_sectors;
3017 	u32 lb_size = sdebug_sector_size;
3018 	u8 *fsp = sip->storep;
3019 
3020 	block = do_div(lba, store_blks);
3021 	if (block + num > store_blks)
3022 		rest = block + num - store_blks;
3023 
3024 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3025 	if (!res)
3026 		return res;
3027 	if (rest)
3028 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
3029 			     rest * lb_size);
3030 	if (!res)
3031 		return res;
3032 	if (compare_only)
3033 		return true;
3034 	arr += num * lb_size;
3035 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3036 	if (rest)
3037 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3038 	return res;
3039 }
3040 
3041 static __be16 dif_compute_csum(const void *buf, int len)
3042 {
3043 	__be16 csum;
3044 
3045 	if (sdebug_guard)
3046 		csum = (__force __be16)ip_compute_csum(buf, len);
3047 	else
3048 		csum = cpu_to_be16(crc_t10dif(buf, len));
3049 
3050 	return csum;
3051 }
3052 
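/*
 * Verify one sector's PI tuple. Non-zero returns follow the ASCQ values
 * defined for ASC 0x10: 0x1 guard check failed, 0x3 reference tag check
 * failed.
 */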
3053 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3054 		      sector_t sector, u32 ei_lba)
3055 {
3056 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3057 
3058 	if (sdt->guard_tag != csum) {
3059 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3060 			(unsigned long)sector,
3061 			be16_to_cpu(sdt->guard_tag),
3062 			be16_to_cpu(csum));
3063 		return 0x01;
3064 	}
3065 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3066 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3067 		pr_err("REF check failed on sector %lu\n",
3068 			(unsigned long)sector);
3069 		return 0x03;
3070 	}
3071 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3072 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3073 		pr_err("REF check failed on sector %lu\n",
3074 			(unsigned long)sector);
3075 		return 0x03;
3076 	}
3077 	return 0;
3078 }
3079 
3080 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3081 			  unsigned int sectors, bool read)
3082 {
3083 	size_t resid;
3084 	void *paddr;
3085 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3086 						scp->device->hostdata, true);
3087 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3088 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3089 	struct sg_mapping_iter miter;
3090 
3091 	/* Bytes of protection data to copy into sgl */
3092 	resid = sectors * sizeof(*dif_storep);
3093 
3094 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3095 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3096 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3097 
3098 	while (sg_miter_next(&miter) && resid > 0) {
3099 		size_t len = min_t(size_t, miter.length, resid);
3100 		void *start = dif_store(sip, sector);
3101 		size_t rest = 0;
3102 
3103 		if (dif_store_end < start + len)
3104 			rest = start + len - dif_store_end;
3105 
3106 		paddr = miter.addr;
3107 
3108 		if (read)
3109 			memcpy(paddr, start, len - rest);
3110 		else
3111 			memcpy(start, paddr, len - rest);
3112 
3113 		if (rest) {
3114 			if (read)
3115 				memcpy(paddr + len - rest, dif_storep, rest);
3116 			else
3117 				memcpy(dif_storep, paddr + len - rest, rest);
3118 		}
3119 
3120 		sector += len / sizeof(*dif_storep);
3121 		resid -= len;
3122 	}
3123 	sg_miter_stop(&miter);
3124 }
3125 
3126 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3127 			    unsigned int sectors, u32 ei_lba)
3128 {
3129 	int ret = 0;
3130 	unsigned int i;
3131 	sector_t sector;
3132 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3133 						scp->device->hostdata, true);
3134 	struct t10_pi_tuple *sdt;
3135 
3136 	for (i = 0; i < sectors; i++, ei_lba++) {
3137 		sector = start_sec + i;
3138 		sdt = dif_store(sip, sector);
3139 
3140 		if (sdt->app_tag == cpu_to_be16(0xffff))
3141 			continue;
3142 
3143 		/*
3144 		 * Because scsi_debug acts as both initiator and
3145 		 * target we proceed to verify the PI even if
3146 		 * RDPROTECT=3. This is done so the "initiator" knows
3147 		 * which type of error to return. Otherwise we would
3148 		 * have to iterate over the PI twice.
3149 		 */
3150 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3151 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3152 					 sector, ei_lba);
3153 			if (ret) {
3154 				dif_errors++;
3155 				break;
3156 			}
3157 		}
3158 	}
3159 
3160 	dif_copy_prot(scp, start_sec, sectors, true);
3161 	dix_reads++;
3162 
3163 	return ret;
3164 }
3165 
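/*
 * Store access lock helpers. When sdebug_no_rwlock is set, real locking is
 * skipped; the __acquire()/__release() annotations are no-ops at runtime
 * and only keep sparse's lock-balance checking consistent with the locked
 * variants below.
 */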
3166 static inline void
3167 sdeb_read_lock(struct sdeb_store_info *sip)
3168 {
3169 	if (sdebug_no_rwlock) {
3170 		if (sip)
3171 			__acquire(&sip->macc_lck);
3172 		else
3173 			__acquire(&sdeb_fake_rw_lck);
3174 	} else {
3175 		if (sip)
3176 			read_lock(&sip->macc_lck);
3177 		else
3178 			read_lock(&sdeb_fake_rw_lck);
3179 	}
3180 }
3181 
3182 static inline void
3183 sdeb_read_unlock(struct sdeb_store_info *sip)
3184 {
3185 	if (sdebug_no_rwlock) {
3186 		if (sip)
3187 			__release(&sip->macc_lck);
3188 		else
3189 			__release(&sdeb_fake_rw_lck);
3190 	} else {
3191 		if (sip)
3192 			read_unlock(&sip->macc_lck);
3193 		else
3194 			read_unlock(&sdeb_fake_rw_lck);
3195 	}
3196 }
3197 
3198 static inline void
3199 sdeb_write_lock(struct sdeb_store_info *sip)
3200 {
3201 	if (sdebug_no_rwlock) {
3202 		if (sip)
3203 			__acquire(&sip->macc_lck);
3204 		else
3205 			__acquire(&sdeb_fake_rw_lck);
3206 	} else {
3207 		if (sip)
3208 			write_lock(&sip->macc_lck);
3209 		else
3210 			write_lock(&sdeb_fake_rw_lck);
3211 	}
3212 }
3213 
3214 static inline void
3215 sdeb_write_unlock(struct sdeb_store_info *sip)
3216 {
3217 	if (sdebug_no_rwlock) {
3218 		if (sip)
3219 			__release(&sip->macc_lck);
3220 		else
3221 			__release(&sdeb_fake_rw_lck);
3222 	} else {
3223 		if (sip)
3224 			write_unlock(&sip->macc_lck);
3225 		else
3226 			write_unlock(&sdeb_fake_rw_lck);
3227 	}
3228 }
3229 
3230 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3231 {
3232 	bool check_prot;
3233 	u32 num;
3234 	u32 ei_lba;
3235 	int ret;
3236 	u64 lba;
3237 	struct sdeb_store_info *sip = devip2sip(devip, true);
3238 	u8 *cmd = scp->cmnd;
3239 
3240 	switch (cmd[0]) {
3241 	case READ_16:
3242 		ei_lba = 0;
3243 		lba = get_unaligned_be64(cmd + 2);
3244 		num = get_unaligned_be32(cmd + 10);
3245 		check_prot = true;
3246 		break;
3247 	case READ_10:
3248 		ei_lba = 0;
3249 		lba = get_unaligned_be32(cmd + 2);
3250 		num = get_unaligned_be16(cmd + 7);
3251 		check_prot = true;
3252 		break;
3253 	case READ_6:
3254 		ei_lba = 0;
3255 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3256 		      (u32)(cmd[1] & 0x1f) << 16;
3257 		num = (0 == cmd[4]) ? 256 : cmd[4];
3258 		check_prot = true;
3259 		break;
3260 	case READ_12:
3261 		ei_lba = 0;
3262 		lba = get_unaligned_be32(cmd + 2);
3263 		num = get_unaligned_be32(cmd + 6);
3264 		check_prot = true;
3265 		break;
3266 	case XDWRITEREAD_10:
3267 		ei_lba = 0;
3268 		lba = get_unaligned_be32(cmd + 2);
3269 		num = get_unaligned_be16(cmd + 7);
3270 		check_prot = false;
3271 		break;
3272 	default:	/* assume READ(32) */
3273 		lba = get_unaligned_be64(cmd + 12);
3274 		ei_lba = get_unaligned_be32(cmd + 20);
3275 		num = get_unaligned_be32(cmd + 28);
3276 		check_prot = false;
3277 		break;
3278 	}
3279 	if (unlikely(have_dif_prot && check_prot)) {
3280 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3281 		    (cmd[1] & 0xe0)) {
3282 			mk_sense_invalid_opcode(scp);
3283 			return check_condition_result;
3284 		}
3285 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3286 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3287 		    (cmd[1] & 0xe0) == 0)
3288 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3289 				    "to DIF device\n");
3290 	}
3291 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3292 		     atomic_read(&sdeb_inject_pending))) {
3293 		num /= 2;
3294 		atomic_set(&sdeb_inject_pending, 0);
3295 	}
3296 
3297 	ret = check_device_access_params(scp, lba, num, false);
3298 	if (ret)
3299 		return ret;
3300 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3301 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3302 		     ((lba + num) > sdebug_medium_error_start))) {
3303 		/* claim unrecoverable read error */
3304 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3305 		/* set info field and valid bit for fixed descriptor */
3306 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3307 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3308 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3309 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3310 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3311 		}
3312 		scsi_set_resid(scp, scsi_bufflen(scp));
3313 		return check_condition_result;
3314 	}
3315 
3316 	sdeb_read_lock(sip);
3317 
3318 	/* DIX + T10 DIF */
3319 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3320 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3321 		case 1: /* Guard tag error */
3322 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3323 				sdeb_read_unlock(sip);
3324 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3325 				return check_condition_result;
3326 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3327 				sdeb_read_unlock(sip);
3328 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3329 				return illegal_condition_result;
3330 			}
3331 			break;
3332 		case 3: /* Reference tag error */
3333 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3334 				sdeb_read_unlock(sip);
3335 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3336 				return check_condition_result;
3337 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3338 				sdeb_read_unlock(sip);
3339 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3340 				return illegal_condition_result;
3341 			}
3342 			break;
3343 		}
3344 	}
3345 
3346 	ret = do_device_access(sip, scp, 0, lba, num, false);
3347 	sdeb_read_unlock(sip);
3348 	if (unlikely(ret == -1))
3349 		return DID_ERROR << 16;
3350 
3351 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3352 
3353 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3354 		     atomic_read(&sdeb_inject_pending))) {
3355 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3356 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3357 			atomic_set(&sdeb_inject_pending, 0);
3358 			return check_condition_result;
3359 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3360 			/* Logical block guard check failed */
3361 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3362 			atomic_set(&sdeb_inject_pending, 0);
3363 			return illegal_condition_result;
3364 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3365 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3366 			atomic_set(&sdeb_inject_pending, 0);
3367 			return illegal_condition_result;
3368 		}
3369 	}
3370 	return 0;
3371 }
3372 
3373 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3374 			     unsigned int sectors, u32 ei_lba)
3375 {
3376 	int ret;
3377 	struct t10_pi_tuple *sdt;
3378 	void *daddr;
3379 	sector_t sector = start_sec;
3380 	int ppage_offset;
3381 	int dpage_offset;
3382 	struct sg_mapping_iter diter;
3383 	struct sg_mapping_iter piter;
3384 
3385 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3386 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3387 
3388 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3389 			scsi_prot_sg_count(SCpnt),
3390 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3391 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3392 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3393 
3394 	/* For each protection page */
3395 	while (sg_miter_next(&piter)) {
3396 		dpage_offset = 0;
3397 		if (WARN_ON(!sg_miter_next(&diter))) {
3398 			ret = 0x01;
3399 			goto out;
3400 		}
3401 
3402 		for (ppage_offset = 0; ppage_offset < piter.length;
3403 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3404 			/* If we're at the end of the current
3405 			 * data page advance to the next one
3406 			 */
3407 			if (dpage_offset >= diter.length) {
3408 				if (WARN_ON(!sg_miter_next(&diter))) {
3409 					ret = 0x01;
3410 					goto out;
3411 				}
3412 				dpage_offset = 0;
3413 			}
3414 
3415 			sdt = piter.addr + ppage_offset;
3416 			daddr = diter.addr + dpage_offset;
3417 
3418 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3419 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3420 				if (ret)
3421 					goto out;
3422 			}
3423 
3424 			sector++;
3425 			ei_lba++;
3426 			dpage_offset += sdebug_sector_size;
3427 		}
3428 		diter.consumed = dpage_offset;
3429 		sg_miter_stop(&diter);
3430 	}
3431 	sg_miter_stop(&piter);
3432 
3433 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3434 	dix_writes++;
3435 
3436 	return 0;
3437 
3438 out:
3439 	dif_errors++;
3440 	sg_miter_stop(&diter);
3441 	sg_miter_stop(&piter);
3442 	return ret;
3443 }
3444 
3445 static unsigned long lba_to_map_index(sector_t lba)
3446 {
3447 	if (sdebug_unmap_alignment)
3448 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3449 	sector_div(lba, sdebug_unmap_granularity);
3450 	return lba;
3451 }
3452 
3453 static sector_t map_index_to_lba(unsigned long index)
3454 {
3455 	sector_t lba = index * sdebug_unmap_granularity;
3456 
3457 	if (sdebug_unmap_alignment)
3458 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3459 	return lba;
3460 }
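
/*
 * Worked example with illustrative values: if sdebug_unmap_granularity is 8
 * and sdebug_unmap_alignment is 4, lba_to_map_index() computes
 * (lba + 8 - 4) / 8, so LBAs 0-3 land in index 0, LBAs 4-11 in index 1,
 * LBAs 12-19 in index 2, and so on. map_index_to_lba() inverts this for
 * index >= 1, e.g. map_index_to_lba(2) = 2 * 8 - (8 - 4) = 12.
 */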
3461 
3462 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3463 			      unsigned int *num)
3464 {
3465 	sector_t end;
3466 	unsigned int mapped;
3467 	unsigned long index;
3468 	unsigned long next;
3469 
3470 	index = lba_to_map_index(lba);
3471 	mapped = test_bit(index, sip->map_storep);
3472 
3473 	if (mapped)
3474 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3475 	else
3476 		next = find_next_bit(sip->map_storep, map_size, index);
3477 
3478 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3479 	*num = end - lba;
3480 	return mapped;
3481 }
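
/*
 * Example: if the provisioning map bits from lba's index onward are
 * 1,1,0,... then map_state() returns 1 (mapped) and sets *num to the LBA
 * count up to the first unmapped chunk, clamped to the end of the store.
 */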
3482 
3483 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3484 		       unsigned int len)
3485 {
3486 	sector_t end = lba + len;
3487 
3488 	while (lba < end) {
3489 		unsigned long index = lba_to_map_index(lba);
3490 
3491 		if (index < map_size)
3492 			set_bit(index, sip->map_storep);
3493 
3494 		lba = map_index_to_lba(index + 1);
3495 	}
3496 }
3497 
3498 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3499 			 unsigned int len)
3500 {
3501 	sector_t end = lba + len;
3502 	u8 *fsp = sip->storep;
3503 
3504 	while (lba < end) {
3505 		unsigned long index = lba_to_map_index(lba);
3506 
3507 		if (lba == map_index_to_lba(index) &&
3508 		    lba + sdebug_unmap_granularity <= end &&
3509 		    index < map_size) {
3510 			clear_bit(index, sip->map_storep);
3511 			if (sdebug_lbprz) {  /* LBPRZ=1: read back 0s; LBPRZ=2: 0xffs */
3512 				memset(fsp + lba * sdebug_sector_size,
3513 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3514 				       sdebug_sector_size *
3515 				       sdebug_unmap_granularity);
3516 			}
3517 			if (sip->dif_storep) {
3518 				memset(sip->dif_storep + lba, 0xff,
3519 				       sizeof(*sip->dif_storep) *
3520 				       sdebug_unmap_granularity);
3521 			}
3522 		}
3523 		lba = map_index_to_lba(index + 1);
3524 	}
3525 }
3526 
3527 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3528 {
3529 	bool check_prot;
3530 	u32 num;
3531 	u32 ei_lba;
3532 	int ret;
3533 	u64 lba;
3534 	struct sdeb_store_info *sip = devip2sip(devip, true);
3535 	u8 *cmd = scp->cmnd;
3536 
3537 	switch (cmd[0]) {
3538 	case WRITE_16:
3539 		ei_lba = 0;
3540 		lba = get_unaligned_be64(cmd + 2);
3541 		num = get_unaligned_be32(cmd + 10);
3542 		check_prot = true;
3543 		break;
3544 	case WRITE_10:
3545 		ei_lba = 0;
3546 		lba = get_unaligned_be32(cmd + 2);
3547 		num = get_unaligned_be16(cmd + 7);
3548 		check_prot = true;
3549 		break;
3550 	case WRITE_6:
3551 		ei_lba = 0;
3552 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3553 		      (u32)(cmd[1] & 0x1f) << 16;
3554 		num = (0 == cmd[4]) ? 256 : cmd[4];
3555 		check_prot = true;
3556 		break;
3557 	case WRITE_12:
3558 		ei_lba = 0;
3559 		lba = get_unaligned_be32(cmd + 2);
3560 		num = get_unaligned_be32(cmd + 6);
3561 		check_prot = true;
3562 		break;
3563 	case XDWRITEREAD_10:	/* opcode 0x53 */
3564 		ei_lba = 0;
3565 		lba = get_unaligned_be32(cmd + 2);
3566 		num = get_unaligned_be16(cmd + 7);
3567 		check_prot = false;
3568 		break;
3569 	default:	/* assume WRITE(32) */
3570 		lba = get_unaligned_be64(cmd + 12);
3571 		ei_lba = get_unaligned_be32(cmd + 20);
3572 		num = get_unaligned_be32(cmd + 28);
3573 		check_prot = false;
3574 		break;
3575 	}
3576 	if (unlikely(have_dif_prot && check_prot)) {
3577 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3578 		    (cmd[1] & 0xe0)) {
3579 			mk_sense_invalid_opcode(scp);
3580 			return check_condition_result;
3581 		}
3582 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3583 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3584 		    (cmd[1] & 0xe0) == 0)
3585 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3586 				    "to DIF device\n");
3587 	}
3588 
3589 	sdeb_write_lock(sip);
3590 	ret = check_device_access_params(scp, lba, num, true);
3591 	if (ret) {
3592 		sdeb_write_unlock(sip);
3593 		return ret;
3594 	}
3595 
3596 	/* DIX + T10 DIF */
3597 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3598 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3599 		case 1: /* Guard tag error */
3600 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3601 				sdeb_write_unlock(sip);
3602 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3603 				return illegal_condition_result;
3604 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3605 				sdeb_write_unlock(sip);
3606 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3607 				return check_condition_result;
3608 			}
3609 			break;
3610 		case 3: /* Reference tag error */
3611 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3612 				sdeb_write_unlock(sip);
3613 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3614 				return illegal_condition_result;
3615 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3616 				sdeb_write_unlock(sip);
3617 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3618 				return check_condition_result;
3619 			}
3620 			break;
3621 		}
3622 	}
3623 
3624 	ret = do_device_access(sip, scp, 0, lba, num, true);
3625 	if (unlikely(scsi_debug_lbp()))
3626 		map_region(sip, lba, num);
3627 	/* If ZBC zone then bump its write pointer */
3628 	if (sdebug_dev_is_zoned(devip))
3629 		zbc_inc_wp(devip, lba, num);
3630 	sdeb_write_unlock(sip);
3631 	if (unlikely(-1 == ret))
3632 		return DID_ERROR << 16;
3633 	else if (unlikely(sdebug_verbose &&
3634 			  (ret < (num * sdebug_sector_size))))
3635 		sdev_printk(KERN_INFO, scp->device,
3636 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3637 			    my_name, num * sdebug_sector_size, ret);
3638 
3639 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3640 		     atomic_read(&sdeb_inject_pending))) {
3641 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3642 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3643 			atomic_set(&sdeb_inject_pending, 0);
3644 			return check_condition_result;
3645 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3646 			/* Logical block guard check failed */
3647 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3648 			atomic_set(&sdeb_inject_pending, 0);
3649 			return illegal_condition_result;
3650 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3651 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3652 			atomic_set(&sdeb_inject_pending, 0);
3653 			return illegal_condition_result;
3654 		}
3655 	}
3656 	return 0;
3657 }
3658 
3659 /*
3660  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3661  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3662  */
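/*
 * Data-out layout, as parsed below: the first lbdof * lb_size bytes carry a
 * 32 byte parameter list header followed by num_lrd LBA range descriptors,
 * each lrd_size (32) bytes holding an 8 byte LBA and a 4 byte block count
 * (plus a 4 byte expected initial LBA for the 32 byte cdb variant); the
 * write data for the descriptors follows, packed back to back. For example,
 * with 512 byte blocks and lbdof=1, at most (512 - 32) / 32 = 15 descriptors
 * fit, which is what the length check below enforces.
 */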
3663 static int resp_write_scat(struct scsi_cmnd *scp,
3664 			   struct sdebug_dev_info *devip)
3665 {
3666 	u8 *cmd = scp->cmnd;
3667 	u8 *lrdp = NULL;
3668 	u8 *up;
3669 	struct sdeb_store_info *sip = devip2sip(devip, true);
3670 	u8 wrprotect;
3671 	u16 lbdof, num_lrd, k;
3672 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3673 	u32 lb_size = sdebug_sector_size;
3674 	u32 ei_lba;
3675 	u64 lba;
3676 	int ret, res;
3677 	bool is_16;
3678 	static const u32 lrd_size = 32; /* LBA range descriptor size; the parameter list header is the same size */
3679 
3680 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3681 		is_16 = false;
3682 		wrprotect = (cmd[10] >> 5) & 0x7;
3683 		lbdof = get_unaligned_be16(cmd + 12);
3684 		num_lrd = get_unaligned_be16(cmd + 16);
3685 		bt_len = get_unaligned_be32(cmd + 28);
3686 	} else {        /* that leaves WRITE SCATTERED(16) */
3687 		is_16 = true;
3688 		wrprotect = (cmd[2] >> 5) & 0x7;
3689 		lbdof = get_unaligned_be16(cmd + 4);
3690 		num_lrd = get_unaligned_be16(cmd + 8);
3691 		bt_len = get_unaligned_be32(cmd + 10);
3692 		if (unlikely(have_dif_prot)) {
3693 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3694 			    wrprotect) {
3695 				mk_sense_invalid_opcode(scp);
3696 				return illegal_condition_result;
3697 			}
3698 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3699 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3700 			     wrprotect == 0)
3701 				sdev_printk(KERN_ERR, scp->device,
3702 					    "Unprotected WR to DIF device\n");
3703 		}
3704 	}
3705 	if ((num_lrd == 0) || (bt_len == 0))
3706 		return 0;       /* T10 says these do-nothings are not errors */
3707 	if (lbdof == 0) {
3708 		if (sdebug_verbose)
3709 			sdev_printk(KERN_INFO, scp->device,
3710 				"%s: %s: LB Data Offset field bad\n",
3711 				my_name, __func__);
3712 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3713 		return illegal_condition_result;
3714 	}
3715 	lbdof_blen = lbdof * lb_size;
3716 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3717 		if (sdebug_verbose)
3718 			sdev_printk(KERN_INFO, scp->device,
3719 				"%s: %s: LBA range descriptors don't fit\n",
3720 				my_name, __func__);
3721 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3722 		return illegal_condition_result;
3723 	}
3724 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3725 	if (lrdp == NULL)
3726 		return SCSI_MLQUEUE_HOST_BUSY;
3727 	if (sdebug_verbose)
3728 		sdev_printk(KERN_INFO, scp->device,
3729 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3730 			my_name, __func__, lbdof_blen);
3731 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3732 	if (res == -1) {
3733 		ret = DID_ERROR << 16;
3734 		goto err_out;
3735 	}
3736 
3737 	sdeb_write_lock(sip);
3738 	sg_off = lbdof_blen;
3739 	/* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3740 	cum_lb = 0;
3741 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3742 		lba = get_unaligned_be64(up + 0);
3743 		num = get_unaligned_be32(up + 8);
3744 		if (sdebug_verbose)
3745 			sdev_printk(KERN_INFO, scp->device,
3746 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3747 				my_name, __func__, k, lba, num, sg_off);
3748 		if (num == 0)
3749 			continue;
3750 		ret = check_device_access_params(scp, lba, num, true);
3751 		if (ret)
3752 			goto err_out_unlock;
3753 		num_by = num * lb_size;
3754 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3755 
3756 		if ((cum_lb + num) > bt_len) {
3757 			if (sdebug_verbose)
3758 				sdev_printk(KERN_INFO, scp->device,
3759 				    "%s: %s: sum of blocks > data provided\n",
3760 				    my_name, __func__);
3761 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3762 					0);
3763 			ret = illegal_condition_result;
3764 			goto err_out_unlock;
3765 		}
3766 
3767 		/* DIX + T10 DIF */
3768 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3769 			int prot_ret = prot_verify_write(scp, lba, num,
3770 							 ei_lba);
3771 
3772 			if (prot_ret) {
3773 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3774 						prot_ret);
3775 				ret = illegal_condition_result;
3776 				goto err_out_unlock;
3777 			}
3778 		}
3779 
3780 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3781 		/* If ZBC zone then bump its write pointer */
3782 		if (sdebug_dev_is_zoned(devip))
3783 			zbc_inc_wp(devip, lba, num);
3784 		if (unlikely(scsi_debug_lbp()))
3785 			map_region(sip, lba, num);
3786 		if (unlikely(-1 == ret)) {
3787 			ret = DID_ERROR << 16;
3788 			goto err_out_unlock;
3789 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3790 			sdev_printk(KERN_INFO, scp->device,
3791 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3792 			    my_name, num_by, ret);
3793 
3794 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3795 			     atomic_read(&sdeb_inject_pending))) {
3796 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3797 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3798 				atomic_set(&sdeb_inject_pending, 0);
3799 				ret = check_condition_result;
3800 				goto err_out_unlock;
3801 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3802 				/* Logical block guard check failed */
3803 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3804 				atomic_set(&sdeb_inject_pending, 0);
3805 				ret = illegal_condition_result;
3806 				goto err_out_unlock;
3807 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3808 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3809 				atomic_set(&sdeb_inject_pending, 0);
3810 				ret = illegal_condition_result;
3811 				goto err_out_unlock;
3812 			}
3813 		}
3814 		sg_off += num_by;
3815 		cum_lb += num;
3816 	}
3817 	ret = 0;
3818 err_out_unlock:
3819 	sdeb_write_unlock(sip);
3820 err_out:
3821 	kfree(lrdp);
3822 	return ret;
3823 }
3824 
3825 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3826 			   u32 ei_lba, bool unmap, bool ndob)
3827 {
3828 	struct scsi_device *sdp = scp->device;
3829 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3830 	unsigned long long i;
3831 	u64 block, lbaa;
3832 	u32 lb_size = sdebug_sector_size;
3833 	int ret;
3834 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3835 						scp->device->hostdata, true);
3836 	u8 *fs1p;
3837 	u8 *fsp;
3838 
3839 	sdeb_write_lock(sip);
3840 
3841 	ret = check_device_access_params(scp, lba, num, true);
3842 	if (ret) {
3843 		sdeb_write_unlock(sip);
3844 		return ret;
3845 	}
3846 
3847 	if (unmap && scsi_debug_lbp()) {
3848 		unmap_region(sip, lba, num);
3849 		goto out;
3850 	}
3851 	lbaa = lba;
3852 	block = do_div(lbaa, sdebug_store_sectors);
3853 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3854 	fsp = sip->storep;
3855 	fs1p = fsp + (block * lb_size);
3856 	if (ndob) {
3857 		memset(fs1p, 0, lb_size);
3858 		ret = 0;
3859 	} else
3860 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3861 
3862 	if (-1 == ret) {
3863 		sdeb_write_unlock(sip);
3864 		return DID_ERROR << 16;
3865 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3866 		sdev_printk(KERN_INFO, scp->device,
3867 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3868 			    my_name, "write same", lb_size, ret);
3869 
3870 	/* Copy first sector to remaining blocks */
3871 	for (i = 1 ; i < num ; i++) {
3872 		lbaa = lba + i;
3873 		block = do_div(lbaa, sdebug_store_sectors);
3874 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3875 	}
3876 	if (scsi_debug_lbp())
3877 		map_region(sip, lba, num);
3878 	/* If ZBC zone then bump its write pointer */
3879 	if (sdebug_dev_is_zoned(devip))
3880 		zbc_inc_wp(devip, lba, num);
3881 out:
3882 	sdeb_write_unlock(sip);
3883 
3884 	return 0;
3885 }
3886 
3887 static int resp_write_same_10(struct scsi_cmnd *scp,
3888 			      struct sdebug_dev_info *devip)
3889 {
3890 	u8 *cmd = scp->cmnd;
3891 	u32 lba;
3892 	u16 num;
3893 	u32 ei_lba = 0;
3894 	bool unmap = false;
3895 
3896 	if (cmd[1] & 0x8) {
3897 		if (sdebug_lbpws10 == 0) {
3898 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3899 			return check_condition_result;
3900 		} else
3901 			unmap = true;
3902 	}
3903 	lba = get_unaligned_be32(cmd + 2);
3904 	num = get_unaligned_be16(cmd + 7);
3905 	if (num > sdebug_write_same_length) {
3906 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3907 		return check_condition_result;
3908 	}
3909 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3910 }
3911 
3912 static int resp_write_same_16(struct scsi_cmnd *scp,
3913 			      struct sdebug_dev_info *devip)
3914 {
3915 	u8 *cmd = scp->cmnd;
3916 	u64 lba;
3917 	u32 num;
3918 	u32 ei_lba = 0;
3919 	bool unmap = false;
3920 	bool ndob = false;
3921 
3922 	if (cmd[1] & 0x8) {	/* UNMAP */
3923 		if (sdebug_lbpws == 0) {
3924 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3925 			return check_condition_result;
3926 		} else
3927 			unmap = true;
3928 	}
3929 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3930 		ndob = true;
3931 	lba = get_unaligned_be64(cmd + 2);
3932 	num = get_unaligned_be32(cmd + 10);
3933 	if (num > sdebug_write_same_length) {
3934 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3935 		return check_condition_result;
3936 	}
3937 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3938 }
3939 
3940 /* Note the mode field is in the same position as the (lower) service action
3941  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3942  * each mode of this command should be reported separately; left for later. */
3943 static int resp_write_buffer(struct scsi_cmnd *scp,
3944 			     struct sdebug_dev_info *devip)
3945 {
3946 	u8 *cmd = scp->cmnd;
3947 	struct scsi_device *sdp = scp->device;
3948 	struct sdebug_dev_info *dp;
3949 	u8 mode;
3950 
3951 	mode = cmd[1] & 0x1f;
3952 	switch (mode) {
3953 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3954 		/* set UAs on this device only */
3955 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3956 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3957 		break;
3958 	case 0x5:	/* download MC, save and ACT */
3959 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3960 		break;
3961 	case 0x6:	/* download MC with offsets and ACT */
3962 		/* set UAs on most devices (LUs) in this target */
3963 		list_for_each_entry(dp,
3964 				    &devip->sdbg_host->dev_info_list,
3965 				    dev_list)
3966 			if (dp->target == sdp->id) {
3967 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3968 				if (devip != dp)
3969 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3970 						dp->uas_bm);
3971 			}
3972 		break;
3973 	case 0x7:	/* download MC with offsets, save, and ACT */
3974 		/* set UA on all devices (LUs) in this target */
3975 		list_for_each_entry(dp,
3976 				    &devip->sdbg_host->dev_info_list,
3977 				    dev_list)
3978 			if (dp->target == sdp->id)
3979 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3980 					dp->uas_bm);
3981 		break;
3982 	default:
3983 		/* do nothing for this command for other mode values */
3984 		break;
3985 	}
3986 	return 0;
3987 }
3988 
3989 static int resp_comp_write(struct scsi_cmnd *scp,
3990 			   struct sdebug_dev_info *devip)
3991 {
3992 	u8 *cmd = scp->cmnd;
3993 	u8 *arr;
3994 	struct sdeb_store_info *sip = devip2sip(devip, true);
3995 	u64 lba;
3996 	u32 dnum;
3997 	u32 lb_size = sdebug_sector_size;
3998 	u8 num;
3999 	int ret;
4000 	int retval = 0;
4001 
4002 	lba = get_unaligned_be64(cmd + 2);
4003 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4004 	if (0 == num)
4005 		return 0;	/* degenerate case, not an error */
4006 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4007 	    (cmd[1] & 0xe0)) {
4008 		mk_sense_invalid_opcode(scp);
4009 		return check_condition_result;
4010 	}
4011 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4012 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4013 	    (cmd[1] & 0xe0) == 0)
4014 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4015 			    "to DIF device\n");
4016 	ret = check_device_access_params(scp, lba, num, false);
4017 	if (ret)
4018 		return ret;
4019 	dnum = 2 * num;
4020 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4021 	if (NULL == arr) {
4022 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4023 				INSUFF_RES_ASCQ);
4024 		return check_condition_result;
4025 	}
4026 
4027 	sdeb_write_lock(sip);
4028 
4029 	ret = do_dout_fetch(scp, dnum, arr);
4030 	if (ret == -1) {
4031 		retval = DID_ERROR << 16;
4032 		goto cleanup;
4033 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4034 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4035 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4036 			    dnum * lb_size, ret);
4037 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4038 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4039 		retval = check_condition_result;
4040 		goto cleanup;
4041 	}
4042 	if (scsi_debug_lbp())
4043 		map_region(sip, lba, num);
4044 cleanup:
4045 	sdeb_write_unlock(sip);
4046 	kfree(arr);
4047 	return retval;
4048 }
4049 
4050 struct unmap_block_desc {
4051 	__be64	lba;
4052 	__be32	blocks;
4053 	__be32	__reserved;
4054 };
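
/*
 * UNMAP parameter list, as validated below: an 8 byte header whose first
 * two be16 fields hold the remaining data length (payload_len - 2) and the
 * block descriptor data length (descriptors * 16), followed by the 16 byte
 * descriptors defined above.
 */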
4055 
4056 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4057 {
4058 	unsigned char *buf;
4059 	struct unmap_block_desc *desc;
4060 	struct sdeb_store_info *sip = devip2sip(devip, true);
4061 	unsigned int i, payload_len, descriptors;
4062 	int ret;
4063 
4064 	if (!scsi_debug_lbp())
4065 		return 0;	/* fib and say it's done */
4066 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4067 	BUG_ON(scsi_bufflen(scp) != payload_len);
4068 
4069 	descriptors = (payload_len - 8) / 16;
4070 	if (descriptors > sdebug_unmap_max_desc) {
4071 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4072 		return check_condition_result;
4073 	}
4074 
4075 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4076 	if (!buf) {
4077 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4078 				INSUFF_RES_ASCQ);
4079 		return check_condition_result;
4080 	}
4081 
4082 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4083 
4084 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4085 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4086 
4087 	desc = (void *)&buf[8];
4088 
4089 	sdeb_write_lock(sip);
4090 
4091 	for (i = 0 ; i < descriptors ; i++) {
4092 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4093 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4094 
4095 		ret = check_device_access_params(scp, lba, num, true);
4096 		if (ret)
4097 			goto out;
4098 
4099 		unmap_region(sip, lba, num);
4100 	}
4101 
4102 	ret = 0;
4103 
4104 out:
4105 	sdeb_write_unlock(sip);
4106 	kfree(buf);
4107 
4108 	return ret;
4109 }
4110 
4111 #define SDEBUG_GET_LBA_STATUS_LEN 32
4112 
4113 static int resp_get_lba_status(struct scsi_cmnd *scp,
4114 			       struct sdebug_dev_info *devip)
4115 {
4116 	u8 *cmd = scp->cmnd;
4117 	u64 lba;
4118 	u32 alloc_len, mapped, num;
4119 	int ret;
4120 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4121 
4122 	lba = get_unaligned_be64(cmd + 2);
4123 	alloc_len = get_unaligned_be32(cmd + 10);
4124 
4125 	if (alloc_len < 24)
4126 		return 0;
4127 
4128 	ret = check_device_access_params(scp, lba, 1, false);
4129 	if (ret)
4130 		return ret;
4131 
4132 	if (scsi_debug_lbp()) {
4133 		struct sdeb_store_info *sip = devip2sip(devip, true);
4134 
4135 		mapped = map_state(sip, lba, &num);
4136 	} else {
4137 		mapped = 1;
4138 		/* following just in case virtual_gb changed */
4139 		sdebug_capacity = get_sdebug_capacity();
4140 		if (sdebug_capacity - lba <= 0xffffffff)
4141 			num = sdebug_capacity - lba;
4142 		else
4143 			num = 0xffffffff;
4144 	}
4145 
4146 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4147 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4148 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4149 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4150 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4151 
4152 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4153 }
4154 
4155 static int resp_sync_cache(struct scsi_cmnd *scp,
4156 			   struct sdebug_dev_info *devip)
4157 {
4158 	int res = 0;
4159 	u64 lba;
4160 	u32 num_blocks;
4161 	u8 *cmd = scp->cmnd;
4162 
4163 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4164 		lba = get_unaligned_be32(cmd + 2);
4165 		num_blocks = get_unaligned_be16(cmd + 7);
4166 	} else {				/* SYNCHRONIZE_CACHE(16) */
4167 		lba = get_unaligned_be64(cmd + 2);
4168 		num_blocks = get_unaligned_be32(cmd + 10);
4169 	}
4170 	if (lba + num_blocks > sdebug_capacity) {
4171 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4172 		return check_condition_result;
4173 	}
4174 	if (!write_since_sync || (cmd[1] & 0x2))
4175 		res = SDEG_RES_IMMED_MASK;
4176 	else		/* delay if write_since_sync and IMMED clear */
4177 		write_since_sync = false;
4178 	return res;
4179 }
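
/*
 * In resp_sync_cache() above (and resp_pre_fetch() below) the
 * SDEG_RES_IMMED_MASK bit in the return value asks the queuing code to
 * complete the command without applying the configured command delay,
 * mimicking a device that returns status before the operation finishes.
 */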
4180 
4181 /*
4182  * Assuming lba+num_blocks is not out-of-range, this function returns
4183  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4184  * GOOD status otherwise. A disk with a big cache is modelled, so CONDITION
4185  * MET is always yielded. As a side effect, prefetch_range() is used to pull
4186  * the backing store range into the cache associated with the CPU(s).
4187  */
4188 static int resp_pre_fetch(struct scsi_cmnd *scp,
4189 			  struct sdebug_dev_info *devip)
4190 {
4191 	int res = 0;
4192 	u64 lba;
4193 	u64 block, rest = 0;
4194 	u32 nblks;
4195 	u8 *cmd = scp->cmnd;
4196 	struct sdeb_store_info *sip = devip2sip(devip, true);
4197 	u8 *fsp = sip->storep;
4198 
4199 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4200 		lba = get_unaligned_be32(cmd + 2);
4201 		nblks = get_unaligned_be16(cmd + 7);
4202 	} else {			/* PRE-FETCH(16) */
4203 		lba = get_unaligned_be64(cmd + 2);
4204 		nblks = get_unaligned_be32(cmd + 10);
4205 	}
4206 	if (lba + nblks > sdebug_capacity) {
4207 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4208 		return check_condition_result;
4209 	}
4210 	if (!fsp)
4211 		goto fini;
4212 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4213 	block = do_div(lba, sdebug_store_sectors);
4214 	if (block + nblks > sdebug_store_sectors)
4215 		rest = block + nblks - sdebug_store_sectors;
4216 
4217 	/* Try to bring the PRE-FETCH range into CPU's cache */
4218 	sdeb_read_lock(sip);
4219 	prefetch_range(fsp + (sdebug_sector_size * block),
4220 		       (nblks - rest) * sdebug_sector_size);
4221 	if (rest)
4222 		prefetch_range(fsp, rest * sdebug_sector_size);
4223 	sdeb_read_unlock(sip);
4224 fini:
4225 	if (cmd[1] & 0x2)
4226 		res = SDEG_RES_IMMED_MASK;
4227 	return res | condition_met_result;
4228 }
4229 
4230 #define RL_BUCKET_ELEMS 8
4231 
4232 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4233  * (W-LUN), the normal Linux scanning logic does not associate it with a
4234  * device (e.g. /dev/sg7). The following magic will make that association:
4235  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4236  * where <n> is a host number. If there are multiple targets in a host then
4237  * the above will associate a W-LUN to each target. To only get a W-LUN
4238  * for target 2, then use "echo '- 2 49409' > scan" .
4239  */
4240 static int resp_report_luns(struct scsi_cmnd *scp,
4241 			    struct sdebug_dev_info *devip)
4242 {
4243 	unsigned char *cmd = scp->cmnd;
4244 	unsigned int alloc_len;
4245 	unsigned char select_report;
4246 	u64 lun;
4247 	struct scsi_lun *lun_p;
4248 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4249 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4250 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4251 	unsigned int tlun_cnt;	/* total LUN count */
4252 	unsigned int rlen;	/* response length (in bytes) */
4253 	int k, j, n, res;
4254 	unsigned int off_rsp = 0;
4255 	const int sz_lun = sizeof(struct scsi_lun);
4256 
4257 	clear_luns_changed_on_target(devip);
4258 
4259 	select_report = cmd[2];
4260 	alloc_len = get_unaligned_be32(cmd + 6);
4261 
4262 	if (alloc_len < 4) {
4263 		pr_err("alloc len too small %d\n", alloc_len);
4264 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4265 		return check_condition_result;
4266 	}
4267 
4268 	switch (select_report) {
4269 	case 0:		/* all LUNs apart from W-LUNs */
4270 		lun_cnt = sdebug_max_luns;
4271 		wlun_cnt = 0;
4272 		break;
4273 	case 1:		/* only W-LUNs */
4274 		lun_cnt = 0;
4275 		wlun_cnt = 1;
4276 		break;
4277 	case 2:		/* all LUNs */
4278 		lun_cnt = sdebug_max_luns;
4279 		wlun_cnt = 1;
4280 		break;
4281 	case 0x10:	/* only administrative LUs */
4282 	case 0x11:	/* see SPC-5 */
4283 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4284 	default:
4285 		pr_debug("select report invalid %d\n", select_report);
4286 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4287 		return check_condition_result;
4288 	}
4289 
4290 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4291 		--lun_cnt;
4292 
4293 	tlun_cnt = lun_cnt + wlun_cnt;
4294 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4295 	scsi_set_resid(scp, scsi_bufflen(scp));
4296 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4297 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4298 
4299 	/* loops rely on the response header and a LUN entry being the same size (8 bytes) */
4300 	lun = sdebug_no_lun_0 ? 1 : 0;
4301 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4302 		memset(arr, 0, sizeof(arr));
4303 		lun_p = (struct scsi_lun *)&arr[0];
4304 		if (k == 0) {
4305 			put_unaligned_be32(rlen, &arr[0]);
4306 			++lun_p;
4307 			j = 1;
4308 		}
4309 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4310 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4311 				break;
4312 			int_to_scsilun(lun++, lun_p);
4313 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4314 				lun_p->scsi_lun[0] |= 0x40;
4315 		}
4316 		if (j < RL_BUCKET_ELEMS)
4317 			break;
4318 		n = j * sz_lun;
4319 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4320 		if (res)
4321 			return res;
4322 		off_rsp += n;
4323 	}
4324 	if (wlun_cnt) {
4325 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4326 		++j;
4327 	}
4328 	if (j > 0)
4329 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4330 	return res;
4331 }
4332 
4333 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4334 {
4335 	bool is_bytchk3 = false;
4336 	u8 bytchk;
4337 	int ret, j;
4338 	u32 vnum, a_num, off;
4339 	const u32 lb_size = sdebug_sector_size;
4340 	u64 lba;
4341 	u8 *arr;
4342 	u8 *cmd = scp->cmnd;
4343 	struct sdeb_store_info *sip = devip2sip(devip, true);
4344 
4345 	bytchk = (cmd[1] >> 1) & 0x3;
4346 	if (bytchk == 0) {
4347 		return 0;	/* always claim internal verify okay */
4348 	} else if (bytchk == 2) {
4349 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4350 		return check_condition_result;
4351 	} else if (bytchk == 3) {
4352 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4353 	}
4354 	switch (cmd[0]) {
4355 	case VERIFY_16:
4356 		lba = get_unaligned_be64(cmd + 2);
4357 		vnum = get_unaligned_be32(cmd + 10);
4358 		break;
4359 	case VERIFY:		/* is VERIFY(10) */
4360 		lba = get_unaligned_be32(cmd + 2);
4361 		vnum = get_unaligned_be16(cmd + 7);
4362 		break;
4363 	default:
4364 		mk_sense_invalid_opcode(scp);
4365 		return check_condition_result;
4366 	}
4367 	if (vnum == 0)
4368 		return 0;	/* not an error */
4369 	a_num = is_bytchk3 ? 1 : vnum;
4370 	/* Treat following check like one for read (i.e. no write) access */
4371 	ret = check_device_access_params(scp, lba, a_num, false);
4372 	if (ret)
4373 		return ret;
4374 
4375 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4376 	if (!arr) {
4377 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4378 				INSUFF_RES_ASCQ);
4379 		return check_condition_result;
4380 	}
4381 	/* Not changing store, so only need read access */
4382 	sdeb_read_lock(sip);
4383 
4384 	ret = do_dout_fetch(scp, a_num, arr);
4385 	if (ret == -1) {
4386 		ret = DID_ERROR << 16;
4387 		goto cleanup;
4388 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4389 		sdev_printk(KERN_INFO, scp->device,
4390 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4391 			    my_name, __func__, a_num * lb_size, ret);
4392 	}
4393 	if (is_bytchk3) {
4394 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4395 			memcpy(arr + off, arr, lb_size);
4396 	}
4397 	ret = 0;
4398 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4399 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4400 		ret = check_condition_result;
4401 		goto cleanup;
4402 	}
4403 cleanup:
4404 	sdeb_read_unlock(sip);
4405 	kfree(arr);
4406 	return ret;
4407 }
4408 
4409 #define RZONES_DESC_HD 64
4410 
4411 /* Report zones depending on start LBA and reporting options */
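/* Response layout built below: a 64 byte header with the zone list length
 * (in bytes) at offset 0 and the maximum LBA at offset 8, followed by one
 * RZONES_DESC_HD (64) byte descriptor per reported zone: type and condition
 * in bytes 0-1, then zone length, start LBA and write pointer at offsets
 * 8, 16 and 24. */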
4412 static int resp_report_zones(struct scsi_cmnd *scp,
4413 			     struct sdebug_dev_info *devip)
4414 {
4415 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4416 	int ret = 0;
4417 	u32 alloc_len, rep_opts, rep_len;
4418 	bool partial;
4419 	u64 lba, zs_lba;
4420 	u8 *arr = NULL, *desc;
4421 	u8 *cmd = scp->cmnd;
4422 	struct sdeb_zone_state *zsp;
4423 	struct sdeb_store_info *sip = devip2sip(devip, false);
4424 
4425 	if (!sdebug_dev_is_zoned(devip)) {
4426 		mk_sense_invalid_opcode(scp);
4427 		return check_condition_result;
4428 	}
4429 	zs_lba = get_unaligned_be64(cmd + 2);
4430 	alloc_len = get_unaligned_be32(cmd + 10);
4431 	if (alloc_len == 0)
4432 		return 0;	/* not an error */
4433 	rep_opts = cmd[14] & 0x3f;
4434 	partial = cmd[14] & 0x80;
4435 
4436 	if (zs_lba >= sdebug_capacity) {
4437 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4438 		return check_condition_result;
4439 	}
4440 
4441 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4442 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4443 			    max_zones);
4444 
4445 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4446 	if (!arr) {
4447 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4448 				INSUFF_RES_ASCQ);
4449 		return check_condition_result;
4450 	}
4451 
4452 	sdeb_read_lock(sip);
4453 
4454 	desc = arr + 64;
4455 	for (i = 0; i < max_zones; i++) {
4456 		lba = zs_lba + devip->zsize * i;
4457 		if (lba > sdebug_capacity)
4458 			break;
4459 		zsp = zbc_zone(devip, lba);
4460 		switch (rep_opts) {
4461 		case 0x00:
4462 			/* All zones */
4463 			break;
4464 		case 0x01:
4465 			/* Empty zones */
4466 			if (zsp->z_cond != ZC1_EMPTY)
4467 				continue;
4468 			break;
4469 		case 0x02:
4470 			/* Implicit open zones */
4471 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4472 				continue;
4473 			break;
4474 		case 0x03:
4475 			/* Explicit open zones */
4476 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4477 				continue;
4478 			break;
4479 		case 0x04:
4480 			/* Closed zones */
4481 			if (zsp->z_cond != ZC4_CLOSED)
4482 				continue;
4483 			break;
4484 		case 0x05:
4485 			/* Full zones */
4486 			if (zsp->z_cond != ZC5_FULL)
4487 				continue;
4488 			break;
4489 		case 0x06:
4490 		case 0x07:
4491 		case 0x10:
4492 			/*
4493 			 * Read-only, offline, reset WP recommended are
4494 			 * not emulated: no zones to report.
4495 			 */
4496 			continue;
4497 		case 0x11:
4498 			/* non-seq-resource set */
4499 			if (!zsp->z_non_seq_resource)
4500 				continue;
4501 			break;
4502 		case 0x3f:
4503 			/* Not write pointer (conventional) zones */
4504 			if (!zbc_zone_is_conv(zsp))
4505 				continue;
4506 			break;
4507 		default:
4508 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4509 					INVALID_FIELD_IN_CDB, 0);
4510 			ret = check_condition_result;
4511 			goto fini;
4512 		}
4513 
4514 		if (nrz < rep_max_zones) {
4515 			/* Fill zone descriptor */
4516 			desc[0] = zsp->z_type;
4517 			desc[1] = zsp->z_cond << 4;
4518 			if (zsp->z_non_seq_resource)
4519 				desc[1] |= 1 << 1;
4520 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4521 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4522 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4523 			desc += 64;
4524 		}
4525 
4526 		if (partial && nrz >= rep_max_zones)
4527 			break;
4528 
4529 		nrz++;
4530 	}
4531 
4532 	/* Report header */
4533 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4534 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4535 
4536 	rep_len = (unsigned long)desc - (unsigned long)arr;
4537 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4538 
4539 fini:
4540 	sdeb_read_unlock(sip);
4541 	kfree(arr);
4542 	return ret;
4543 }
4544 
4545 /* Logic transplanted from tcmu-runner, file_zbc.c */
4546 static void zbc_open_all(struct sdebug_dev_info *devip)
4547 {
4548 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4549 	unsigned int i;
4550 
4551 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4552 		if (zsp->z_cond == ZC4_CLOSED)
4553 			zbc_open_zone(devip, &devip->zstate[i], true);
4554 	}
4555 }
4556 
4557 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4558 {
4559 	int res = 0;
4560 	u64 z_id;
4561 	enum sdebug_z_cond zc;
4562 	u8 *cmd = scp->cmnd;
4563 	struct sdeb_zone_state *zsp;
4564 	bool all = cmd[14] & 0x01;
4565 	struct sdeb_store_info *sip = devip2sip(devip, false);
4566 
4567 	if (!sdebug_dev_is_zoned(devip)) {
4568 		mk_sense_invalid_opcode(scp);
4569 		return check_condition_result;
4570 	}
4571 
4572 	sdeb_write_lock(sip);
4573 
4574 	if (all) {
4575 		/* Check if all closed zones can be opened */
4576 		if (devip->max_open &&
4577 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4578 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4579 					INSUFF_ZONE_ASCQ);
4580 			res = check_condition_result;
4581 			goto fini;
4582 		}
4583 		/* Open all closed zones */
4584 		zbc_open_all(devip);
4585 		goto fini;
4586 	}
4587 
4588 	/* Open the specified zone */
4589 	z_id = get_unaligned_be64(cmd + 2);
4590 	if (z_id >= sdebug_capacity) {
4591 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4592 		res = check_condition_result;
4593 		goto fini;
4594 	}
4595 
4596 	zsp = zbc_zone(devip, z_id);
4597 	if (z_id != zsp->z_start) {
4598 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4599 		res = check_condition_result;
4600 		goto fini;
4601 	}
4602 	if (zbc_zone_is_conv(zsp)) {
4603 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4604 		res = check_condition_result;
4605 		goto fini;
4606 	}
4607 
4608 	zc = zsp->z_cond;
4609 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4610 		goto fini;
4611 
4612 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4613 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4614 				INSUFF_ZONE_ASCQ);
4615 		res = check_condition_result;
4616 		goto fini;
4617 	}
4618 
4619 	zbc_open_zone(devip, zsp, true);
4620 fini:
4621 	sdeb_write_unlock(sip);
4622 	return res;
4623 }
4624 
4625 static void zbc_close_all(struct sdebug_dev_info *devip)
4626 {
4627 	unsigned int i;
4628 
4629 	for (i = 0; i < devip->nr_zones; i++)
4630 		zbc_close_zone(devip, &devip->zstate[i]);
4631 }
4632 
4633 static int resp_close_zone(struct scsi_cmnd *scp,
4634 			   struct sdebug_dev_info *devip)
4635 {
4636 	int res = 0;
4637 	u64 z_id;
4638 	u8 *cmd = scp->cmnd;
4639 	struct sdeb_zone_state *zsp;
4640 	bool all = cmd[14] & 0x01;
4641 	struct sdeb_store_info *sip = devip2sip(devip, false);
4642 
4643 	if (!sdebug_dev_is_zoned(devip)) {
4644 		mk_sense_invalid_opcode(scp);
4645 		return check_condition_result;
4646 	}
4647 
4648 	sdeb_write_lock(sip);
4649 
4650 	if (all) {
4651 		zbc_close_all(devip);
4652 		goto fini;
4653 	}
4654 
4655 	/* Close specified zone */
4656 	z_id = get_unaligned_be64(cmd + 2);
4657 	if (z_id >= sdebug_capacity) {
4658 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4659 		res = check_condition_result;
4660 		goto fini;
4661 	}
4662 
4663 	zsp = zbc_zone(devip, z_id);
4664 	if (z_id != zsp->z_start) {
4665 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4666 		res = check_condition_result;
4667 		goto fini;
4668 	}
4669 	if (zbc_zone_is_conv(zsp)) {
4670 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4671 		res = check_condition_result;
4672 		goto fini;
4673 	}
4674 
4675 	zbc_close_zone(devip, zsp);
4676 fini:
4677 	sdeb_write_unlock(sip);
4678 	return res;
4679 }
4680 
4681 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4682 			    struct sdeb_zone_state *zsp, bool empty)
4683 {
4684 	enum sdebug_z_cond zc = zsp->z_cond;
4685 
4686 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4687 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4688 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4689 			zbc_close_zone(devip, zsp);
4690 		if (zsp->z_cond == ZC4_CLOSED)
4691 			devip->nr_closed--;
4692 		zsp->z_wp = zsp->z_start + zsp->z_size;
4693 		zsp->z_cond = ZC5_FULL;
4694 	}
4695 }
4696 
4697 static void zbc_finish_all(struct sdebug_dev_info *devip)
4698 {
4699 	unsigned int i;
4700 
4701 	for (i = 0; i < devip->nr_zones; i++)
4702 		zbc_finish_zone(devip, &devip->zstate[i], false);
4703 }
4704 
4705 static int resp_finish_zone(struct scsi_cmnd *scp,
4706 			    struct sdebug_dev_info *devip)
4707 {
4708 	struct sdeb_zone_state *zsp;
4709 	int res = 0;
4710 	u64 z_id;
4711 	u8 *cmd = scp->cmnd;
4712 	bool all = cmd[14] & 0x01;
4713 	struct sdeb_store_info *sip = devip2sip(devip, false);
4714 
4715 	if (!sdebug_dev_is_zoned(devip)) {
4716 		mk_sense_invalid_opcode(scp);
4717 		return check_condition_result;
4718 	}
4719 
4720 	sdeb_write_lock(sip);
4721 
4722 	if (all) {
4723 		zbc_finish_all(devip);
4724 		goto fini;
4725 	}
4726 
4727 	/* Finish the specified zone */
4728 	z_id = get_unaligned_be64(cmd + 2);
4729 	if (z_id >= sdebug_capacity) {
4730 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4731 		res = check_condition_result;
4732 		goto fini;
4733 	}
4734 
4735 	zsp = zbc_zone(devip, z_id);
4736 	if (z_id != zsp->z_start) {
4737 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4738 		res = check_condition_result;
4739 		goto fini;
4740 	}
4741 	if (zbc_zone_is_conv(zsp)) {
4742 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4743 		res = check_condition_result;
4744 		goto fini;
4745 	}
4746 
4747 	zbc_finish_zone(devip, zsp, true);
4748 fini:
4749 	sdeb_write_unlock(sip);
4750 	return res;
4751 }
4752 
4753 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4754 			 struct sdeb_zone_state *zsp)
4755 {
4756 	enum sdebug_z_cond zc;
4757 	struct sdeb_store_info *sip = devip2sip(devip, false);
4758 
4759 	if (zbc_zone_is_conv(zsp))
4760 		return;
4761 
4762 	zc = zsp->z_cond;
4763 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4764 		zbc_close_zone(devip, zsp);
4765 
4766 	if (zsp->z_cond == ZC4_CLOSED)
4767 		devip->nr_closed--;
4768 
4769 	if (zsp->z_wp > zsp->z_start)
4770 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4771 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4772 
4773 	zsp->z_non_seq_resource = false;
4774 	zsp->z_wp = zsp->z_start;
4775 	zsp->z_cond = ZC1_EMPTY;
4776 }
4777 
4778 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4779 {
4780 	unsigned int i;
4781 
4782 	for (i = 0; i < devip->nr_zones; i++)
4783 		zbc_rwp_zone(devip, &devip->zstate[i]);
4784 }
4785 
4786 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4787 {
4788 	struct sdeb_zone_state *zsp;
4789 	int res = 0;
4790 	u64 z_id;
4791 	u8 *cmd = scp->cmnd;
4792 	bool all = cmd[14] & 0x01;
4793 	struct sdeb_store_info *sip = devip2sip(devip, false);
4794 
4795 	if (!sdebug_dev_is_zoned(devip)) {
4796 		mk_sense_invalid_opcode(scp);
4797 		return check_condition_result;
4798 	}
4799 
4800 	sdeb_write_lock(sip);
4801 
4802 	if (all) {
4803 		zbc_rwp_all(devip);
4804 		goto fini;
4805 	}
4806 
4807 	z_id = get_unaligned_be64(cmd + 2);
4808 	if (z_id >= sdebug_capacity) {
4809 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4810 		res = check_condition_result;
4811 		goto fini;
4812 	}
4813 
4814 	zsp = zbc_zone(devip, z_id);
4815 	if (z_id != zsp->z_start) {
4816 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4817 		res = check_condition_result;
4818 		goto fini;
4819 	}
4820 	if (zbc_zone_is_conv(zsp)) {
4821 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4822 		res = check_condition_result;
4823 		goto fini;
4824 	}
4825 
4826 	zbc_rwp_zone(devip, zsp);
4827 fini:
4828 	sdeb_write_unlock(sip);
4829 	return res;
4830 }
4831 
4832 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4833 {
4834 	u16 hwq;
4835 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4836 
4837 	hwq = blk_mq_unique_tag_to_hwq(tag);
4838 
4839 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4840 	if (WARN_ON_ONCE(hwq >= submit_queues))
4841 		hwq = 0;
4842 
4843 	return sdebug_q_arr + hwq;
4844 }
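
/*
 * blk_mq_unique_tag() encodes the hardware queue number in the upper 16
 * bits of the returned tag, so blk_mq_unique_tag_to_hwq() recovers the
 * submission queue index and completions are handled on the same per-hwq
 * sdebug queue the command was queued on.
 */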
4845 
4846 static u32 get_tag(struct scsi_cmnd *cmnd)
4847 {
4848 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4849 }
4850 
4851 /* Queued (deferred) command completions converge here. */
4852 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4853 {
4854 	bool aborted = sd_dp->aborted;
4855 	int qc_idx;
4856 	int retiring = 0;
4857 	unsigned long iflags;
4858 	struct sdebug_queue *sqp;
4859 	struct sdebug_queued_cmd *sqcp;
4860 	struct scsi_cmnd *scp;
4861 	struct sdebug_dev_info *devip;
4862 
4863 	if (unlikely(aborted))
4864 		sd_dp->aborted = false;
4865 	qc_idx = sd_dp->qc_idx;
4866 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4867 	if (sdebug_statistics) {
4868 		atomic_inc(&sdebug_completions);
4869 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4870 			atomic_inc(&sdebug_miss_cpus);
4871 	}
4872 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4873 		pr_err("wild qc_idx=%d\n", qc_idx);
4874 		return;
4875 	}
4876 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4877 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4878 	sqcp = &sqp->qc_arr[qc_idx];
4879 	scp = sqcp->a_cmnd;
4880 	if (unlikely(scp == NULL)) {
4881 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4882 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4883 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4884 		return;
4885 	}
4886 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4887 	if (likely(devip))
4888 		atomic_dec(&devip->num_in_q);
4889 	else
4890 		pr_err("devip=NULL\n");
4891 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4892 		retiring = 1;
4893 
4894 	sqcp->a_cmnd = NULL;
4895 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4896 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4897 		pr_err("Unexpected completion\n");
4898 		return;
4899 	}
4900 
4901 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4902 		int k, retval;
4903 
4904 		retval = atomic_read(&retired_max_queue);
4905 		if (qc_idx >= retval) {
4906 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4907 			pr_err("index %d too large\n", retval);
4908 			return;
4909 		}
4910 		k = find_last_bit(sqp->in_use_bm, retval);
4911 		if ((k < sdebug_max_queue) || (k == retval))
4912 			atomic_set(&retired_max_queue, 0);
4913 		else
4914 			atomic_set(&retired_max_queue, k + 1);
4915 	}
4916 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4917 	if (unlikely(aborted)) {
4918 		if (sdebug_verbose)
4919 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4920 		return;
4921 	}
4922 	scsi_done(scp); /* callback to mid level */
4923 }
4924 
4925 /* When high resolution timer goes off this function is called. */
4926 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4927 {
4928 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4929 						  hrt);
4930 	sdebug_q_cmd_complete(sd_dp);
4931 	return HRTIMER_NORESTART;
4932 }
4933 
4934 /* When work queue schedules work, it calls this function. */
4935 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4936 {
4937 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4938 						  ew.work);
4939 	sdebug_q_cmd_complete(sd_dp);
4940 }
4941 
4942 static bool got_shared_uuid;
4943 static uuid_t shared_uuid;
4944 
4945 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4946 {
4947 	struct sdeb_zone_state *zsp;
4948 	sector_t capacity = get_sdebug_capacity();
4949 	sector_t zstart = 0;
4950 	unsigned int i;
4951 
4952 	/*
4953 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4954 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4955 	 * use the specified zone size checking that at least 2 zones can be
4956 	 * created for the device.
4957 	 */
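	/*
	 * E.g. (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte logical
	 * blocks): zsize starts at 128 MiB / 512 = 262144 blocks and is then
	 * halved until at least 4 zones fit within the device capacity.
	 */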
4958 	if (!sdeb_zbc_zone_size_mb) {
4959 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4960 			>> ilog2(sdebug_sector_size);
4961 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4962 			devip->zsize >>= 1;
4963 		if (devip->zsize < 2) {
4964 			pr_err("Device capacity too small\n");
4965 			return -EINVAL;
4966 		}
4967 	} else {
4968 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4969 			pr_err("Zone size is not a power of 2\n");
4970 			return -EINVAL;
4971 		}
4972 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4973 			>> ilog2(sdebug_sector_size);
4974 		if (devip->zsize >= capacity) {
4975 			pr_err("Zone size too large for device capacity\n");
4976 			return -EINVAL;
4977 		}
4978 	}
4979 
4980 	devip->zsize_shift = ilog2(devip->zsize);
4981 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4982 
4983 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4984 		pr_err("Number of conventional zones too large\n");
4985 		return -EINVAL;
4986 	}
4987 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4988 
4989 	if (devip->zmodel == BLK_ZONED_HM) {
4990 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4991 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4992 			devip->max_open = (devip->nr_zones - 1) / 2;
4993 		else
4994 			devip->max_open = sdeb_zbc_max_open;
4995 	}
4996 
4997 	devip->zstate = kcalloc(devip->nr_zones,
4998 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4999 	if (!devip->zstate)
5000 		return -ENOMEM;
5001 
5002 	for (i = 0; i < devip->nr_zones; i++) {
5003 		zsp = &devip->zstate[i];
5004 
5005 		zsp->z_start = zstart;
5006 
5007 		if (i < devip->nr_conv_zones) {
5008 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
5009 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5010 			zsp->z_wp = (sector_t)-1;
5011 		} else {
5012 			if (devip->zmodel == BLK_ZONED_HM)
5013 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
5014 			else
5015 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
5016 			zsp->z_cond = ZC1_EMPTY;
5017 			zsp->z_wp = zsp->z_start;
5018 		}
5019 
5020 		if (zsp->z_start + devip->zsize < capacity)
5021 			zsp->z_size = devip->zsize;
5022 		else
5023 			zsp->z_size = capacity - zsp->z_start;
5024 
5025 		zstart += zsp->z_size;
5026 	}
5027 
5028 	return 0;
5029 }
5030 
5031 static struct sdebug_dev_info *sdebug_device_create(
5032 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5033 {
5034 	struct sdebug_dev_info *devip;
5035 
5036 	devip = kzalloc(sizeof(*devip), flags);
5037 	if (devip) {
5038 		if (sdebug_uuid_ctl == 1)
5039 			uuid_gen(&devip->lu_name);
5040 		else if (sdebug_uuid_ctl == 2) {
5041 			if (got_shared_uuid)
5042 				devip->lu_name = shared_uuid;
5043 			else {
5044 				uuid_gen(&shared_uuid);
5045 				got_shared_uuid = true;
5046 				devip->lu_name = shared_uuid;
5047 			}
5048 		}
5049 		devip->sdbg_host = sdbg_host;
5050 		if (sdeb_zbc_in_use) {
5051 			devip->zmodel = sdeb_zbc_model;
5052 			if (sdebug_device_create_zones(devip)) {
5053 				kfree(devip);
5054 				return NULL;
5055 			}
5056 		} else {
5057 			devip->zmodel = BLK_ZONED_NONE;
5058 		}
5060 		devip->create_ts = ktime_get_boottime();
5061 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5062 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5063 	}
5064 	return devip;
5065 }
5066 
5067 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5068 {
5069 	struct sdebug_host_info *sdbg_host;
5070 	struct sdebug_dev_info *open_devip = NULL;
5071 	struct sdebug_dev_info *devip;
5072 
5073 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5074 	if (!sdbg_host) {
5075 		pr_err("Host info NULL\n");
5076 		return NULL;
5077 	}
5078 
5079 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5080 		if ((devip->used) && (devip->channel == sdev->channel) &&
5081 		    (devip->target == sdev->id) &&
5082 		    (devip->lun == sdev->lun))
5083 			return devip;
5084 		else {
5085 			if ((!devip->used) && (!open_devip))
5086 				open_devip = devip;
5087 		}
5088 	}
5089 	if (!open_devip) { /* try and make a new one */
5090 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5091 		if (!open_devip) {
5092 			pr_err("out of memory at line %d\n", __LINE__);
5093 			return NULL;
5094 		}
5095 	}
5096 
5097 	open_devip->channel = sdev->channel;
5098 	open_devip->target = sdev->id;
5099 	open_devip->lun = sdev->lun;
5100 	open_devip->sdbg_host = sdbg_host;
5101 	atomic_set(&open_devip->num_in_q, 0);
5102 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5103 	open_devip->used = true;
5104 	return open_devip;
5105 }
5106 
5107 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5108 {
5109 	if (sdebug_verbose)
5110 		pr_info("slave_alloc <%u %u %u %llu>\n",
5111 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5112 	return 0;
5113 }
5114 
5115 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5116 {
5117 	struct sdebug_dev_info *devip =
5118 			(struct sdebug_dev_info *)sdp->hostdata;
5119 
5120 	if (sdebug_verbose)
5121 		pr_info("slave_configure <%u %u %u %llu>\n",
5122 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5123 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5124 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5125 	if (smp_load_acquire(&sdebug_deflect_incoming)) {
5126 		pr_info("Exit early due to deflect_incoming\n");
5127 		return 1;
5128 	}
5129 	if (devip == NULL) {
5130 		devip = find_build_dev_info(sdp);
5131 		if (devip == NULL)
5132 			return 1;  /* no resources, will be marked offline */
5133 	}
5134 	sdp->hostdata = devip;
5135 	if (sdebug_no_uld)
5136 		sdp->no_uld_attach = 1;
5137 	config_cdb_len(sdp);
5138 	return 0;
5139 }
5140 
5141 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5142 {
5143 	struct sdebug_dev_info *devip =
5144 		(struct sdebug_dev_info *)sdp->hostdata;
5145 
5146 	if (sdebug_verbose)
5147 		pr_info("slave_destroy <%u %u %u %llu>\n",
5148 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5149 	if (devip) {
5150 		/* make this slot available for re-use */
5151 		devip->used = false;
5152 		sdp->hostdata = NULL;
5153 	}
5154 }
5155 
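/* Cancel the hrtimer or work item backing a deferred command. Called
 * without the owning queue's qc_lock held since cancel_work_sync() may
 * sleep and hrtimer_cancel() waits for a running callback to finish.
 */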
5156 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5157 			   enum sdeb_defer_type defer_t)
5158 {
5159 	if (!sd_dp)
5160 		return;
5161 	if (defer_t == SDEB_DEFER_HRT)
5162 		hrtimer_cancel(&sd_dp->hrt);
5163 	else if (defer_t == SDEB_DEFER_WQ)
5164 		cancel_work_sync(&sd_dp->ew.work);
5165 }
5166 
/* If @cmnd is found, delete its timer or work queue entry and return true;
 * else return false.
 */
5169 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5170 {
5171 	unsigned long iflags;
5172 	int j, k, qmax, r_qmax;
5173 	enum sdeb_defer_type l_defer_t;
5174 	struct sdebug_queue *sqp;
5175 	struct sdebug_queued_cmd *sqcp;
5176 	struct sdebug_dev_info *devip;
5177 	struct sdebug_defer *sd_dp;
5178 
5179 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5180 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5181 		qmax = sdebug_max_queue;
5182 		r_qmax = atomic_read(&retired_max_queue);
5183 		if (r_qmax > qmax)
5184 			qmax = r_qmax;
5185 		for (k = 0; k < qmax; ++k) {
5186 			if (test_bit(k, sqp->in_use_bm)) {
5187 				sqcp = &sqp->qc_arr[k];
5188 				if (cmnd != sqcp->a_cmnd)
5189 					continue;
5190 				/* found */
5191 				devip = (struct sdebug_dev_info *)
5192 						cmnd->device->hostdata;
5193 				if (devip)
5194 					atomic_dec(&devip->num_in_q);
5195 				sqcp->a_cmnd = NULL;
5196 				sd_dp = sqcp->sd_dp;
5197 				if (sd_dp) {
5198 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5199 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5200 				} else
5201 					l_defer_t = SDEB_DEFER_NONE;
5202 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5203 				stop_qc_helper(sd_dp, l_defer_t);
5204 				clear_bit(k, sqp->in_use_bm);
5205 				return true;
5206 			}
5207 		}
5208 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5209 	}
5210 	return false;
5211 }
5212 
5213 /* Deletes (stops) timers or work queues of all queued commands */
5214 static void stop_all_queued(bool done_with_no_conn)
5215 {
5216 	unsigned long iflags;
5217 	int j, k;
5218 	enum sdeb_defer_type l_defer_t;
5219 	struct sdebug_queue *sqp;
5220 	struct sdebug_queued_cmd *sqcp;
5221 	struct sdebug_dev_info *devip;
5222 	struct sdebug_defer *sd_dp;
5223 	struct scsi_cmnd *scp;
5224 
5225 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5226 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5227 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5228 			if (test_bit(k, sqp->in_use_bm)) {
5229 				sqcp = &sqp->qc_arr[k];
5230 				scp = sqcp->a_cmnd;
5231 				if (!scp)
5232 					continue;
5233 				devip = (struct sdebug_dev_info *)
5234 					sqcp->a_cmnd->device->hostdata;
5235 				if (devip)
5236 					atomic_dec(&devip->num_in_q);
5237 				sqcp->a_cmnd = NULL;
5238 				sd_dp = sqcp->sd_dp;
5239 				if (sd_dp) {
5240 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5241 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5242 				} else
5243 					l_defer_t = SDEB_DEFER_NONE;
5244 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5245 				stop_qc_helper(sd_dp, l_defer_t);
5246 				if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
5247 					scp->result = DID_NO_CONNECT << 16;
5248 					scsi_done(scp);
5249 				}
5250 				clear_bit(k, sqp->in_use_bm);
5251 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5252 			}
5253 		}
5254 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5255 	}
5256 }
5257 
5258 /* Free queued command memory on heap */
5259 static void free_all_queued(void)
5260 {
5261 	int j, k;
5262 	struct sdebug_queue *sqp;
5263 	struct sdebug_queued_cmd *sqcp;
5264 
5265 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5266 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5267 			sqcp = &sqp->qc_arr[k];
5268 			kfree(sqcp->sd_dp);
5269 			sqcp->sd_dp = NULL;
5270 		}
5271 	}
5272 }
5273 
5274 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5275 {
5276 	bool ok;
5277 
5278 	++num_aborts;
5279 	if (SCpnt) {
5280 		ok = stop_queued_cmnd(SCpnt);
5281 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5282 			sdev_printk(KERN_INFO, SCpnt->device,
5283 				    "%s: command%s found\n", __func__,
5284 				    ok ? "" : " not");
5285 	}
5286 	return SUCCESS;
5287 }
5288 
5289 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5290 {
5291 	++num_dev_resets;
5292 	if (SCpnt && SCpnt->device) {
5293 		struct scsi_device *sdp = SCpnt->device;
5294 		struct sdebug_dev_info *devip =
5295 				(struct sdebug_dev_info *)sdp->hostdata;
5296 
5297 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5298 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5299 		if (devip)
5300 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5301 	}
5302 	return SUCCESS;
5303 }
5304 
5305 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5306 {
5307 	struct sdebug_host_info *sdbg_host;
5308 	struct sdebug_dev_info *devip;
5309 	struct scsi_device *sdp;
5310 	struct Scsi_Host *hp;
5311 	int k = 0;
5312 
5313 	++num_target_resets;
5314 	if (!SCpnt)
5315 		goto lie;
5316 	sdp = SCpnt->device;
5317 	if (!sdp)
5318 		goto lie;
5319 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5320 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5321 	hp = sdp->host;
5322 	if (!hp)
5323 		goto lie;
5324 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5325 	if (sdbg_host) {
5326 		list_for_each_entry(devip,
5327 				    &sdbg_host->dev_info_list,
5328 				    dev_list)
5329 			if (devip->target == sdp->id) {
5330 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5331 				++k;
5332 			}
5333 	}
5334 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5335 		sdev_printk(KERN_INFO, sdp,
5336 			    "%s: %d device(s) found in target\n", __func__, k);
5337 lie:
5338 	return SUCCESS;
5339 }
5340 
5341 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5342 {
5343 	struct sdebug_host_info *sdbg_host;
5344 	struct sdebug_dev_info *devip;
5345 	struct scsi_device *sdp;
5346 	struct Scsi_Host *hp;
5347 	int k = 0;
5348 
5349 	++num_bus_resets;
5350 	if (!(SCpnt && SCpnt->device))
5351 		goto lie;
5352 	sdp = SCpnt->device;
5353 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5354 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5355 	hp = sdp->host;
5356 	if (hp) {
5357 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5358 		if (sdbg_host) {
5359 			list_for_each_entry(devip,
5360 					    &sdbg_host->dev_info_list,
5361 					    dev_list) {
5362 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5363 				++k;
5364 			}
5365 		}
5366 	}
5367 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5368 		sdev_printk(KERN_INFO, sdp,
5369 			    "%s: %d device(s) found in host\n", __func__, k);
5370 lie:
5371 	return SUCCESS;
5372 }
5373 
5374 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5375 {
5376 	struct sdebug_host_info *sdbg_host;
5377 	struct sdebug_dev_info *devip;
5378 	int k = 0;
5379 
5380 	++num_host_resets;
5381 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5382 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5383 	spin_lock(&sdebug_host_list_lock);
5384 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5385 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5386 				    dev_list) {
5387 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5388 			++k;
5389 		}
5390 	}
5391 	spin_unlock(&sdebug_host_list_lock);
5392 	stop_all_queued(false);
5393 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5394 		sdev_printk(KERN_INFO, SCpnt->device,
5395 			    "%s: %d device(s) found\n", __func__, k);
5396 	return SUCCESS;
5397 }
5398 
5399 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5400 {
5401 	struct msdos_partition *pp;
5402 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5403 	int sectors_per_part, num_sectors, k;
5404 	int heads_by_sects, start_sec, end_sec;
5405 
5406 	/* assume partition table already zeroed */
5407 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5408 		return;
5409 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5410 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5411 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5412 	}
5413 	num_sectors = (int)get_sdebug_capacity();
5414 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5415 			   / sdebug_num_parts;
5416 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5417 	starts[0] = sdebug_sectors_per;
5418 	max_part_secs = sectors_per_part;
5419 	for (k = 1; k < sdebug_num_parts; ++k) {
5420 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5421 			    * heads_by_sects;
5422 		if (starts[k] - starts[k - 1] < max_part_secs)
5423 			max_part_secs = starts[k] - starts[k - 1];
5424 	}
5425 	starts[sdebug_num_parts] = num_sectors;
5426 	starts[sdebug_num_parts + 1] = 0;
5427 
5428 	ramp[510] = 0x55;	/* magic partition markings */
5429 	ramp[511] = 0xAA;
5430 	pp = (struct msdos_partition *)(ramp + 0x1be);
5431 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5432 		start_sec = starts[k];
5433 		end_sec = starts[k] + max_part_secs - 1;
5434 		pp->boot_ind = 0;
5435 
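		/*
		 * Convert the LBA bounds to CHS. E.g. (hypothetical geometry)
		 * with heads=8 and sectors_per=32, heads_by_sects=256, so
		 * start_sec=512 maps to cyl=2, head=0, sector=1.
		 */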
5436 		pp->cyl = start_sec / heads_by_sects;
5437 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5438 			   / sdebug_sectors_per;
5439 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5440 
5441 		pp->end_cyl = end_sec / heads_by_sects;
5442 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5443 			       / sdebug_sectors_per;
5444 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5445 
5446 		pp->start_sect = cpu_to_le32(start_sec);
5447 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5448 		pp->sys_ind = 0x83;	/* plain Linux partition */
5449 	}
5450 }
5451 
5452 static void sdeb_block_all_queues(void)
5453 {
5454 	int j;
5455 	struct sdebug_queue *sqp;
5456 
5457 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5458 		atomic_set(&sqp->blocked, (int)true);
5459 }
5460 
5461 static void sdeb_unblock_all_queues(void)
5462 {
5463 	int j;
5464 	struct sdebug_queue *sqp;
5465 
5466 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5467 		atomic_set(&sqp->blocked, (int)false);
5468 }
5469 
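/* Add num_hosts hosts. When per-host stores are in use (and fake_rw is
 * off), re-use any store marked not-in-use before allocating a new one.
 */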
5470 static void
5471 sdeb_add_n_hosts(int num_hosts)
5472 {
5473 	if (num_hosts < 1)
5474 		return;
5475 	do {
5476 		bool found;
5477 		unsigned long idx;
5478 		struct sdeb_store_info *sip;
5479 		bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
5480 
5481 		found = false;
5482 		if (want_phs) {
5483 			xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
5484 				sdeb_most_recent_idx = (int)idx;
5485 				found = true;
5486 				break;
5487 			}
5488 			if (found)	/* re-use case */
5489 				sdebug_add_host_helper((int)idx);
5490 			else
5491 				sdebug_do_add_host(true	/* make new store */);
5492 		} else {
5493 			sdebug_do_add_host(false);
5494 		}
5495 	} while (--num_hosts);
5496 }
5497 
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur. E.g. with
 * every_nth=100 and a current count of 250, the count is rounded down to 200.
 */
5501 static void tweak_cmnd_count(void)
5502 {
5503 	int count, modulo;
5504 
5505 	modulo = abs(sdebug_every_nth);
5506 	if (modulo < 2)
5507 		return;
5508 	sdeb_block_all_queues();
5509 	count = atomic_read(&sdebug_cmnd_count);
5510 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5511 	sdeb_unblock_all_queues();
5512 }
5513 
5514 static void clear_queue_stats(void)
5515 {
5516 	atomic_set(&sdebug_cmnd_count, 0);
5517 	atomic_set(&sdebug_completions, 0);
5518 	atomic_set(&sdebug_miss_cpus, 0);
5519 	atomic_set(&sdebug_a_tsf, 0);
5520 }
5521 
5522 static bool inject_on_this_cmd(void)
5523 {
5524 	if (sdebug_every_nth == 0)
5525 		return false;
5526 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5527 }
5528 
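/* Used while incoming commands are being deflected (e.g. during host
 * removal): cache flush commands report GOOD status, everything else is
 * failed with DID_NO_CONNECT.
 */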
5529 static int process_deflect_incoming(struct scsi_cmnd *scp)
5530 {
5531 	u8 opcode = scp->cmnd[0];
5532 
5533 	if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
5534 		return 0;
5535 	return DID_NO_CONNECT << 16;
5536 }
5537 
5538 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
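/* Delays below this threshold are short enough that, when the time already
 * spent processing the command covers the requested delay, the command is
 * completed in the calling thread instead of arming a hrtimer.
 */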
5539 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling scsi_done() directly
 * or schedules a deferred completion via a hrtimer, a work queue or the
 * polling path, then returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if
 * temporarily out of resources.
 */
5545 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5546 			 int scsi_result,
5547 			 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
5548 			 int delta_jiff, int ndelay)
5549 {
5550 	bool new_sd_dp;
5551 	bool inject = false;
5552 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5553 	int k, num_in_q, qdepth;
5554 	unsigned long iflags;
5555 	u64 ns_from_boot = 0;
5556 	struct sdebug_queue *sqp;
5557 	struct sdebug_queued_cmd *sqcp;
5558 	struct scsi_device *sdp;
5559 	struct sdebug_defer *sd_dp;
5560 
5561 	if (unlikely(devip == NULL)) {
5562 		if (scsi_result == 0)
5563 			scsi_result = DID_NO_CONNECT << 16;
5564 		goto respond_in_thread;
5565 	}
5566 	sdp = cmnd->device;
5567 
5568 	if (delta_jiff == 0) {
5569 		sqp = get_queue(cmnd);
5570 		if (atomic_read(&sqp->blocked)) {
5571 			if (smp_load_acquire(&sdebug_deflect_incoming))
5572 				return process_deflect_incoming(cmnd);
5573 			else
5574 				return SCSI_MLQUEUE_HOST_BUSY;
5575 		}
5576 		goto respond_in_thread;
5577 	}
5578 
5579 	sqp = get_queue(cmnd);
5580 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5581 	if (unlikely(atomic_read(&sqp->blocked))) {
5582 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5583 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
5584 			scsi_result = process_deflect_incoming(cmnd);
5585 			goto respond_in_thread;
5586 		}
5587 		if (sdebug_verbose)
5588 			pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
5589 		return SCSI_MLQUEUE_HOST_BUSY;
5590 	}
5591 	num_in_q = atomic_read(&devip->num_in_q);
5592 	qdepth = cmnd->device->queue_depth;
5593 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5594 		if (scsi_result) {
5595 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5596 			goto respond_in_thread;
5597 		} else
5598 			scsi_result = device_qfull_result;
5599 	} else if (unlikely(sdebug_every_nth &&
5600 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5601 			    (scsi_result == 0))) {
5602 		if ((num_in_q == (qdepth - 1)) &&
5603 		    (atomic_inc_return(&sdebug_a_tsf) >=
5604 		     abs(sdebug_every_nth))) {
5605 			atomic_set(&sdebug_a_tsf, 0);
5606 			inject = true;
5607 			scsi_result = device_qfull_result;
5608 		}
5609 	}
5610 
5611 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5612 	if (unlikely(k >= sdebug_max_queue)) {
5613 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5614 		if (scsi_result)
5615 			goto respond_in_thread;
5616 		scsi_result = device_qfull_result;
5617 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5618 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5619 				    __func__, sdebug_max_queue);
5620 		goto respond_in_thread;
5621 	}
5622 	set_bit(k, sqp->in_use_bm);
5623 	atomic_inc(&devip->num_in_q);
5624 	sqcp = &sqp->qc_arr[k];
5625 	sqcp->a_cmnd = cmnd;
5626 	cmnd->host_scribble = (unsigned char *)sqcp;
5627 	sd_dp = sqcp->sd_dp;
5628 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5629 
5630 	if (!sd_dp) {
5631 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5632 		if (!sd_dp) {
5633 			atomic_dec(&devip->num_in_q);
5634 			clear_bit(k, sqp->in_use_bm);
5635 			return SCSI_MLQUEUE_HOST_BUSY;
5636 		}
5637 		new_sd_dp = true;
5638 	} else {
5639 		new_sd_dp = false;
5640 	}
5641 
5642 	/* Set the hostwide tag */
5643 	if (sdebug_host_max_queue)
5644 		sd_dp->hc_idx = get_tag(cmnd);
5645 
5646 	if (polled)
5647 		ns_from_boot = ktime_get_boottime_ns();
5648 
5649 	/* one of the resp_*() response functions is called here */
5650 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5651 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5652 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5653 		delta_jiff = ndelay = 0;
5654 	}
5655 	if (cmnd->result == 0 && scsi_result != 0)
5656 		cmnd->result = scsi_result;
5657 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5658 		if (atomic_read(&sdeb_inject_pending)) {
5659 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5660 			atomic_set(&sdeb_inject_pending, 0);
5661 			cmnd->result = check_condition_result;
5662 		}
5663 	}
5664 
5665 	if (unlikely(sdebug_verbose && cmnd->result))
5666 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5667 			    __func__, cmnd->result);
5668 
5669 	if (delta_jiff > 0 || ndelay > 0) {
5670 		ktime_t kt;
5671 
5672 		if (delta_jiff > 0) {
5673 			u64 ns = jiffies_to_nsecs(delta_jiff);
5674 
5675 			if (sdebug_random && ns < U32_MAX) {
5676 				ns = prandom_u32_max((u32)ns);
5677 			} else if (sdebug_random) {
5678 				ns >>= 12;	/* scale to 4 usec precision */
5679 				if (ns < U32_MAX)	/* over 4 hours max */
5680 					ns = prandom_u32_max((u32)ns);
5681 				ns <<= 12;
5682 			}
5683 			kt = ns_to_ktime(ns);
5684 		} else {	/* ndelay has a 4.2 second max */
5685 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5686 					     (u32)ndelay;
5687 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5688 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5689 
5690 				if (kt <= d) {	/* elapsed duration >= kt */
5691 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5692 					sqcp->a_cmnd = NULL;
5693 					atomic_dec(&devip->num_in_q);
5694 					clear_bit(k, sqp->in_use_bm);
5695 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5696 					if (new_sd_dp)
5697 						kfree(sd_dp);
5698 					/* call scsi_done() from this thread */
5699 					scsi_done(cmnd);
5700 					return 0;
5701 				}
5702 				/* otherwise reduce kt by elapsed time */
5703 				kt -= d;
5704 			}
5705 		}
5706 		if (polled) {
5707 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5708 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5709 			if (!sd_dp->init_poll) {
5710 				sd_dp->init_poll = true;
5711 				sqcp->sd_dp = sd_dp;
5712 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5713 				sd_dp->qc_idx = k;
5714 			}
5715 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5716 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5717 		} else {
5718 			if (!sd_dp->init_hrt) {
5719 				sd_dp->init_hrt = true;
5720 				sqcp->sd_dp = sd_dp;
5721 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5722 					     HRTIMER_MODE_REL_PINNED);
5723 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5724 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5725 				sd_dp->qc_idx = k;
5726 			}
5727 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5728 			/* schedule the invocation of scsi_done() for a later time */
5729 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5730 		}
5731 		if (sdebug_statistics)
5732 			sd_dp->issuing_cpu = raw_smp_processor_id();
5733 	} else {	/* jdelay < 0, use work queue */
5734 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5735 			     atomic_read(&sdeb_inject_pending)))
5736 			sd_dp->aborted = true;
5737 		if (polled) {
5738 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5739 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5740 			if (!sd_dp->init_poll) {
5741 				sd_dp->init_poll = true;
5742 				sqcp->sd_dp = sd_dp;
5743 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5744 				sd_dp->qc_idx = k;
5745 			}
5746 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5747 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5748 		} else {
5749 			if (!sd_dp->init_wq) {
5750 				sd_dp->init_wq = true;
5751 				sqcp->sd_dp = sd_dp;
5752 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5753 				sd_dp->qc_idx = k;
5754 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5755 			}
5756 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5757 			schedule_work(&sd_dp->ew.work);
5758 		}
5759 		if (sdebug_statistics)
5760 			sd_dp->issuing_cpu = raw_smp_processor_id();
5761 		if (unlikely(sd_dp->aborted)) {
5762 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5763 				    scsi_cmd_to_rq(cmnd)->tag);
5764 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5765 			atomic_set(&sdeb_inject_pending, 0);
5766 			sd_dp->aborted = false;
5767 		}
5768 	}
5769 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5770 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5771 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5772 	return 0;
5773 
5774 respond_in_thread:	/* call back to mid-layer using invocation thread */
5775 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5776 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5777 	if (cmnd->result == 0 && scsi_result != 0) {
5778 		cmnd->result = scsi_result;
5779 		if (sdebug_verbose)
5780 			pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
5781 				blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
5782 	}
5783 	scsi_done(cmnd);
5784 	return 0;
5785 }
5786 
/* Note: The following macros create attribute files in the
 * /sys/module/scsi_debug/parameters directory. Unfortunately this driver
 * is not notified when one of those attributes changes, so it cannot
 * trigger the auxiliary actions that it can when the corresponding
 * attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
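/* Illustrative module load (hypothetical parameter values):
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * The resulting parameters appear under /sys/module/scsi_debug/parameters/
 * and many can also be changed via /sys/bus/pseudo/drivers/scsi_debug/.
 */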
5793 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5794 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5795 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5796 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5797 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5798 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5799 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5800 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5801 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5802 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5803 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5804 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5805 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5806 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5807 module_param_string(inq_product, sdebug_inq_product_id,
5808 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5809 module_param_string(inq_rev, sdebug_inq_product_rev,
5810 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5811 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5812 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5813 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5814 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5815 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5816 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5817 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5818 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5819 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5820 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5821 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5822 		   S_IRUGO | S_IWUSR);
5823 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5824 		   S_IRUGO | S_IWUSR);
5825 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5826 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5827 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5828 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5829 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5830 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5831 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5832 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5833 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5834 module_param_named(per_host_store, sdebug_per_host_store, bool,
5835 		   S_IRUGO | S_IWUSR);
5836 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5837 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5838 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5839 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5840 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5841 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5842 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5843 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5845 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5846 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5847 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5848 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5849 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5850 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5851 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5852 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5853 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5854 		   S_IRUGO | S_IWUSR);
5855 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5856 module_param_named(write_same_length, sdebug_write_same_length, int,
5857 		   S_IRUGO | S_IWUSR);
5858 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5859 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5860 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5861 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5862 
5863 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5864 MODULE_DESCRIPTION("SCSI debug adapter driver");
5865 MODULE_LICENSE("GPL");
5866 MODULE_VERSION(SDEBUG_VERSION);
5867 
MODULE_PARM_DESC(add_host, "add n hosts; a negative value (sysfs only) removes host(s) (def=1)");
5869 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5870 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5871 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5872 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
5874 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5875 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5878 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5879 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5880 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 (def) to max; if non-zero, max_queue is fixed to the same value)");
5883 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5884 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5885 		 SDEBUG_VERSION "\")");
5886 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5887 MODULE_PARM_DESC(lbprz,
5888 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5889 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5890 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5891 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5895 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors (from medium_error_start) that return a MEDIUM error");
5897 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5898 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5899 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5900 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5904 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5905 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5906 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5907 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5908 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5911 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5912 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5914 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5915 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5916 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5917 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5918 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5919 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5920 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5922 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5923 MODULE_PARM_DESC(uuid_ctl,
5924 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5925 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5926 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5927 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5928 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5929 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5930 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5931 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5932 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
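/* Illustrative zoned (ZBC) setup (hypothetical parameter values):
 *     modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=2
 * simulates host-managed zoned disks with 128 MiB zones, the first two of
 * which are conventional.
 */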
5933 
5934 #define SDEBUG_INFO_LEN 256
5935 static char sdebug_info[SDEBUG_INFO_LEN];
5936 
5937 static const char *scsi_debug_info(struct Scsi_Host *shp)
5938 {
5939 	int k;
5940 
5941 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5942 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5943 	if (k >= (SDEBUG_INFO_LEN - 1))
5944 		return sdebug_info;
5945 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5946 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5947 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5948 		  "statistics", (int)sdebug_statistics);
5949 	return sdebug_info;
5950 }
5951 
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts; e.g.
 * (illustrative host id) 'echo 1 > /proc/scsi/scsi_debug/0' would enable
 * the noise option on host 0.
 */
5953 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5954 				 int length)
5955 {
5956 	char arr[16];
5957 	int opts;
	int min_len = length > 15 ? 15 : length;
5959 
5960 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5961 		return -EACCES;
	memcpy(arr, buffer, min_len);
	arr[min_len] = '\0';
5964 	if (1 != sscanf(arr, "%d", &opts))
5965 		return -EINVAL;
5966 	sdebug_opts = opts;
5967 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5968 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5969 	if (sdebug_every_nth != 0)
5970 		tweak_cmnd_count();
5971 	return length;
5972 }
5973 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * shown are not updated atomically, so their values may be inaccurate on
 * a busy system.
 */
5977 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5978 {
5979 	int f, j, l;
5980 	struct sdebug_queue *sqp;
5981 	struct sdebug_host_info *sdhp;
5982 
5983 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5984 		   SDEBUG_VERSION, sdebug_version_date);
5985 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5986 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5987 		   sdebug_opts, sdebug_every_nth);
5988 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5989 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5990 		   sdebug_sector_size, "bytes");
5991 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5992 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5993 		   num_aborts);
5994 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5995 		   num_dev_resets, num_target_resets, num_bus_resets,
5996 		   num_host_resets);
5997 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5998 		   dix_reads, dix_writes, dif_errors);
5999 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6000 		   sdebug_statistics);
6001 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6002 		   atomic_read(&sdebug_cmnd_count),
6003 		   atomic_read(&sdebug_completions),
6004 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6005 		   atomic_read(&sdebug_a_tsf),
6006 		   atomic_read(&sdeb_mq_poll_count));
6007 
6008 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6009 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6010 		seq_printf(m, "  queue %d:\n", j);
6011 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6012 		if (f != sdebug_max_queue) {
6013 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6014 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6015 				   "first,last bits", f, l);
6016 		}
6017 	}
6018 
6019 	seq_printf(m, "this host_no=%d\n", host->host_no);
6020 	if (!xa_empty(per_store_ap)) {
6021 		bool niu;
6022 		int idx;
6023 		unsigned long l_idx;
6024 		struct sdeb_store_info *sip;
6025 
6026 		seq_puts(m, "\nhost list:\n");
6027 		j = 0;
6028 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6029 			idx = sdhp->si_idx;
6030 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6031 				   sdhp->shost->host_no, idx);
6032 			++j;
6033 		}
6034 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6035 			   sdeb_most_recent_idx);
6036 		j = 0;
6037 		xa_for_each(per_store_ap, l_idx, sip) {
6038 			niu = xa_get_mark(per_store_ap, l_idx,
6039 					  SDEB_XA_NOT_IN_USE);
6040 			idx = (int)l_idx;
6041 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6042 				   (niu ? "  not_in_use" : ""));
6043 			++j;
6044 		}
6045 	}
6046 	return 0;
6047 }
6048 
6049 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6050 {
6051 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6052 }
/* Returns -EBUSY if jdelay is being changed while commands are queued. The
 * unit of delay is jiffies; e.g. (illustrative)
 * 'echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay' requests a delay of
 * two jiffies per command.
 */
6056 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6057 			   size_t count)
6058 {
6059 	int jdelay, res;
6060 
6061 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6062 		res = count;
6063 		if (sdebug_jdelay != jdelay) {
6064 			int j, k;
6065 			struct sdebug_queue *sqp;
6066 
6067 			sdeb_block_all_queues();
6068 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6069 			     ++j, ++sqp) {
6070 				k = find_first_bit(sqp->in_use_bm,
6071 						   sdebug_max_queue);
6072 				if (k != sdebug_max_queue) {
6073 					res = -EBUSY;   /* queued commands */
6074 					break;
6075 				}
6076 			}
6077 			if (res > 0) {
6078 				sdebug_jdelay = jdelay;
6079 				sdebug_ndelay = 0;
6080 			}
6081 			sdeb_unblock_all_queues();
6082 		}
6083 		return res;
6084 	}
6085 	return -EINVAL;
6086 }
6087 static DRIVER_ATTR_RW(delay);
6088 
6089 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6090 {
6091 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6092 }
/* Returns -EBUSY if ndelay is being changed while commands are queued. If
 * the new value is > 0 and accepted then sdebug_jdelay is set to
 * JDELAY_OVERRIDDEN.
 */
6095 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6096 			    size_t count)
6097 {
6098 	int ndelay, res;
6099 
6100 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6101 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6102 		res = count;
6103 		if (sdebug_ndelay != ndelay) {
6104 			int j, k;
6105 			struct sdebug_queue *sqp;
6106 
6107 			sdeb_block_all_queues();
6108 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6109 			     ++j, ++sqp) {
6110 				k = find_first_bit(sqp->in_use_bm,
6111 						   sdebug_max_queue);
6112 				if (k != sdebug_max_queue) {
6113 					res = -EBUSY;   /* queued commands */
6114 					break;
6115 				}
6116 			}
6117 			if (res > 0) {
6118 				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
						       : DEF_JDELAY;
6121 			}
6122 			sdeb_unblock_all_queues();
6123 		}
6124 		return res;
6125 	}
6126 	return -EINVAL;
6127 }
6128 static DRIVER_ATTR_RW(ndelay);
6129 
6130 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6131 {
6132 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6133 }
6134 
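/* Accepts a decimal value, or hex with a "0x" prefix; e.g. (illustrative)
 * 'echo 0x5 > /sys/bus/pseudo/drivers/scsi_debug/opts' sets the noise (1)
 * and timeout (4) options together.
 */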
6135 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6136 			  size_t count)
6137 {
6138 	int opts;
6139 	char work[20];
6140 
6141 	if (sscanf(buf, "%10s", work) == 1) {
6142 		if (strncasecmp(work, "0x", 2) == 0) {
6143 			if (kstrtoint(work + 2, 16, &opts) == 0)
6144 				goto opts_done;
6145 		} else {
6146 			if (kstrtoint(work, 10, &opts) == 0)
6147 				goto opts_done;
6148 		}
6149 	}
6150 	return -EINVAL;
6151 opts_done:
6152 	sdebug_opts = opts;
6153 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6154 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6155 	tweak_cmnd_count();
6156 	return count;
6157 }
6158 static DRIVER_ATTR_RW(opts);
6159 
6160 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6161 {
6162 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6163 }
6164 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6165 			   size_t count)
6166 {
6167 	int n;
6168 
6169 	/* Cannot change from or to TYPE_ZBC with sysfs */
6170 	if (sdebug_ptype == TYPE_ZBC)
6171 		return -EINVAL;
6172 
6173 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6174 		if (n == TYPE_ZBC)
6175 			return -EINVAL;
6176 		sdebug_ptype = n;
6177 		return count;
6178 	}
6179 	return -EINVAL;
6180 }
6181 static DRIVER_ATTR_RW(ptype);
6182 
6183 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6184 {
6185 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6186 }
6187 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6188 			    size_t count)
6189 {
6190 	int n;
6191 
6192 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6193 		sdebug_dsense = n;
6194 		return count;
6195 	}
6196 	return -EINVAL;
6197 }
6198 static DRIVER_ATTR_RW(dsense);
6199 
6200 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6201 {
6202 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6203 }
6204 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6205 			     size_t count)
6206 {
6207 	int n, idx;
6208 
6209 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6210 		bool want_store = (n == 0);
6211 		struct sdebug_host_info *sdhp;
6212 
6213 		n = (n > 0);
6214 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6215 		if (sdebug_fake_rw == n)
6216 			return count;	/* not transitioning so do nothing */
6217 
6218 		if (want_store) {	/* 1 --> 0 transition, set up store */
6219 			if (sdeb_first_idx < 0) {
6220 				idx = sdebug_add_store();
6221 				if (idx < 0)
6222 					return idx;
6223 			} else {
6224 				idx = sdeb_first_idx;
6225 				xa_clear_mark(per_store_ap, idx,
6226 					      SDEB_XA_NOT_IN_USE);
6227 			}
6228 			/* make all hosts use same store */
6229 			list_for_each_entry(sdhp, &sdebug_host_list,
6230 					    host_list) {
6231 				if (sdhp->si_idx != idx) {
6232 					xa_set_mark(per_store_ap, sdhp->si_idx,
6233 						    SDEB_XA_NOT_IN_USE);
6234 					sdhp->si_idx = idx;
6235 				}
6236 			}
6237 			sdeb_most_recent_idx = idx;
6238 		} else {	/* 0 --> 1 transition is trigger for shrink */
6239 			sdebug_erase_all_stores(true /* apart from first */);
6240 		}
6241 		sdebug_fake_rw = n;
6242 		return count;
6243 	}
6244 	return -EINVAL;
6245 }
6246 static DRIVER_ATTR_RW(fake_rw);
6247 
6248 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6249 {
6250 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6251 }
6252 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6253 			      size_t count)
6254 {
6255 	int n;
6256 
6257 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6258 		sdebug_no_lun_0 = n;
6259 		return count;
6260 	}
6261 	return -EINVAL;
6262 }
6263 static DRIVER_ATTR_RW(no_lun_0);
6264 
6265 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6266 {
6267 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6268 }
6269 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6270 			      size_t count)
6271 {
6272 	int n;
6273 
6274 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6275 		sdebug_num_tgts = n;
6276 		sdebug_max_tgts_luns();
6277 		return count;
6278 	}
6279 	return -EINVAL;
6280 }
6281 static DRIVER_ATTR_RW(num_tgts);
6282 
6283 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6284 {
6285 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6286 }
6287 static DRIVER_ATTR_RO(dev_size_mb);
6288 
6289 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6290 {
6291 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6292 }
6293 
6294 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6295 				    size_t count)
6296 {
6297 	bool v;
6298 
6299 	if (kstrtobool(buf, &v))
6300 		return -EINVAL;
6301 
6302 	sdebug_per_host_store = v;
6303 	return count;
6304 }
6305 static DRIVER_ATTR_RW(per_host_store);
6306 
6307 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6308 {
6309 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6310 }
6311 static DRIVER_ATTR_RO(num_parts);
6312 
6313 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6314 {
6315 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6316 }
6317 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6318 			       size_t count)
6319 {
6320 	int nth;
6321 	char work[20];
6322 
6323 	if (sscanf(buf, "%10s", work) == 1) {
6324 		if (strncasecmp(work, "0x", 2) == 0) {
6325 			if (kstrtoint(work + 2, 16, &nth) == 0)
6326 				goto every_nth_done;
6327 		} else {
6328 			if (kstrtoint(work, 10, &nth) == 0)
6329 				goto every_nth_done;
6330 		}
6331 	}
6332 	return -EINVAL;
6333 
6334 every_nth_done:
6335 	sdebug_every_nth = nth;
6336 	if (nth && !sdebug_statistics) {
6337 		pr_info("every_nth needs statistics=1, set it\n");
6338 		sdebug_statistics = true;
6339 	}
6340 	tweak_cmnd_count();
6341 	return count;
6342 }
6343 static DRIVER_ATTR_RW(every_nth);
6344 
6345 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6346 {
6347 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6348 }
6349 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6350 				size_t count)
6351 {
6352 	int n;
6353 	bool changed;
6354 
6355 	if (kstrtoint(buf, 0, &n))
6356 		return -EINVAL;
6357 	if (n >= 0) {
6358 		if (n > (int)SAM_LUN_AM_FLAT) {
6359 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6360 			return -EINVAL;
6361 		}
6362 		changed = ((int)sdebug_lun_am != n);
6363 		sdebug_lun_am = n;
6364 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6365 			struct sdebug_host_info *sdhp;
6366 			struct sdebug_dev_info *dp;
6367 
6368 			spin_lock(&sdebug_host_list_lock);
6369 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6370 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6371 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6372 				}
6373 			}
6374 			spin_unlock(&sdebug_host_list_lock);
6375 		}
6376 		return count;
6377 	}
6378 	return -EINVAL;
6379 }
6380 static DRIVER_ATTR_RW(lun_format);
6381 
6382 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6383 {
6384 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6385 }
6386 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6387 			      size_t count)
6388 {
6389 	int n;
6390 	bool changed;
6391 
6392 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6393 		if (n > 256) {
6394 			pr_warn("max_luns can be no more than 256\n");
6395 			return -EINVAL;
6396 		}
6397 		changed = (sdebug_max_luns != n);
6398 		sdebug_max_luns = n;
6399 		sdebug_max_tgts_luns();
6400 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6401 			struct sdebug_host_info *sdhp;
6402 			struct sdebug_dev_info *dp;
6403 
6404 			spin_lock(&sdebug_host_list_lock);
6405 			list_for_each_entry(sdhp, &sdebug_host_list,
6406 					    host_list) {
6407 				list_for_each_entry(dp, &sdhp->dev_info_list,
6408 						    dev_list) {
6409 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6410 						dp->uas_bm);
6411 				}
6412 			}
6413 			spin_unlock(&sdebug_host_list_lock);
6414 		}
6415 		return count;
6416 	}
6417 	return -EINVAL;
6418 }
6419 static DRIVER_ATTR_RW(max_luns);
6420 
6421 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6422 {
6423 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6424 }
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will still be completed;
 * retired_max_queue records the highest in-use slot (plus one) so the
 * completion path can find them while the queue shrinks.
 */
6427 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6428 			       size_t count)
6429 {
6430 	int j, n, k, a;
6431 	struct sdebug_queue *sqp;
6432 
6433 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6434 	    (n <= SDEBUG_CANQUEUE) &&
6435 	    (sdebug_host_max_queue == 0)) {
6436 		sdeb_block_all_queues();
6437 		k = 0;
6438 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6439 		     ++j, ++sqp) {
6440 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6441 			if (a > k)
6442 				k = a;
6443 		}
6444 		sdebug_max_queue = n;
6445 		if (k == SDEBUG_CANQUEUE)
6446 			atomic_set(&retired_max_queue, 0);
6447 		else if (k >= n)
6448 			atomic_set(&retired_max_queue, k + 1);
6449 		else
6450 			atomic_set(&retired_max_queue, 0);
6451 		sdeb_unblock_all_queues();
6452 		return count;
6453 	}
6454 	return -EINVAL;
6455 }
6456 static DRIVER_ATTR_RW(max_queue);
6457 
6458 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6459 {
6460 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6461 }
6462 
6463 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6464 {
6465 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6466 }
6467 
6468 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6469 {
6470 	bool v;
6471 
6472 	if (kstrtobool(buf, &v))
6473 		return -EINVAL;
6474 
6475 	sdebug_no_rwlock = v;
6476 	return count;
6477 }
6478 static DRIVER_ATTR_RW(no_rwlock);
6479 
6480 /*
6481  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6482  * in range [0, sdebug_host_max_queue), we can't change it.
6483  */
6484 static DRIVER_ATTR_RO(host_max_queue);
6485 
6486 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6487 {
6488 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6489 }
6490 static DRIVER_ATTR_RO(no_uld);
6491 
6492 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6493 {
6494 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6495 }
6496 static DRIVER_ATTR_RO(scsi_level);
6497 
6498 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6499 {
6500 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6501 }
6502 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6503 				size_t count)
6504 {
6505 	int n;
6506 	bool changed;
6507 
6508 	/* Ignore capacity change for ZBC drives for now */
6509 	if (sdeb_zbc_in_use)
6510 		return -EOPNOTSUPP;	/* not -ENOTSUPP, which is kernel-internal */
6511 
6512 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6513 		changed = (sdebug_virtual_gb != n);
6514 		sdebug_virtual_gb = n;
6515 		sdebug_capacity = get_sdebug_capacity();
6516 		if (changed) {
6517 			struct sdebug_host_info *sdhp;
6518 			struct sdebug_dev_info *dp;
6519 
6520 			spin_lock(&sdebug_host_list_lock);
6521 			list_for_each_entry(sdhp, &sdebug_host_list,
6522 					    host_list) {
6523 				list_for_each_entry(dp, &sdhp->dev_info_list,
6524 						    dev_list) {
6525 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6526 						dp->uas_bm);
6527 				}
6528 			}
6529 			spin_unlock(&sdebug_host_list_lock);
6530 		}
6531 		return count;
6532 	}
6533 	return -EINVAL;
6534 }
6535 static DRIVER_ATTR_RW(virtual_gb);
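/*
 * Illustrative usage (an addition, value hypothetical): growing the fake
 * capacity; a CAPACITY_CHANGED unit attention is then raised on every
 * device, as set up in virtual_gb_store() above:
 *
 *	echo 8 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 */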
6536 
6537 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6538 {
6539 	/* show the absolute number of hosts currently active */
6540 	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
6541 }
6542 
6543 /*
6544  * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
6545  * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
6546  * Returns -EBUSY if another add_host sysfs invocation is active.
6547  */
6548 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6549 			      size_t count)
6550 {
6551 	int delta_hosts;
6552 
6553 	if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
6554 		return -EINVAL;
6555 	if (sdebug_verbose)
6556 		pr_info("prior num_hosts=%d, num_to_add=%d\n",
6557 			atomic_read(&sdebug_num_hosts), delta_hosts);
6558 	if (delta_hosts == 0)
6559 		return count;
6560 	if (mutex_trylock(&add_host_mutex) == 0)
6561 		return -EBUSY;
6562 	if (delta_hosts > 0) {
6563 		sdeb_add_n_hosts(delta_hosts);
6564 	} else if (delta_hosts < 0) {
6565 		smp_store_release(&sdebug_deflect_incoming, true);
6566 		sdeb_block_all_queues();
6567 		if (-delta_hosts >= atomic_read(&sdebug_num_hosts))
6568 			stop_all_queued(true);
6569 		do {
6570 			if (atomic_read(&sdebug_num_hosts) < 1) {
6571 				free_all_queued();
6572 				break;
6573 			}
6574 			sdebug_do_remove_host(false);
6575 		} while (++delta_hosts);
6576 		sdeb_unblock_all_queues();
6577 		smp_store_release(&sdebug_deflect_incoming, false);
6578 	}
6579 	mutex_unlock(&add_host_mutex);
6580 	if (sdebug_verbose)
6581 		pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
6582 	return count;
6583 }
6584 static DRIVER_ATTR_RW(add_host);
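/*
 * Illustrative usage (an addition) matching the rules described above
 * add_host_store(): add two hosts, add 0x10 (16) more, then remove all:
 *
 *	echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *	echo 0x10 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *	echo -9999 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */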
6585 
6586 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6587 {
6588 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6589 }
6590 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6591 				    size_t count)
6592 {
6593 	int n;
6594 
6595 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6596 		sdebug_vpd_use_hostno = n;
6597 		return count;
6598 	}
6599 	return -EINVAL;
6600 }
6601 static DRIVER_ATTR_RW(vpd_use_hostno);
6602 
6603 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6604 {
6605 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6606 }
6607 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6608 				size_t count)
6609 {
6610 	int n;
6611 
6612 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6613 		if (n > 0)
6614 			sdebug_statistics = true;
6615 		else {
6616 			clear_queue_stats();
6617 			sdebug_statistics = false;
6618 		}
6619 		return count;
6620 	}
6621 	return -EINVAL;
6622 }
6623 static DRIVER_ATTR_RW(statistics);
6624 
6625 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6626 {
6627 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6628 }
6629 static DRIVER_ATTR_RO(sector_size);
6630 
6631 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6632 {
6633 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6634 }
6635 static DRIVER_ATTR_RO(submit_queues);
6636 
6637 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6638 {
6639 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6640 }
6641 static DRIVER_ATTR_RO(dix);
6642 
6643 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6644 {
6645 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6646 }
6647 static DRIVER_ATTR_RO(dif);
6648 
6649 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6650 {
6651 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6652 }
6653 static DRIVER_ATTR_RO(guard);
6654 
6655 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6656 {
6657 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6658 }
6659 static DRIVER_ATTR_RO(ato);
6660 
6661 static ssize_t map_show(struct device_driver *ddp, char *buf)
6662 {
6663 	ssize_t count = 0;
6664 
6665 	if (!scsi_debug_lbp())
6666 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6667 				 sdebug_store_sectors);
6668 
6669 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6670 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6671 
6672 		if (sip)
6673 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6674 					  (int)map_size, sip->map_storep);
6675 	}
6676 	buf[count++] = '\n';
6677 	buf[count] = '\0';
6678 
6679 	return count;
6680 }
6681 static DRIVER_ATTR_RO(map);
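/*
 * Illustrative output (an addition, ranges hypothetical): %*pbl above
 * renders the provisioning bitmap as a range list, so reading the map
 * attribute might yield something like "0-1,64-127" for the mapped LBA
 * regions, or "0-<store_sectors>" when logical block provisioning is off.
 */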
6682 
6683 static ssize_t random_show(struct device_driver *ddp, char *buf)
6684 {
6685 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6686 }
6687 
6688 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6689 			    size_t count)
6690 {
6691 	bool v;
6692 
6693 	if (kstrtobool(buf, &v))
6694 		return -EINVAL;
6695 
6696 	sdebug_random = v;
6697 	return count;
6698 }
6699 static DRIVER_ATTR_RW(random);
6700 
6701 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6702 {
6703 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6704 }
6705 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6706 			       size_t count)
6707 {
6708 	int n;
6709 
6710 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6711 		sdebug_removable = (n > 0);
6712 		return count;
6713 	}
6714 	return -EINVAL;
6715 }
6716 static DRIVER_ATTR_RW(removable);
6717 
6718 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6719 {
6720 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6721 }
6722 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6723 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6724 			       size_t count)
6725 {
6726 	int n;
6727 
6728 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6729 		sdebug_host_lock = (n > 0);
6730 		return count;
6731 	}
6732 	return -EINVAL;
6733 }
6734 static DRIVER_ATTR_RW(host_lock);
6735 
6736 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6737 {
6738 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6739 }
6740 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6741 			    size_t count)
6742 {
6743 	int n;
6744 
6745 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6746 		sdebug_strict = (n > 0);
6747 		return count;
6748 	}
6749 	return -EINVAL;
6750 }
6751 static DRIVER_ATTR_RW(strict);
6752 
6753 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6754 {
6755 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6756 }
6757 static DRIVER_ATTR_RO(uuid_ctl);
6758 
6759 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6760 {
6761 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6762 }
6763 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6764 			     size_t count)
6765 {
6766 	int ret, n;
6767 
6768 	ret = kstrtoint(buf, 0, &n);
6769 	if (ret)
6770 		return ret;
6771 	sdebug_cdb_len = n;
6772 	all_config_cdb_len();
6773 	return count;
6774 }
6775 static DRIVER_ATTR_RW(cdb_len);
6776 
6777 static const char * const zbc_model_strs_a[] = {
6778 	[BLK_ZONED_NONE] = "none",
6779 	[BLK_ZONED_HA]   = "host-aware",
6780 	[BLK_ZONED_HM]   = "host-managed",
6781 };
6782 
6783 static const char * const zbc_model_strs_b[] = {
6784 	[BLK_ZONED_NONE] = "no",
6785 	[BLK_ZONED_HA]   = "aware",
6786 	[BLK_ZONED_HM]   = "managed",
6787 };
6788 
6789 static const char * const zbc_model_strs_c[] = {
6790 	[BLK_ZONED_NONE] = "0",
6791 	[BLK_ZONED_HA]   = "1",
6792 	[BLK_ZONED_HM]   = "2",
6793 };
6794 
6795 static int sdeb_zbc_model_str(const char *cp)
6796 {
6797 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6798 
6799 	if (res < 0) {
6800 		res = sysfs_match_string(zbc_model_strs_b, cp);
6801 		if (res < 0) {
6802 			res = sysfs_match_string(zbc_model_strs_c, cp);
6803 			if (res < 0)
6804 				return -EINVAL;
6805 		}
6806 	}
6807 	return res;
6808 }
6809 
6810 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6811 {
6812 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6813 			 zbc_model_strs_a[sdeb_zbc_model]);
6814 }
6815 static DRIVER_ATTR_RO(zbc);
6816 
6817 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6818 {
6819 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6820 }
6821 static DRIVER_ATTR_RO(tur_ms_to_ready);
6822 
6823 /* Note: The following array creates attribute files in the
6824  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6825  * files (over those found in the /sys/module/scsi_debug/parameters
6826  * directory) is that auxiliary actions can be triggered when an attribute
6827  * is changed. For example, see add_host_store() above.
6828  */
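/*
 * For instance (an illustrative addition), every_nth is visible both ways:
 *
 *	/sys/bus/pseudo/drivers/scsi_debug/every_nth	(store hook runs)
 *	/sys/module/scsi_debug/parameters/every_nth	(raw parameter value)
 */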
6829 
6830 static struct attribute *sdebug_drv_attrs[] = {
6831 	&driver_attr_delay.attr,
6832 	&driver_attr_opts.attr,
6833 	&driver_attr_ptype.attr,
6834 	&driver_attr_dsense.attr,
6835 	&driver_attr_fake_rw.attr,
6836 	&driver_attr_host_max_queue.attr,
6837 	&driver_attr_no_lun_0.attr,
6838 	&driver_attr_num_tgts.attr,
6839 	&driver_attr_dev_size_mb.attr,
6840 	&driver_attr_num_parts.attr,
6841 	&driver_attr_every_nth.attr,
6842 	&driver_attr_lun_format.attr,
6843 	&driver_attr_max_luns.attr,
6844 	&driver_attr_max_queue.attr,
6845 	&driver_attr_no_rwlock.attr,
6846 	&driver_attr_no_uld.attr,
6847 	&driver_attr_scsi_level.attr,
6848 	&driver_attr_virtual_gb.attr,
6849 	&driver_attr_add_host.attr,
6850 	&driver_attr_per_host_store.attr,
6851 	&driver_attr_vpd_use_hostno.attr,
6852 	&driver_attr_sector_size.attr,
6853 	&driver_attr_statistics.attr,
6854 	&driver_attr_submit_queues.attr,
6855 	&driver_attr_dix.attr,
6856 	&driver_attr_dif.attr,
6857 	&driver_attr_guard.attr,
6858 	&driver_attr_ato.attr,
6859 	&driver_attr_map.attr,
6860 	&driver_attr_random.attr,
6861 	&driver_attr_removable.attr,
6862 	&driver_attr_host_lock.attr,
6863 	&driver_attr_ndelay.attr,
6864 	&driver_attr_strict.attr,
6865 	&driver_attr_uuid_ctl.attr,
6866 	&driver_attr_cdb_len.attr,
6867 	&driver_attr_tur_ms_to_ready.attr,
6868 	&driver_attr_zbc.attr,
6869 	NULL,
6870 };
6871 ATTRIBUTE_GROUPS(sdebug_drv);
6872 
6873 static struct device *pseudo_primary;
6874 
6875 static int __init scsi_debug_init(void)
6876 {
6877 	bool want_store = (sdebug_fake_rw == 0);
6878 	unsigned long sz;
6879 	int k, ret, hosts_to_add;
6880 	int idx = -1;
6881 
6882 	ramdisk_lck_a[0] = &atomic_rw;
6883 	ramdisk_lck_a[1] = &atomic_rw2;
6884 	atomic_set(&retired_max_queue, 0);
6885 
6886 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6887 		pr_warn("ndelay must be less than 1 second, ignored\n");
6888 		sdebug_ndelay = 0;
6889 	} else if (sdebug_ndelay > 0)
6890 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6891 
6892 	switch (sdebug_sector_size) {
6893 	case  512:
6894 	case 1024:
6895 	case 2048:
6896 	case 4096:
6897 		break;
6898 	default:
6899 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6900 		return -EINVAL;
6901 	}
6902 
6903 	switch (sdebug_dif) {
6904 	case T10_PI_TYPE0_PROTECTION:
6905 		break;
6906 	case T10_PI_TYPE1_PROTECTION:
6907 	case T10_PI_TYPE2_PROTECTION:
6908 	case T10_PI_TYPE3_PROTECTION:
6909 		have_dif_prot = true;
6910 		break;
6911 
6912 	default:
6913 		pr_err("dif must be 0, 1, 2 or 3\n");
6914 		return -EINVAL;
6915 	}
6916 
6917 	if (sdebug_num_tgts < 0) {
6918 		pr_err("num_tgts must be >= 0\n");
6919 		return -EINVAL;
6920 	}
6921 
6922 	if (sdebug_guard > 1) {
6923 		pr_err("guard must be 0 or 1\n");
6924 		return -EINVAL;
6925 	}
6926 
6927 	if (sdebug_ato > 1) {
6928 		pr_err("ato must be 0 or 1\n");
6929 		return -EINVAL;
6930 	}
6931 
6932 	if (sdebug_physblk_exp > 15) {
6933 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6934 		return -EINVAL;
6935 	}
6936 
6937 	sdebug_lun_am = sdebug_lun_am_i;
6938 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6939 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6940 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6941 	}
6942 
6943 	if (sdebug_max_luns > 256) {
6944 		if (sdebug_max_luns > 16384) {
6945 			pr_warn("max_luns can be no more than 16384, using default\n");
6946 			sdebug_max_luns = DEF_MAX_LUNS;
6947 		}
6948 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6949 	}
6950 
6951 	if (sdebug_lowest_aligned > 0x3fff) {
6952 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6953 		return -EINVAL;
6954 	}
6955 
6956 	if (submit_queues < 1) {
6957 		pr_err("submit_queues must be 1 or more\n");
6958 		return -EINVAL;
6959 	}
6960 
6961 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6962 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6963 		return -EINVAL;
6964 	}
6965 
6966 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6967 	    (sdebug_host_max_queue < 0)) {
6968 		pr_err("host_max_queue must be in range [0, %d]\n",
6969 		       SDEBUG_CANQUEUE);
6970 		return -EINVAL;
6971 	}
6972 
6973 	if (sdebug_host_max_queue &&
6974 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6975 		sdebug_max_queue = sdebug_host_max_queue;
6976 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6977 			sdebug_max_queue);
6978 	}
6979 
6980 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6981 			       GFP_KERNEL);
6982 	if (sdebug_q_arr == NULL)
6983 		return -ENOMEM;
6984 	for (k = 0; k < submit_queues; ++k)
6985 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6986 
6987 	/*
6988 	 * check for host managed zoned block device specified with
6989 	 * ptype=0x14 or zbc=XXX.
6990 	 */
6991 	if (sdebug_ptype == TYPE_ZBC) {
6992 		sdeb_zbc_model = BLK_ZONED_HM;
6993 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6994 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6995 		if (k < 0) {
6996 			ret = k;
6997 			goto free_q_arr;
6998 		}
6999 		sdeb_zbc_model = k;
7000 		switch (sdeb_zbc_model) {
7001 		case BLK_ZONED_NONE:
7002 		case BLK_ZONED_HA:
7003 			sdebug_ptype = TYPE_DISK;
7004 			break;
7005 		case BLK_ZONED_HM:
7006 			sdebug_ptype = TYPE_ZBC;
7007 			break;
7008 		default:
7009 			pr_err("Invalid ZBC model\n");
7010 			ret = -EINVAL;
7011 			goto free_q_arr;
7012 		}
7013 	}
7014 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7015 		sdeb_zbc_in_use = true;
7016 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7017 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7018 	}
7019 
7020 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7021 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7022 	if (sdebug_dev_size_mb < 1)
7023 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7024 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7025 	sdebug_store_sectors = sz / sdebug_sector_size;
7026 	sdebug_capacity = get_sdebug_capacity();
7027 
7028 	/* play around with geometry, don't waste too much on track 0 */
7029 	sdebug_heads = 8;
7030 	sdebug_sectors_per = 32;
7031 	if (sdebug_dev_size_mb >= 256)
7032 		sdebug_heads = 64;
7033 	else if (sdebug_dev_size_mb >= 16)
7034 		sdebug_heads = 32;
7035 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7036 			       (sdebug_sectors_per * sdebug_heads);
7037 	if (sdebug_cylinders_per >= 1024) {
7038 		/* other LLDs do this; implies >= 1GB ram disk ... */
7039 		sdebug_heads = 255;
7040 		sdebug_sectors_per = 63;
7041 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7042 			       (sdebug_sectors_per * sdebug_heads);
7043 	}
7044 	if (scsi_debug_lbp()) {
7045 		sdebug_unmap_max_blocks =
7046 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7047 
7048 		sdebug_unmap_max_desc =
7049 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7050 
7051 		sdebug_unmap_granularity =
7052 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7053 
7054 		if (sdebug_unmap_alignment &&
7055 		    sdebug_unmap_granularity <=
7056 		    sdebug_unmap_alignment) {
7057 			pr_err("unmap_granularity <= unmap_alignment\n");
7058 			ret = -EINVAL;
7059 			goto free_q_arr;
7060 		}
7061 	}
7062 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7063 	if (want_store) {
7064 		idx = sdebug_add_store();
7065 		if (idx < 0) {
7066 			ret = idx;
7067 			goto free_q_arr;
7068 		}
7069 	}
7070 
7071 	pseudo_primary = root_device_register("pseudo_0");
7072 	if (IS_ERR(pseudo_primary)) {
7073 		pr_warn("root_device_register() error\n");
7074 		ret = PTR_ERR(pseudo_primary);
7075 		goto free_vm;
7076 	}
7077 	ret = bus_register(&pseudo_lld_bus);
7078 	if (ret < 0) {
7079 		pr_warn("bus_register error: %d\n", ret);
7080 		goto dev_unreg;
7081 	}
7082 	ret = driver_register(&sdebug_driverfs_driver);
7083 	if (ret < 0) {
7084 		pr_warn("driver_register error: %d\n", ret);
7085 		goto bus_unreg;
7086 	}
7087 
7088 	hosts_to_add = sdebug_add_host;
7089 	sdebug_add_host = 0;
7090 
7091 	for (k = 0; k < hosts_to_add; k++) {
7092 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
7093 			pr_info("exit early as sdebug_deflect_incoming is set\n");
7094 			return 0;
7095 		}
7096 		if (want_store && k == 0) {
7097 			ret = sdebug_add_host_helper(idx);
7098 			if (ret < 0) {
7099 				pr_err("add_host_helper k=%d, error=%d\n",
7100 				       k, -ret);
7101 				break;
7102 			}
7103 		} else {
7104 			ret = sdebug_do_add_host(want_store &&
7105 						 sdebug_per_host_store);
7106 			if (ret < 0) {
7107 				pr_err("add_host k=%d error=%d\n", k, -ret);
7108 				break;
7109 			}
7110 		}
7111 	}
7112 	if (sdebug_verbose)
7113 		pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
7114 
7115 	/*
7116 	 * Even though all the hosts have been established, due to async device (LU) scanning
7117 	 * by the scsi mid-level, there may still be devices (LUs) being set up.
7118 	 */
7119 	return 0;
7120 
7121 bus_unreg:
7122 	bus_unregister(&pseudo_lld_bus);
7123 dev_unreg:
7124 	root_device_unregister(pseudo_primary);
7125 free_vm:
7126 	sdebug_erase_store(idx, NULL);
7127 free_q_arr:
7128 	kfree(sdebug_q_arr);
7129 	return ret;
7130 }
7131 
7132 static void __exit scsi_debug_exit(void)
7133 {
7134 	int k;
7135 
7136 	/* Possible race with LUs still being set up; stop them asap */
7137 	sdeb_block_all_queues();
7138 	smp_store_release(&sdebug_deflect_incoming, true);
7139 	stop_all_queued(false);
7140 	for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
7141 		sdebug_do_remove_host(true);
7142 	free_all_queued();
7143 	if (sdebug_verbose)
7144 		pr_info("removed %d hosts\n", k);
7145 	driver_unregister(&sdebug_driverfs_driver);
7146 	bus_unregister(&pseudo_lld_bus);
7147 	root_device_unregister(pseudo_primary);
7148 
7149 	sdebug_erase_all_stores(false);
7150 	xa_destroy(per_store_ap);
7151 	kfree(sdebug_q_arr);
7152 }
7153 
7154 device_initcall(scsi_debug_init);
7155 module_exit(scsi_debug_exit);
7156 
7157 static void sdebug_release_adapter(struct device *dev)
7158 {
7159 	struct sdebug_host_info *sdbg_host;
7160 
7161 	sdbg_host = to_sdebug_host(dev);
7162 	kfree(sdbg_host);
7163 }
7164 
7165 /* idx must be valid; if sip is NULL, it is looked up using idx */
7166 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7167 {
7168 	if (idx < 0)
7169 		return;
7170 	if (!sip) {
7171 		if (xa_empty(per_store_ap))
7172 			return;
7173 		sip = xa_load(per_store_ap, idx);
7174 		if (!sip)
7175 			return;
7176 	}
7177 	vfree(sip->map_storep);
7178 	vfree(sip->dif_storep);
7179 	vfree(sip->storep);
7180 	xa_erase(per_store_ap, idx);
7181 	kfree(sip);
7182 }
7183 
7184 /* Assume apart_from_first==false only in shutdown case. */
7185 static void sdebug_erase_all_stores(bool apart_from_first)
7186 {
7187 	unsigned long idx;
7188 	struct sdeb_store_info *sip = NULL;
7189 
7190 	xa_for_each(per_store_ap, idx, sip) {
7191 		if (apart_from_first)
7192 			apart_from_first = false;
7193 		else
7194 			sdebug_erase_store(idx, sip);
7195 	}
7196 	if (apart_from_first)
7197 		sdeb_most_recent_idx = sdeb_first_idx;
7198 }
7199 
7200 /*
7201  * Returns the new element's index (idx) in the store xarray if >= 0,
7202  * else a negated errno. The number of stores is limited to 65536.
7203  */
7204 static int sdebug_add_store(void)
7205 {
7206 	int res;
7207 	u32 n_idx;
7208 	unsigned long iflags;
7209 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7210 	struct sdeb_store_info *sip = NULL;
7211 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7212 
7213 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7214 	if (!sip)
7215 		return -ENOMEM;
7216 
7217 	xa_lock_irqsave(per_store_ap, iflags);
7218 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7219 	if (unlikely(res < 0)) {
7220 		xa_unlock_irqrestore(per_store_ap, iflags);
7221 		kfree(sip);
7222 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7223 		return res;
7224 	}
7225 	sdeb_most_recent_idx = n_idx;
7226 	if (sdeb_first_idx < 0)
7227 		sdeb_first_idx = n_idx;
7228 	xa_unlock_irqrestore(per_store_ap, iflags);
7229 
7230 	res = -ENOMEM;
7231 	sip->storep = vzalloc(sz);
7232 	if (!sip->storep) {
7233 		pr_err("user data oom\n");
7234 		goto err;
7235 	}
7236 	if (sdebug_num_parts > 0)
7237 		sdebug_build_parts(sip->storep, sz);
7238 
7239 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7240 	if (sdebug_dix) {
7241 		int dif_size;
7242 
7243 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7244 		sip->dif_storep = vmalloc(dif_size);
7245 
7246 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7247 			sip->dif_storep);
7248 
7249 		if (!sip->dif_storep) {
7250 			pr_err("DIX oom\n");
7251 			goto err;
7252 		}
7253 		memset(sip->dif_storep, 0xff, dif_size);
7254 	}
7255 	/* Logical Block Provisioning */
7256 	if (scsi_debug_lbp()) {
7257 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7258 		sip->map_storep = vmalloc(array_size(sizeof(long),
7259 						     BITS_TO_LONGS(map_size)));
7260 
7261 		pr_info("%lu provisioning blocks\n", map_size);
7262 
7263 		if (!sip->map_storep) {
7264 			pr_err("LBP map oom\n");
7265 			goto err;
7266 		}
7267 
7268 		bitmap_zero(sip->map_storep, map_size);
7269 
7270 		/* Map first 1KB for partition table */
7271 		if (sdebug_num_parts)
7272 			map_region(sip, 0, 2);
7273 	}
7274 
7275 	rwlock_init(&sip->macc_lck);
7276 	return (int)n_idx;
7277 err:
7278 	sdebug_erase_store((int)n_idx, sip);
7279 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7280 	return res;
7281 }
7282 
7283 static int sdebug_add_host_helper(int per_host_idx)
7284 {
7285 	int k, devs_per_host, idx;
7286 	int error = -ENOMEM;
7287 	struct sdebug_host_info *sdbg_host;
7288 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7289 
7290 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7291 	if (!sdbg_host)
7292 		return -ENOMEM;
7293 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7294 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7295 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7296 	sdbg_host->si_idx = idx;
7297 
7298 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7299 
7300 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7301 	for (k = 0; k < devs_per_host; k++) {
7302 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7303 		if (!sdbg_devinfo)
7304 			goto clean;
7305 	}
7306 
7307 	spin_lock(&sdebug_host_list_lock);
7308 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7309 	spin_unlock(&sdebug_host_list_lock);
7310 
7311 	sdbg_host->dev.bus = &pseudo_lld_bus;
7312 	sdbg_host->dev.parent = pseudo_primary;
7313 	sdbg_host->dev.release = &sdebug_release_adapter;
7314 	dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
7315 
7316 	error = device_register(&sdbg_host->dev);
7317 	if (error)
7318 		goto clean;
7319 
7320 	atomic_inc(&sdebug_num_hosts);
7321 	return 0;
7322 
7323 clean:
7324 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7325 				 dev_list) {
7326 		list_del(&sdbg_devinfo->dev_list);
7327 		kfree(sdbg_devinfo->zstate);
7328 		kfree(sdbg_devinfo);
7329 	}
7330 	kfree(sdbg_host);
7331 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7332 	return error;
7333 }
7334 
7335 static int sdebug_do_add_host(bool mk_new_store)
7336 {
7337 	int ph_idx = sdeb_most_recent_idx;
7338 
7339 	if (mk_new_store) {
7340 		ph_idx = sdebug_add_store();
7341 		if (ph_idx < 0)
7342 			return ph_idx;
7343 	}
7344 	return sdebug_add_host_helper(ph_idx);
7345 }
7346 
7347 static void sdebug_do_remove_host(bool the_end)
7348 {
7349 	int idx = -1;
7350 	struct sdebug_host_info *sdbg_host = NULL;
7351 	struct sdebug_host_info *sdbg_host2;
7352 
7353 	spin_lock(&sdebug_host_list_lock);
7354 	if (!list_empty(&sdebug_host_list)) {
7355 		sdbg_host = list_entry(sdebug_host_list.prev,
7356 				       struct sdebug_host_info, host_list);
7357 		idx = sdbg_host->si_idx;
7358 	}
7359 	if (!the_end && idx >= 0) {
7360 		bool unique = true;
7361 
7362 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7363 			if (sdbg_host2 == sdbg_host)
7364 				continue;
7365 			if (idx == sdbg_host2->si_idx) {
7366 				unique = false;
7367 				break;
7368 			}
7369 		}
7370 		if (unique) {
7371 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7372 			if (idx == sdeb_most_recent_idx)
7373 				--sdeb_most_recent_idx;
7374 		}
7375 	}
7376 	if (sdbg_host)
7377 		list_del(&sdbg_host->host_list);
7378 	spin_unlock(&sdebug_host_list_lock);
7379 
7380 	if (!sdbg_host)
7381 		return;
7382 
7383 	device_unregister(&sdbg_host->dev);
7384 	atomic_dec(&sdebug_num_hosts);
7385 }
7386 
7387 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7388 {
7389 	int num_in_q = 0;
7390 	struct sdebug_dev_info *devip;
7391 
7392 	sdeb_block_all_queues();
7393 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7394 	if (!devip) {
7395 		sdeb_unblock_all_queues();
7396 		return -ENODEV;
7397 	}
7398 	num_in_q = atomic_read(&devip->num_in_q);
7399 
7400 	if (qdepth > SDEBUG_CANQUEUE) {
7401 		qdepth = SDEBUG_CANQUEUE;
7402 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7403 			qdepth, SDEBUG_CANQUEUE);
7404 	}
7405 	if (qdepth < 1)
7406 		qdepth = 1;
7407 	if (qdepth != sdev->queue_depth)
7408 		scsi_change_queue_depth(sdev, qdepth);
7409 
7410 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7411 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7412 			    __func__, qdepth, num_in_q);
7413 	}
7414 	sdeb_unblock_all_queues();
7415 	return sdev->queue_depth;
7416 }
7417 
7418 static bool fake_timeout(struct scsi_cmnd *scp)
7419 {
7420 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0) {
7421 		if (sdebug_every_nth < -1)
7422 			sdebug_every_nth = -1;
7423 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7424 			return true; /* ignore command causing timeout */
7425 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7426 			 scsi_medium_access_command(scp))
7427 			return true; /* time out reads and writes */
7428 	}
7429 	return false;
7430 }
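/*
 * Illustrative trigger (an addition, values hypothetical): with every_nth
 * set, fake_timeout() above can swallow every Nth command so the SCSI
 * mid-level's error handling is exercised; this assumes SDEBUG_OPT_TIMEOUT
 * is the 0x4 bit of opts, as in this driver's documentation:
 *
 *	modprobe scsi_debug every_nth=100 opts=4
 */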
7431 
7432 /* Response to TUR or media access command when device stopped */
7433 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7434 {
7435 	int stopped_state;
7436 	u64 diff_ns = 0;
7437 	ktime_t now_ts = ktime_get_boottime();
7438 	struct scsi_device *sdp = scp->device;
7439 
7440 	stopped_state = atomic_read(&devip->stopped);
7441 	if (stopped_state == 2) {
7442 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7443 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7444 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7445 				/* tur_ms_to_ready timer extinguished */
7446 				atomic_set(&devip->stopped, 0);
7447 				return 0;
7448 			}
7449 		}
7450 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7451 		if (sdebug_verbose)
7452 			sdev_printk(KERN_INFO, sdp,
7453 				    "%s: Not ready: in process of becoming ready\n", my_name);
7454 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7455 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7456 
7457 			if (diff_ns <= tur_nanosecs_to_ready)
7458 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7459 			else
7460 				diff_ns = tur_nanosecs_to_ready;
7461 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7462 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7463 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7464 						   diff_ns);
7465 			return check_condition_result;
7466 		}
7467 	}
7468 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7469 	if (sdebug_verbose)
7470 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7471 			    my_name);
7472 	return check_condition_result;
7473 }
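/*
 * Illustrative timeline (an addition, values hypothetical): with
 * tur_ms_to_ready=2000, a TEST UNIT READY arriving 500 ms after device
 * creation gets NOT READY sense carrying roughly 1500 (milliseconds left)
 * in the sense information field, per the 20-061r2 behaviour above; once
 * 2000 ms have elapsed, the stopped state clears and TUR succeeds.
 */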
7474 
7475 static int sdebug_map_queues(struct Scsi_Host *shost)
7476 {
7477 	int i, qoff;
7478 
7479 	if (shost->nr_hw_queues == 1)
7480 		return 0;
7481 
7482 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7483 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7484 
7485 		map->nr_queues  = 0;
7486 
7487 		if (i == HCTX_TYPE_DEFAULT)
7488 			map->nr_queues = submit_queues - poll_queues;
7489 		else if (i == HCTX_TYPE_POLL)
7490 			map->nr_queues = poll_queues;
7491 
7492 		if (!map->nr_queues) {
7493 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7494 			continue;
7495 		}
7496 
7497 		map->queue_offset = qoff;
7498 		blk_mq_map_queues(map);
7499 
7500 		qoff += map->nr_queues;
7501 	}
7502 
7503 	return 0;
7505 }
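/*
 * Worked example (an addition, values hypothetical): with submit_queues=4
 * and poll_queues=1, the loop above sets HCTX_TYPE_DEFAULT to 3 queues at
 * queue_offset 0 and HCTX_TYPE_POLL to 1 queue at queue_offset 3, while
 * HCTX_TYPE_READ keeps nr_queues == 0 and is skipped.
 */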
7506 
7507 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7508 {
7509 	bool first;
7510 	bool retiring = false;
7511 	int num_entries = 0;
7512 	unsigned int qc_idx = 0;
7513 	unsigned long iflags;
7514 	ktime_t kt_from_boot = ktime_get_boottime();
7515 	struct sdebug_queue *sqp;
7516 	struct sdebug_queued_cmd *sqcp;
7517 	struct scsi_cmnd *scp;
7518 	struct sdebug_dev_info *devip;
7519 	struct sdebug_defer *sd_dp;
7520 
7521 	sqp = sdebug_q_arr + queue_num;
7522 
7523 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7524 
7525 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7526 	if (qc_idx >= sdebug_max_queue)
7527 		goto unlock;
7528 
7529 	for (first = true; first || qc_idx + 1 < sdebug_max_queue;) {
7530 		if (first) {
7531 			first = false;
7532 			if (!test_bit(qc_idx, sqp->in_use_bm))
7533 				continue;
7534 		} else {
7535 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7536 		}
7537 		if (qc_idx >= sdebug_max_queue)
7538 			break;
7539 
7540 		sqcp = &sqp->qc_arr[qc_idx];
7541 		sd_dp = sqcp->sd_dp;
7542 		if (unlikely(!sd_dp))
7543 			continue;
7544 		scp = sqcp->a_cmnd;
7545 		if (unlikely(scp == NULL)) {
7546 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7547 			       queue_num, qc_idx, __func__);
7548 			break;
7549 		}
7550 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7551 			if (kt_from_boot < sd_dp->cmpl_ts)
7552 				continue;
7553 
7554 		} else		/* ignore non-REQ_POLLED requests */
7555 			continue;
7556 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7557 		if (likely(devip))
7558 			atomic_dec(&devip->num_in_q);
7559 		else
7560 			pr_err("devip=NULL from %s\n", __func__);
7561 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7562 			retiring = true;
7563 
7564 		sqcp->a_cmnd = NULL;
7565 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7566 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7567 				sqp, queue_num, qc_idx, __func__);
7568 			break;
7569 		}
7570 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7571 			int k, retval;
7572 
7573 			retval = atomic_read(&retired_max_queue);
7574 			if (qc_idx >= retval) {
7575 				pr_err("index %u too large\n", qc_idx);
7576 				break;
7577 			}
7578 			k = find_last_bit(sqp->in_use_bm, retval);
7579 			if ((k < sdebug_max_queue) || (k == retval))
7580 				atomic_set(&retired_max_queue, 0);
7581 			else
7582 				atomic_set(&retired_max_queue, k + 1);
7583 		}
7584 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7585 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7586 		scsi_done(scp); /* callback to mid level */
7587 		num_entries++;
7588 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7589 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7590 			break;
7591 	}
7592 
7593 unlock:
7594 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7595 
7596 	if (num_entries > 0)
7597 		atomic_add(num_entries, &sdeb_mq_poll_count);
7598 	return num_entries;
7599 }
7600 
7601 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7602 				   struct scsi_cmnd *scp)
7603 {
7604 	u8 sdeb_i;
7605 	struct scsi_device *sdp = scp->device;
7606 	const struct opcode_info_t *oip;
7607 	const struct opcode_info_t *r_oip;
7608 	struct sdebug_dev_info *devip;
7609 	u8 *cmd = scp->cmnd;
7610 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7611 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7612 	int k, na;
7613 	int errsts = 0;
7614 	u64 lun_index = sdp->lun & 0x3FFF;
7615 	u32 flags;
7616 	u16 sa;
7617 	u8 opcode = cmd[0];
7618 	bool has_wlun_rl;
7619 	bool inject_now;
7620 
7621 	scsi_set_resid(scp, 0);
7622 	if (sdebug_statistics) {
7623 		atomic_inc(&sdebug_cmnd_count);
7624 		inject_now = inject_on_this_cmd();
7625 	} else {
7626 		inject_now = false;
7627 	}
7628 	if (unlikely(sdebug_verbose &&
7629 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7630 		char b[120];
7631 		int n, len, sb;
7632 
7633 		len = scp->cmd_len;
7634 		sb = (int)sizeof(b);
7635 		if (len > 32)
7636 			strcpy(b, "too long, over 32 bytes");
7637 		else {
7638 			for (k = 0, n = 0; k < len && n < sb; ++k)
7639 				n += scnprintf(b + n, sb - n, "%02x ",
7640 					       (u32)cmd[k]);
7641 		}
7642 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7643 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7644 	}
7645 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7646 		return SCSI_MLQUEUE_HOST_BUSY;
7647 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7648 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7649 		goto err_out;
7650 
7651 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7652 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7653 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7654 	if (unlikely(!devip)) {
7655 		devip = find_build_dev_info(sdp);
7656 		if (!devip)
7657 			goto err_out;
7658 	}
7659 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7660 		atomic_set(&sdeb_inject_pending, 1);
7661 
7662 	na = oip->num_attached;
7663 	r_pfp = oip->pfp;
7664 	if (na) {	/* multiple commands with this opcode */
7665 		r_oip = oip;
7666 		if (FF_SA & r_oip->flags) {
7667 			if (F_SA_LOW & oip->flags)
7668 				sa = 0x1f & cmd[1];
7669 			else
7670 				sa = get_unaligned_be16(cmd + 8);
7671 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7672 				if (opcode == oip->opcode && sa == oip->sa)
7673 					break;
7674 			}
7675 		} else {   /* since no service action only check opcode */
7676 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7677 				if (opcode == oip->opcode)
7678 					break;
7679 			}
7680 		}
7681 		if (k > na) {
7682 			if (F_SA_LOW & r_oip->flags)
7683 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7684 			else if (F_SA_HIGH & r_oip->flags)
7685 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7686 			else
7687 				mk_sense_invalid_opcode(scp);
7688 			goto check_cond;
7689 		}
7690 	}	/* else (when na==0) we assume the oip is a match */
7691 	flags = oip->flags;
7692 	if (unlikely(F_INV_OP & flags)) {
7693 		mk_sense_invalid_opcode(scp);
7694 		goto check_cond;
7695 	}
7696 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7697 		if (sdebug_verbose)
7698 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7699 				    my_name, opcode);
7700 		mk_sense_invalid_opcode(scp);
7701 		goto check_cond;
7702 	}
7703 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7704 		u8 rem;
7705 		int j;
7706 
7707 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7708 			rem = ~oip->len_mask[k] & cmd[k];
7709 			if (rem) {
7710 				for (j = 7; j >= 0; --j, rem <<= 1) {
7711 					if (0x80 & rem)
7712 						break;
7713 				}
7714 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7715 				goto check_cond;
7716 			}
7717 		}
7718 	}
7719 	if (unlikely(!(F_SKIP_UA & flags) &&
7720 		     find_first_bit(devip->uas_bm,
7721 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7722 		errsts = make_ua(scp, devip);
7723 		if (errsts)
7724 			goto check_cond;
7725 	}
7726 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7727 		     atomic_read(&devip->stopped))) {
7728 		errsts = resp_not_ready(scp, devip);
7729 		if (errsts)
7730 			goto fini;
7731 	}
7732 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7733 		goto fini;
7734 	if (unlikely(sdebug_every_nth)) {
7735 		if (fake_timeout(scp))
7736 			return 0;	/* ignore command: make trouble */
7737 	}
7738 	if (likely(oip->pfp))
7739 		pfp = oip->pfp;	/* calls a resp_* function */
7740 	else
7741 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7742 
7743 fini:
7744 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7745 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7746 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7747 					    sdebug_ndelay > 10000)) {
7748 		/*
7749 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7750 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7751 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7752 		 * For Synchronize Cache want 1/20 of SSU's delay.
7753 		 */
7754 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7755 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7756 
7757 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7758 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7759 	} else
7760 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7761 				     sdebug_ndelay);
7762 check_cond:
7763 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7764 err_out:
7765 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7766 }
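/*
 * Worked example (an addition, values hypothetical) for the F_LONG_DELAY
 * branch above: with sdebug_jdelay=5, a START STOP UNIT gets a 5 second
 * delay, while a SYNCHRONIZE CACHE (F_SYNC_DELAY, denom=20) gets
 * mult_frac(USER_HZ * 5, HZ, 20 * USER_HZ) = 5 * HZ / 20 jiffies,
 * i.e. about a quarter of a second.
 */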
7767 
7768 static struct scsi_host_template sdebug_driver_template = {
7769 	.show_info =		scsi_debug_show_info,
7770 	.write_info =		scsi_debug_write_info,
7771 	.proc_name =		sdebug_proc_name,
7772 	.name =			"SCSI DEBUG",
7773 	.info =			scsi_debug_info,
7774 	.slave_alloc =		scsi_debug_slave_alloc,
7775 	.slave_configure =	scsi_debug_slave_configure,
7776 	.slave_destroy =	scsi_debug_slave_destroy,
7777 	.ioctl =		scsi_debug_ioctl,
7778 	.queuecommand =		scsi_debug_queuecommand,
7779 	.change_queue_depth =	sdebug_change_qdepth,
7780 	.map_queues =		sdebug_map_queues,
7781 	.mq_poll =		sdebug_blk_mq_poll,
7782 	.eh_abort_handler =	scsi_debug_abort,
7783 	.eh_device_reset_handler = scsi_debug_device_reset,
7784 	.eh_target_reset_handler = scsi_debug_target_reset,
7785 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7786 	.eh_host_reset_handler = scsi_debug_host_reset,
7787 	.can_queue =		SDEBUG_CANQUEUE,
7788 	.this_id =		7,
7789 	.sg_tablesize =		SG_MAX_SEGMENTS,
7790 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7791 	.max_sectors =		-1U,
7792 	.max_segment_size =	-1U,
7793 	.module =		THIS_MODULE,
7794 	.track_queue_depth =	1,
7795 };
7796 
7797 static int sdebug_driver_probe(struct device *dev)
7798 {
7799 	int error = 0;
7800 	struct sdebug_host_info *sdbg_host;
7801 	struct Scsi_Host *hpnt;
7802 	int hprot;
7803 
7804 	sdbg_host = to_sdebug_host(dev);
7805 
7806 	sdebug_driver_template.can_queue = sdebug_max_queue;
7807 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7808 	if (!sdebug_clustering)
7809 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7810 
7811 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7812 	if (!hpnt) {
7813 		pr_err("scsi_host_alloc failed\n");
7814 		error = -ENODEV;
7815 		return error;
7816 	}
7817 	if (submit_queues > nr_cpu_ids) {
7818 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7819 			my_name, submit_queues, nr_cpu_ids);
7820 		submit_queues = nr_cpu_ids;
7821 	}
7822 	/*
7823 	 * Decide whether to tell scsi subsystem that we want mq. The
7824 	 * following should give the same answer for each host.
7825 	 */
7826 	hpnt->nr_hw_queues = submit_queues;
7827 	if (sdebug_host_max_queue)
7828 		hpnt->host_tagset = 1;
7829 
7830 	/* poll queues are possible for nr_hw_queues > 1 */
7831 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7832 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7833 			 my_name, poll_queues, hpnt->nr_hw_queues);
7834 		poll_queues = 0;
7835 	}
7836 
7837 	/*
7838 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7839 	 * left over for non-polled I/O.
7840 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7841 	 */
7842 	if (poll_queues >= submit_queues) {
7843 		if (submit_queues < 3)
7844 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7845 		else
7846 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7847 				my_name, submit_queues - 1);
7848 		poll_queues = 1;
7849 	}
7850 	if (poll_queues)
7851 		hpnt->nr_maps = 3;
7852 
7853 	sdbg_host->shost = hpnt;
7854 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7855 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7856 		hpnt->max_id = sdebug_num_tgts + 1;
7857 	else
7858 		hpnt->max_id = sdebug_num_tgts;
7859 	/* = sdebug_max_luns; */
7860 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7861 
7862 	hprot = 0;
7863 
7864 	switch (sdebug_dif) {
7865 
7866 	case T10_PI_TYPE1_PROTECTION:
7867 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7868 		if (sdebug_dix)
7869 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7870 		break;
7871 
7872 	case T10_PI_TYPE2_PROTECTION:
7873 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7874 		if (sdebug_dix)
7875 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7876 		break;
7877 
7878 	case T10_PI_TYPE3_PROTECTION:
7879 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7880 		if (sdebug_dix)
7881 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7882 		break;
7883 
7884 	default:
7885 		if (sdebug_dix)
7886 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7887 		break;
7888 	}
7889 
7890 	scsi_host_set_prot(hpnt, hprot);
7891 
7892 	if (have_dif_prot || sdebug_dix)
7893 		pr_info("host protection%s%s%s%s%s%s%s\n",
7894 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7895 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7896 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7897 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7898 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7899 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7900 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7901 
7902 	if (sdebug_guard == 1)
7903 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7904 	else
7905 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7906 
7907 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7908 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7909 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7910 		sdebug_statistics = true;
7911 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7912 	if (error) {
7913 		pr_err("scsi_add_host failed\n");
7914 		error = -ENODEV;
7915 		scsi_host_put(hpnt);
7916 	} else {
7917 		scsi_scan_host(hpnt);
7918 	}
7919 
7920 	return error;
7921 }
7922 
7923 static void sdebug_driver_remove(struct device *dev)
7924 {
7925 	struct sdebug_host_info *sdbg_host;
7926 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7927 
7928 	sdbg_host = to_sdebug_host(dev);
7929 
7930 	scsi_remove_host(sdbg_host->shost);
7931 
7932 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7933 				 dev_list) {
7934 		list_del(&sdbg_devinfo->dev_list);
7935 		kfree(sdbg_devinfo->zstate);
7936 		kfree(sdbg_devinfo);
7937 	}
7938 
7939 	scsi_host_put(sdbg_host->shost);
7940 }
7941 
7942 static int pseudo_lld_bus_match(struct device *dev,
7943 				struct device_driver *dev_driver)
7944 {
7945 	return 1;
7946 }
7947 
7948 static struct bus_type pseudo_lld_bus = {
7949 	.name = "pseudo",
7950 	.match = pseudo_lld_bus_match,
7951 	.probe = sdebug_driver_probe,
7952 	.remove = sdebug_driver_remove,
7953 	.drv_groups = sdebug_drv_groups,
7954 };
7955