1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/mutex.h>
37 #include <linux/interrupt.h>
38 #include <linux/atomic.h>
39 #include <linux/hrtimer.h>
40 #include <linux/uuid.h>
41 #include <linux/t10-pi.h>
42 #include <linux/msdos_partition.h>
43 #include <linux/random.h>
44 #include <linux/xarray.h>
45 #include <linux/prefetch.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20210520";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define WRITE_PROTECTED 0x27
80 #define UA_RESET_ASC 0x29
81 #define UA_CHANGED_ASC 0x2a
82 #define TARGET_CHANGED_ASC 0x3f
83 #define LUNS_CHANGED_ASCQ 0x0e
84 #define INSUFF_RES_ASC 0x55
85 #define INSUFF_RES_ASCQ 0x3
86 #define POWER_ON_RESET_ASCQ 0x0
87 #define POWER_ON_OCCURRED_ASCQ 0x1
88 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
89 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
90 #define CAPACITY_CHANGED_ASCQ 0x9
91 #define SAVING_PARAMS_UNSUP 0x39
92 #define TRANSPORT_PROBLEM 0x4b
93 #define THRESHOLD_EXCEEDED 0x5d
94 #define LOW_POWER_COND_ON 0x5e
95 #define MISCOMPARE_VERIFY_ASC 0x1d
96 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
97 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
98 #define WRITE_ERROR_ASC 0xc
99 #define UNALIGNED_WRITE_ASCQ 0x4
100 #define WRITE_BOUNDARY_ASCQ 0x5
101 #define READ_INVDATA_ASCQ 0x6
102 #define READ_BOUNDARY_ASCQ 0x7
103 #define INSUFF_ZONE_ASCQ 0xe
104 
105 /* Additional Sense Code Qualifier (ASCQ) */
106 #define ACK_NAK_TO 0x3
107 
108 /* Default values for driver parameters */
109 #define DEF_NUM_HOST   1
110 #define DEF_NUM_TGTS   1
111 #define DEF_MAX_LUNS   1
112 /* With these defaults, this driver will make 1 host with 1 target
113  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
114  */
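/*
 * For example, a hypothetical invocation such as:
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4 dev_size_mb=64
 * would instead yield 2 hosts, each with 2 targets of 4 logical units,
 * each logical unit presenting 64 MiB of ram-backed storage.
 */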
115 #define DEF_ATO 1
116 #define DEF_CDB_LEN 10
117 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
118 #define DEF_DEV_SIZE_PRE_INIT   0
119 #define DEF_DEV_SIZE_MB   8
120 #define DEF_ZBC_DEV_SIZE_MB   128
121 #define DEF_DIF 0
122 #define DEF_DIX 0
123 #define DEF_PER_HOST_STORE false
124 #define DEF_D_SENSE   0
125 #define DEF_EVERY_NTH   0
126 #define DEF_FAKE_RW	0
127 #define DEF_GUARD 0
128 #define DEF_HOST_LOCK 0
129 #define DEF_LBPU 0
130 #define DEF_LBPWS 0
131 #define DEF_LBPWS10 0
132 #define DEF_LBPRZ 1
133 #define DEF_LOWEST_ALIGNED 0
134 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
135 #define DEF_NO_LUN_0   0
136 #define DEF_NUM_PARTS   0
137 #define DEF_OPTS   0
138 #define DEF_OPT_BLKS 1024
139 #define DEF_PHYSBLK_EXP 0
140 #define DEF_OPT_XFERLEN_EXP 0
141 #define DEF_PTYPE   TYPE_DISK
142 #define DEF_RANDOM false
143 #define DEF_REMOVABLE false
144 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
145 #define DEF_SECTOR_SIZE 512
146 #define DEF_UNMAP_ALIGNMENT 0
147 #define DEF_UNMAP_GRANULARITY 1
148 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
149 #define DEF_UNMAP_MAX_DESC 256
150 #define DEF_VIRTUAL_GB   0
151 #define DEF_VPD_USE_HOSTNO 1
152 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STRICT 0
154 #define DEF_STATISTICS false
155 #define DEF_SUBMIT_QUEUES 1
156 #define DEF_TUR_MS_TO_READY 0
157 #define DEF_UUID_CTL 0
158 #define JDELAY_OVERRIDDEN -9999
159 
160 /* Default parameters for ZBC drives */
161 #define DEF_ZBC_ZONE_SIZE_MB	128
162 #define DEF_ZBC_MAX_OPEN_ZONES	8
163 #define DEF_ZBC_NR_CONV_ZONES	1
164 
165 #define SDEBUG_LUN_0_VAL 0
166 
167 /* bit mask values for sdebug_opts */
168 #define SDEBUG_OPT_NOISE		1
169 #define SDEBUG_OPT_MEDIUM_ERR		2
170 #define SDEBUG_OPT_TIMEOUT		4
171 #define SDEBUG_OPT_RECOVERED_ERR	8
172 #define SDEBUG_OPT_TRANSPORT_ERR	16
173 #define SDEBUG_OPT_DIF_ERR		32
174 #define SDEBUG_OPT_DIX_ERR		64
175 #define SDEBUG_OPT_MAC_TIMEOUT		128
176 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
177 #define SDEBUG_OPT_Q_NOISE		0x200
178 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
179 #define SDEBUG_OPT_RARE_TSF		0x800
180 #define SDEBUG_OPT_N_WCE		0x1000
181 #define SDEBUG_OPT_RESET_NOISE		0x2000
182 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
183 #define SDEBUG_OPT_HOST_BUSY		0x8000
184 #define SDEBUG_OPT_CMD_ABORT		0x10000
185 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
186 			      SDEBUG_OPT_RESET_NOISE)
187 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
188 				  SDEBUG_OPT_TRANSPORT_ERR | \
189 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
190 				  SDEBUG_OPT_SHORT_TRANSFER | \
191 				  SDEBUG_OPT_HOST_BUSY | \
192 				  SDEBUG_OPT_CMD_ABORT)
193 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
194 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
195 
196 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
197  * priority order. In the subset implemented here lower numbers have higher
198  * priority. The UA numbers should be a sequence starting from 0 with
199  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
200 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
201 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
202 #define SDEBUG_UA_BUS_RESET 2
203 #define SDEBUG_UA_MODE_CHANGED 3
204 #define SDEBUG_UA_CAPACITY_CHANGED 4
205 #define SDEBUG_UA_LUNS_CHANGED 5
206 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
207 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
208 #define SDEBUG_NUM_UAS 8
209 
210 /* When the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is
211  * simulated at this sector on read commands: */
212 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
213 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
214 
215 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
216  * (for response) per submit queue at one time. Can be reduced by max_queue
217  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
218  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
219  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
220  * but cannot exceed SDEBUG_CANQUEUE .
221  */
222 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
223 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
224 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
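/*
 * Worked example: with BITS_PER_LONG=64 this gives SDEBUG_CANQUEUE =
 * 3 * 64 = 192 queued commands per submit queue (96 on a 32 bit build
 * where BITS_PER_LONG is 32).
 */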
225 
226 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
227 #define F_D_IN			1	/* Data-in command (e.g. READ) */
228 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
229 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
230 #define F_D_UNKN		8
231 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
232 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
233 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
234 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
235 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
236 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
237 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
238 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
239 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
240 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
241 
242 /* Useful combinations of the above flags */
243 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
244 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
245 #define FF_SA (F_SA_HIGH | F_SA_LOW)
246 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
247 
248 #define SDEBUG_MAX_PARTS 4
249 
250 #define SDEBUG_MAX_CMD_LEN 32
251 
252 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 
254 /* Zone types (zbcr05 table 25) */
255 enum sdebug_z_type {
256 	ZBC_ZONE_TYPE_CNV	= 0x1,
257 	ZBC_ZONE_TYPE_SWR	= 0x2,
258 	ZBC_ZONE_TYPE_SWP	= 0x3,
259 };
260 
261 /* enumeration names taken from table 26, zbcr05 */
262 enum sdebug_z_cond {
263 	ZBC_NOT_WRITE_POINTER	= 0x0,
264 	ZC1_EMPTY		= 0x1,
265 	ZC2_IMPLICIT_OPEN	= 0x2,
266 	ZC3_EXPLICIT_OPEN	= 0x3,
267 	ZC4_CLOSED		= 0x4,
268 	ZC6_READ_ONLY		= 0xd,
269 	ZC5_FULL		= 0xe,
270 	ZC7_OFFLINE		= 0xf,
271 };
272 
273 struct sdeb_zone_state {	/* ZBC: per zone state */
274 	enum sdebug_z_type z_type;
275 	enum sdebug_z_cond z_cond;
276 	bool z_non_seq_resource;
277 	unsigned int z_size;
278 	sector_t z_start;
279 	sector_t z_wp;
280 };
281 
282 struct sdebug_dev_info {
283 	struct list_head dev_list;
284 	unsigned int channel;
285 	unsigned int target;
286 	u64 lun;
287 	uuid_t lu_name;
288 	struct sdebug_host_info *sdbg_host;
289 	unsigned long uas_bm[1];
290 	atomic_t num_in_q;
291 	atomic_t stopped;	/* 1: by SSU, 2: device start */
292 	bool used;
293 
294 	/* For ZBC devices */
295 	enum blk_zoned_model zmodel;
296 	unsigned int zsize;
297 	unsigned int zsize_shift;
298 	unsigned int nr_zones;
299 	unsigned int nr_conv_zones;
300 	unsigned int nr_imp_open;
301 	unsigned int nr_exp_open;
302 	unsigned int nr_closed;
303 	unsigned int max_open;
304 	ktime_t create_ts;	/* time since bootup that this device was created */
305 	struct sdeb_zone_state *zstate;
306 };
307 
308 struct sdebug_host_info {
309 	struct list_head host_list;
310 	int si_idx;	/* sdeb_store_info (per host) xarray index */
311 	struct Scsi_Host *shost;
312 	struct device dev;
313 	struct list_head dev_info_list;
314 };
315 
316 /* There is an xarray of pointers to this struct's objects, one per host */
317 struct sdeb_store_info {
318 	rwlock_t macc_lck;	/* for atomic media access on this store */
319 	u8 *storep;		/* user data storage (ram) */
320 	struct t10_pi_tuple *dif_storep; /* protection info */
321 	void *map_storep;	/* provisioning map */
322 };
323 
324 #define to_sdebug_host(d)	\
325 	container_of(d, struct sdebug_host_info, dev)
326 
327 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
328 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
329 
330 struct sdebug_defer {
331 	struct hrtimer hrt;
332 	struct execute_work ew;
333 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
334 	int sqa_idx;	/* index of sdebug_queue array */
335 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
336 	int hc_idx;	/* hostwide tag index */
337 	int issuing_cpu;
338 	bool init_hrt;
339 	bool init_wq;
340 	bool init_poll;
341 	bool aborted;	/* true when blk_abort_request() already called */
342 	enum sdeb_defer_type defer_t;
343 };
344 
345 struct sdebug_queued_cmd {
346 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
347 	 * instance indicates this slot is in use.
348 	 */
349 	struct sdebug_defer *sd_dp;
350 	struct scsi_cmnd *a_cmnd;
351 };
352 
353 struct sdebug_queue {
354 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
355 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
356 	spinlock_t qc_lock;
357 	atomic_t blocked;	/* to temporarily stop more being queued */
358 };
359 
360 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
361 static atomic_t sdebug_completions;  /* count of deferred completions */
362 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
363 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
364 static atomic_t sdeb_inject_pending;
365 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
366 
367 struct opcode_info_t {
368 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
369 				/* for terminating element */
370 	u8 opcode;		/* if num_attached > 0, preferred */
371 	u16 sa;			/* service action */
372 	u32 flags;		/* OR-ed set of SDEB_F_* */
373 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
374 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
375 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
376 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
377 };
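/*
 * A minimal sketch of how a len_mask[] entry can vet an incoming cdb
 * (the real check is applied by the dispatch code later in this file
 * when the strict parameter is set; this helper name is illustrative
 * only). A set bit in len_mask[k] means the caller may set that bit in
 * cdb[k]:
 *
 *	static bool sdeb_cdb_bits_ok(const struct opcode_info_t *oip,
 *				     const u8 *cdb, int cdb_len)
 *	{
 *		int k;
 *
 *		for (k = 1; k < cdb_len && k < 16; ++k)
 *			if (cdb[k] & ~oip->len_mask[k])
 *				return false;
 *		return true;
 *	}
 */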
378 
379 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
380 enum sdeb_opcode_index {
381 	SDEB_I_INVALID_OPCODE =	0,
382 	SDEB_I_INQUIRY = 1,
383 	SDEB_I_REPORT_LUNS = 2,
384 	SDEB_I_REQUEST_SENSE = 3,
385 	SDEB_I_TEST_UNIT_READY = 4,
386 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
387 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
388 	SDEB_I_LOG_SENSE = 7,
389 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
390 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
391 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
392 	SDEB_I_START_STOP = 11,
393 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
394 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
395 	SDEB_I_MAINT_IN = 14,
396 	SDEB_I_MAINT_OUT = 15,
397 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
398 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
399 	SDEB_I_RESERVE = 18,		/* 6, 10 */
400 	SDEB_I_RELEASE = 19,		/* 6, 10 */
401 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
402 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
403 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
404 	SDEB_I_SEND_DIAG = 23,
405 	SDEB_I_UNMAP = 24,
406 	SDEB_I_WRITE_BUFFER = 25,
407 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
408 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
409 	SDEB_I_COMP_WRITE = 28,
410 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
411 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
412 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
413 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
414 };
415 
416 
417 static const unsigned char opcode_ind_arr[256] = {
418 /* 0x0; 0x0->0x1f: 6 byte cdbs */
419 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
420 	    0, 0, 0, 0,
421 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
422 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
423 	    SDEB_I_RELEASE,
424 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
425 	    SDEB_I_ALLOW_REMOVAL, 0,
426 /* 0x20; 0x20->0x3f: 10 byte cdbs */
427 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
428 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
429 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
430 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
431 /* 0x40; 0x40->0x5f: 10 byte cdbs */
432 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
433 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
434 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
435 	    SDEB_I_RELEASE,
436 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
437 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
438 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
439 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
440 	0, SDEB_I_VARIABLE_LEN,
441 /* 0x80; 0x80->0x9f: 16 byte cdbs */
442 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
443 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
444 	0, 0, 0, SDEB_I_VERIFY,
445 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
446 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
447 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
448 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
449 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
450 	     SDEB_I_MAINT_OUT, 0, 0, 0,
451 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
452 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0,
455 /* 0xc0; 0xc0->0xff: vendor specific */
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 };
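/*
 * Command dispatch thus starts with a constant-time lookup on the first
 * cdb byte, e.g. (sketch):
 *
 *	int idx = opcode_ind_arr[cmd[0]];
 *
 * where idx 0 maps to SDEB_I_INVALID_OPCODE in opcode_info_arr[] below.
 */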
461 
462 /*
463  * The following "response" functions return the SCSI mid-level's 4 byte
464  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
465  * command completion, they can OR their return value with
466  * SDEG_RES_IMMED_MASK.
467  */
468 #define SDEG_RES_IMMED_MASK 0x40000000
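/*
 * For instance, a response function that accepts a command with the
 * IMMED bit set could finish with a line like (sketch only):
 *
 *	return res | SDEG_RES_IMMED_MASK;
 *
 * so that the command completes without the usual simulated delay.
 */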
469 
470 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 
500 static int sdebug_do_add_host(bool mk_new_store);
501 static int sdebug_add_host_helper(int per_host_idx);
502 static void sdebug_do_remove_host(bool the_end);
503 static int sdebug_add_store(void);
504 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
505 static void sdebug_erase_all_stores(bool apart_from_first);
506 
507 /*
508  * The following are overflow arrays for cdbs that "hit" the same index in
509  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
510  * should be placed in opcode_info_arr[], the others should be placed here.
511  */
512 static const struct opcode_info_t msense_iarr[] = {
513 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
514 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
515 };
516 
517 static const struct opcode_info_t mselect_iarr[] = {
518 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
519 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 };
521 
522 static const struct opcode_info_t read_iarr[] = {
523 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
524 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
525 	     0, 0, 0, 0} },
526 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
527 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
529 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
530 	     0xc7, 0, 0, 0, 0} },
531 };
532 
533 static const struct opcode_info_t write_iarr[] = {
534 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
535 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
536 		   0, 0, 0, 0, 0, 0} },
537 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
538 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
539 		   0, 0, 0} },
540 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
541 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
542 		   0xbf, 0xc7, 0, 0, 0, 0} },
543 };
544 
545 static const struct opcode_info_t verify_iarr[] = {
546 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
547 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
548 		   0, 0, 0, 0, 0, 0} },
549 };
550 
551 static const struct opcode_info_t sa_in_16_iarr[] = {
552 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
553 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
554 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
555 };
556 
557 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
558 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
559 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
560 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
561 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
562 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
563 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
564 };
565 
566 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
567 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
568 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
569 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
570 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
571 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
572 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
573 };
574 
575 static const struct opcode_info_t write_same_iarr[] = {
576 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
577 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
578 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
579 };
580 
581 static const struct opcode_info_t reserve_iarr[] = {
582 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
583 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
584 };
585 
586 static const struct opcode_info_t release_iarr[] = {
587 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
588 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 };
590 
591 static const struct opcode_info_t sync_cache_iarr[] = {
592 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
593 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
594 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
595 };
596 
597 static const struct opcode_info_t pre_fetch_iarr[] = {
598 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
599 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
600 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
601 };
602 
603 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
604 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
605 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
607 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
608 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
610 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
611 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
613 };
614 
615 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
616 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
617 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
618 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
619 };
620 
621 
622 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
623  * plus the terminating elements for logic that scans this table such as
624  * REPORT SUPPORTED OPERATION CODES. */
625 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
626 /* 0 */
627 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
628 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
630 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
631 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
632 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
633 	     0, 0} },					/* REPORT LUNS */
634 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
635 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
637 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
638 /* 5 */
639 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
640 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
641 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
643 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
644 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
646 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
647 	     0, 0, 0} },
648 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
649 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
650 	     0, 0} },
651 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
652 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
653 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
654 /* 10 */
655 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
656 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
657 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
658 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
659 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
660 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
661 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
662 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
663 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
664 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
665 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
666 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
667 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
668 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
669 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
670 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
671 				0xff, 0, 0xc7, 0, 0, 0, 0} },
672 /* 15 */
673 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
674 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
675 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
676 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
677 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
679 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
680 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
681 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
682 	     0xff, 0xff} },
683 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
684 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
685 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686 	     0} },
687 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
688 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
689 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 	     0} },
691 /* 20 */
692 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
693 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
695 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
697 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
699 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
700 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
701 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
702 /* 25 */
703 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
704 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
705 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
706 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
707 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
708 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
709 		 0, 0, 0, 0, 0} },
710 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
711 	    resp_sync_cache, sync_cache_iarr,
712 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
713 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
714 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
715 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
716 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
717 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
718 	    resp_pre_fetch, pre_fetch_iarr,
719 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
720 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
721 
722 /* 30 */
723 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
724 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
725 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
727 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
728 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
729 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
730 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
731 /* sentinel */
732 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
733 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
734 };
735 
736 static atomic_t sdebug_num_hosts;
737 static DEFINE_MUTEX(add_host_mutex);
738 
739 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
740 static int sdebug_ato = DEF_ATO;
741 static int sdebug_cdb_len = DEF_CDB_LEN;
742 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
743 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
744 static int sdebug_dif = DEF_DIF;
745 static int sdebug_dix = DEF_DIX;
746 static int sdebug_dsense = DEF_D_SENSE;
747 static int sdebug_every_nth = DEF_EVERY_NTH;
748 static int sdebug_fake_rw = DEF_FAKE_RW;
749 static unsigned int sdebug_guard = DEF_GUARD;
750 static int sdebug_host_max_queue;	/* per host */
751 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
752 static int sdebug_max_luns = DEF_MAX_LUNS;
753 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
754 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
755 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
756 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
757 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
758 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
759 static int sdebug_no_uld;
760 static int sdebug_num_parts = DEF_NUM_PARTS;
761 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
762 static int sdebug_opt_blks = DEF_OPT_BLKS;
763 static int sdebug_opts = DEF_OPTS;
764 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
765 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
766 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
767 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
768 static int sdebug_sector_size = DEF_SECTOR_SIZE;
769 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
770 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
771 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
772 static unsigned int sdebug_lbpu = DEF_LBPU;
773 static unsigned int sdebug_lbpws = DEF_LBPWS;
774 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
775 static unsigned int sdebug_lbprz = DEF_LBPRZ;
776 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
777 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
778 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
779 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
780 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
781 static int sdebug_uuid_ctl = DEF_UUID_CTL;
782 static bool sdebug_random = DEF_RANDOM;
783 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
784 static bool sdebug_removable = DEF_REMOVABLE;
785 static bool sdebug_deflect_incoming;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
799 
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 			  SAM_LUN_AM_FLAT = 0x1,
802 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 			  SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity;	/* in sectors */
809 
810 /* old BIOS stuff; the kernel may get rid of these but some mode sense
811    pages may still need them */
812 static int sdebug_heads;		/* heads per disk */
813 static int sdebug_cylinders_per;	/* cylinders per surface */
814 static int sdebug_sectors_per;		/* sectors per cylinder */
815 
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
818 
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
824 
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
834 
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_size_mb;
838 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
839 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
840 
841 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
842 static int poll_queues; /* io_uring iopoll interface. */
843 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
844 
845 static DEFINE_RWLOCK(atomic_rw);
846 static DEFINE_RWLOCK(atomic_rw2);
847 
848 static rwlock_t *ramdisk_lck_a[2];
849 
850 static char sdebug_proc_name[] = MY_NAME;
851 static const char *my_name = MY_NAME;
852 
853 static struct bus_type pseudo_lld_bus;
854 
855 static struct device_driver sdebug_driverfs_driver = {
856 	.name 		= sdebug_proc_name,
857 	.bus		= &pseudo_lld_bus,
858 };
859 
860 static const int check_condition_result =
861 	SAM_STAT_CHECK_CONDITION;
862 
863 static const int illegal_condition_result =
864 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
865 
866 static const int device_qfull_result =
867 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
868 
869 static const int condition_met_result = SAM_STAT_CONDITION_MET;
870 
871 
872 /* Only do the extra work involved in logical block provisioning if one or
873  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
874  * real reads and writes (i.e. not skipping them for speed).
875  */
876 static inline bool scsi_debug_lbp(void)
877 {
878 	return 0 == sdebug_fake_rw &&
879 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
880 }
881 
882 static void *lba2fake_store(struct sdeb_store_info *sip,
883 			    unsigned long long lba)
884 {
885 	struct sdeb_store_info *lsip = sip;
886 
887 	lba = do_div(lba, sdebug_store_sectors);
888 	if (!sip || !sip->storep) {
889 		WARN_ON_ONCE(true);
890 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
891 	}
892 	return lsip->storep + lba * sdebug_sector_size;
893 }
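/*
 * Note: do_div() divides its 64 bit first argument in place and returns
 * the remainder, so the assignment above leaves lba holding
 * lba % sdebug_store_sectors. Worked example: lba=1000 with
 * sdebug_store_sectors=600 leaves lba=400, i.e. a byte offset of
 * 400 * sdebug_sector_size into storep.
 */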
894 
895 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
896 				      sector_t sector)
897 {
898 	sector = sector_div(sector, sdebug_store_sectors);
899 
900 	return sip->dif_storep + sector;
901 }
902 
903 static void sdebug_max_tgts_luns(void)
904 {
905 	struct sdebug_host_info *sdbg_host;
906 	struct Scsi_Host *hpnt;
907 
908 	spin_lock(&sdebug_host_list_lock);
909 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
910 		hpnt = sdbg_host->shost;
911 		if ((hpnt->this_id >= 0) &&
912 		    (sdebug_num_tgts > hpnt->this_id))
913 			hpnt->max_id = sdebug_num_tgts + 1;
914 		else
915 			hpnt->max_id = sdebug_num_tgts;
916 		/* sdebug_max_luns; */
917 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
918 	}
919 	spin_unlock(&sdebug_host_list_lock);
920 }
921 
922 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
923 
924 /* Set in_bit to -1 to indicate no bit position of invalid field */
925 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
926 				 enum sdeb_cmd_data c_d,
927 				 int in_byte, int in_bit)
928 {
929 	unsigned char *sbuff;
930 	u8 sks[4];
931 	int sl, asc;
932 
933 	sbuff = scp->sense_buffer;
934 	if (!sbuff) {
935 		sdev_printk(KERN_ERR, scp->device,
936 			    "%s: sense_buffer is NULL\n", __func__);
937 		return;
938 	}
939 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
940 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
941 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
942 	memset(sks, 0, sizeof(sks));
943 	sks[0] = 0x80;
944 	if (c_d)
945 		sks[0] |= 0x40;
946 	if (in_bit >= 0) {
947 		sks[0] |= 0x8;
948 		sks[0] |= 0x7 & in_bit;
949 	}
950 	put_unaligned_be16(in_byte, sks + 1);
951 	if (sdebug_dsense) {
952 		sl = sbuff[7] + 8;
953 		sbuff[7] = sl;
954 		sbuff[sl] = 0x2;
955 		sbuff[sl + 1] = 0x6;
956 		memcpy(sbuff + sl + 4, sks, 3);
957 	} else
958 		memcpy(sbuff + 15, sks, 3);
959 	if (sdebug_verbose)
960 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
961 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
962 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
963 }
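/*
 * Worked example: flagging bit 5 of cdb byte 2 (c_d==SDEB_IN_CDB) gives
 * sks[0] = 0x80 | 0x40 | 0x08 | 0x05 = 0xcd with 0x0002 in sks[1..2];
 * these 3 bytes form the SENSE KEY SPECIFIC field of the sense data.
 */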
964 
965 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
966 {
967 	if (!scp->sense_buffer) {
968 		sdev_printk(KERN_ERR, scp->device,
969 			    "%s: sense_buffer is NULL\n", __func__);
970 		return;
971 	}
972 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
973 
974 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
975 
976 	if (sdebug_verbose)
977 		sdev_printk(KERN_INFO, scp->device,
978 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
979 			    my_name, key, asc, asq);
980 }
981 
982 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
983 {
984 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
985 }
986 
987 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
988 			    void __user *arg)
989 {
990 	if (sdebug_verbose) {
991 		if (0x1261 == cmd)
992 			sdev_printk(KERN_INFO, dev,
993 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
994 		else if (0x5331 == cmd)
995 			sdev_printk(KERN_INFO, dev,
996 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
997 				    __func__);
998 		else
999 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1000 				    __func__, cmd);
1001 	}
1002 	return -EINVAL;
1003 	/* return -ENOTTY; // correct return but upsets fdisk */
1004 }
1005 
1006 static void config_cdb_len(struct scsi_device *sdev)
1007 {
1008 	switch (sdebug_cdb_len) {
1009 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1010 		sdev->use_10_for_rw = false;
1011 		sdev->use_16_for_rw = false;
1012 		sdev->use_10_for_ms = false;
1013 		break;
1014 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1015 		sdev->use_10_for_rw = true;
1016 		sdev->use_16_for_rw = false;
1017 		sdev->use_10_for_ms = false;
1018 		break;
1019 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1020 		sdev->use_10_for_rw = true;
1021 		sdev->use_16_for_rw = false;
1022 		sdev->use_10_for_ms = true;
1023 		break;
1024 	case 16:
1025 		sdev->use_10_for_rw = false;
1026 		sdev->use_16_for_rw = true;
1027 		sdev->use_10_for_ms = true;
1028 		break;
1029 	case 32: /* No knobs to suggest this so same as 16 for now */
1030 		sdev->use_10_for_rw = false;
1031 		sdev->use_16_for_rw = true;
1032 		sdev->use_10_for_ms = true;
1033 		break;
1034 	default:
1035 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1036 			sdebug_cdb_len);
1037 		sdev->use_10_for_rw = true;
1038 		sdev->use_16_for_rw = false;
1039 		sdev->use_10_for_ms = false;
1040 		sdebug_cdb_len = 10;
1041 		break;
1042 	}
1043 }
1044 
1045 static void all_config_cdb_len(void)
1046 {
1047 	struct sdebug_host_info *sdbg_host;
1048 	struct Scsi_Host *shost;
1049 	struct scsi_device *sdev;
1050 
1051 	spin_lock(&sdebug_host_list_lock);
1052 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1053 		shost = sdbg_host->shost;
1054 		shost_for_each_device(sdev, shost) {
1055 			config_cdb_len(sdev);
1056 		}
1057 	}
1058 	spin_unlock(&sdebug_host_list_lock);
1059 }
1060 
1061 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1062 {
1063 	struct sdebug_host_info *sdhp;
1064 	struct sdebug_dev_info *dp;
1065 
1066 	spin_lock(&sdebug_host_list_lock);
1067 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1068 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1069 			if ((devip->sdbg_host == dp->sdbg_host) &&
1070 			    (devip->target == dp->target))
1071 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1072 		}
1073 	}
1074 	spin_unlock(&sdebug_host_list_lock);
1075 }
1076 
1077 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1078 {
1079 	int k;
1080 
1081 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1082 	if (k != SDEBUG_NUM_UAS) {
1083 		const char *cp = NULL;
1084 
1085 		switch (k) {
1086 		case SDEBUG_UA_POR:
1087 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1088 					POWER_ON_RESET_ASCQ);
1089 			if (sdebug_verbose)
1090 				cp = "power on reset";
1091 			break;
1092 		case SDEBUG_UA_POOCCUR:
1093 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1094 					POWER_ON_OCCURRED_ASCQ);
1095 			if (sdebug_verbose)
1096 				cp = "power on occurred";
1097 			break;
1098 		case SDEBUG_UA_BUS_RESET:
1099 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1100 					BUS_RESET_ASCQ);
1101 			if (sdebug_verbose)
1102 				cp = "bus reset";
1103 			break;
1104 		case SDEBUG_UA_MODE_CHANGED:
1105 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1106 					MODE_CHANGED_ASCQ);
1107 			if (sdebug_verbose)
1108 				cp = "mode parameters changed";
1109 			break;
1110 		case SDEBUG_UA_CAPACITY_CHANGED:
1111 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1112 					CAPACITY_CHANGED_ASCQ);
1113 			if (sdebug_verbose)
1114 				cp = "capacity data changed";
1115 			break;
1116 		case SDEBUG_UA_MICROCODE_CHANGED:
1117 			mk_sense_buffer(scp, UNIT_ATTENTION,
1118 					TARGET_CHANGED_ASC,
1119 					MICROCODE_CHANGED_ASCQ);
1120 			if (sdebug_verbose)
1121 				cp = "microcode has been changed";
1122 			break;
1123 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1124 			mk_sense_buffer(scp, UNIT_ATTENTION,
1125 					TARGET_CHANGED_ASC,
1126 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1127 			if (sdebug_verbose)
1128 				cp = "microcode has been changed without reset";
1129 			break;
1130 		case SDEBUG_UA_LUNS_CHANGED:
1131 			/*
1132 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1133 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1134 			 * on the target, until a REPORT LUNS command is
1135 			 * received.  SPC-4 behavior is to report it only once.
1136 			 * NOTE:  sdebug_scsi_level does not use the same
1137 			 * values as struct scsi_device->scsi_level.
1138 			 */
1139 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1140 				clear_luns_changed_on_target(devip);
1141 			mk_sense_buffer(scp, UNIT_ATTENTION,
1142 					TARGET_CHANGED_ASC,
1143 					LUNS_CHANGED_ASCQ);
1144 			if (sdebug_verbose)
1145 				cp = "reported luns data has changed";
1146 			break;
1147 		default:
1148 			pr_warn("unexpected unit attention code=%d\n", k);
1149 			if (sdebug_verbose)
1150 				cp = "unknown";
1151 			break;
1152 		}
1153 		clear_bit(k, devip->uas_bm);
1154 		if (sdebug_verbose)
1155 			sdev_printk(KERN_INFO, scp->device,
1156 				   "%s reports: Unit attention: %s\n",
1157 				   my_name, cp);
1158 		return check_condition_result;
1159 	}
1160 	return 0;
1161 }
1162 
1163 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1164 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1165 				int arr_len)
1166 {
1167 	int act_len;
1168 	struct scsi_data_buffer *sdb = &scp->sdb;
1169 
1170 	if (!sdb->length)
1171 		return 0;
1172 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1173 		return DID_ERROR << 16;
1174 
1175 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1176 				      arr, arr_len);
1177 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1178 
1179 	return 0;
1180 }
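/*
 * Worked example: for a READ CAPACITY(10) style response with
 * scsi_bufflen(scp)=512 and arr_len=8, act_len is 8 and the resid is
 * set to 512 - 8 = 504.
 */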
1181 
1182 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1183  * (DID_ERROR << 16). Can write to an offset in the data-in buffer. When
1184  * called multiple times, the calls need not write in ascending offset
1185  * order. Assumes resid is set to scsi_bufflen() prior to any calls.
1186  */
1187 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1188 				  int arr_len, unsigned int off_dst)
1189 {
1190 	unsigned int act_len, n;
1191 	struct scsi_data_buffer *sdb = &scp->sdb;
1192 	off_t skip = off_dst;
1193 
1194 	if (sdb->length <= off_dst)
1195 		return 0;
1196 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1197 		return DID_ERROR << 16;
1198 
1199 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1200 				       arr, arr_len, skip);
1201 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1202 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1203 		 scsi_get_resid(scp));
1204 	n = scsi_bufflen(scp) - (off_dst + act_len);
1205 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1206 	return 0;
1207 }
1208 
1209 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1210  * 'arr' or -1 if error.
1211  */
1212 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1213 			       int arr_len)
1214 {
1215 	if (!scsi_bufflen(scp))
1216 		return 0;
1217 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1218 		return -1;
1219 
1220 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1221 }
1222 
1223 
1224 static char sdebug_inq_vendor_id[9] = "Linux   ";
1225 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1226 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1227 /* Use some locally assigned NAAs for SAS addresses. */
1228 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1229 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1230 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1231 
1232 /* Device identification VPD page. Returns number of bytes placed in arr */
1233 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1234 			  int target_dev_id, int dev_id_num,
1235 			  const char *dev_id_str, int dev_id_str_len,
1236 			  const uuid_t *lu_name)
1237 {
1238 	int num, port_a;
1239 	char b[32];
1240 
1241 	port_a = target_dev_id + 1;
1242 	/* T10 vendor identifier field format (faked) */
1243 	arr[0] = 0x2;	/* ASCII */
1244 	arr[1] = 0x1;
1245 	arr[2] = 0x0;
1246 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1247 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1248 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1249 	num = 8 + 16 + dev_id_str_len;
1250 	arr[3] = num;
1251 	num += 4;
1252 	if (dev_id_num >= 0) {
1253 		if (sdebug_uuid_ctl) {
1254 			/* Locally assigned UUID */
1255 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1256 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1257 			arr[num++] = 0x0;
1258 			arr[num++] = 0x12;
1259 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1260 			arr[num++] = 0x0;
1261 			memcpy(arr + num, lu_name, 16);
1262 			num += 16;
1263 		} else {
1264 			/* NAA-3, Logical unit identifier (binary) */
1265 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1266 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1267 			arr[num++] = 0x0;
1268 			arr[num++] = 0x8;
1269 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1270 			num += 8;
1271 		}
1272 		/* Target relative port number */
1273 		arr[num++] = 0x61;	/* proto=sas, binary */
1274 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1275 		arr[num++] = 0x0;	/* reserved */
1276 		arr[num++] = 0x4;	/* length */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x0;	/* reserved */
1279 		arr[num++] = 0x0;
1280 		arr[num++] = 0x1;	/* relative port A */
1281 	}
1282 	/* NAA-3, Target port identifier */
1283 	arr[num++] = 0x61;	/* proto=sas, binary */
1284 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1285 	arr[num++] = 0x0;
1286 	arr[num++] = 0x8;
1287 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1288 	num += 8;
1289 	/* NAA-3, Target port group identifier */
1290 	arr[num++] = 0x61;	/* proto=sas, binary */
1291 	arr[num++] = 0x95;	/* piv=1, target port group id */
1292 	arr[num++] = 0x0;
1293 	arr[num++] = 0x4;
1294 	arr[num++] = 0;
1295 	arr[num++] = 0;
1296 	put_unaligned_be16(port_group_id, arr + num);
1297 	num += 2;
1298 	/* NAA-3, Target device identifier */
1299 	arr[num++] = 0x61;	/* proto=sas, binary */
1300 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1301 	arr[num++] = 0x0;
1302 	arr[num++] = 0x8;
1303 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1304 	num += 8;
1305 	/* SCSI name string: Target device identifier */
1306 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1307 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1308 	arr[num++] = 0x0;
1309 	arr[num++] = 24;
1310 	memcpy(arr + num, "naa.32222220", 12);
1311 	num += 12;
1312 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1313 	memcpy(arr + num, b, 8);
1314 	num += 8;
1315 	memset(arr + num, 0, 4);
1316 	num += 4;
1317 	return num;
1318 }
1319 
1320 static unsigned char vpd84_data[] = {
1321 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1322     0x22,0x22,0x22,0x0,0xbb,0x1,
1323     0x22,0x22,0x22,0x0,0xbb,0x2,
1324 };
1325 
1326 /*  Software interface identification VPD page */
1327 static int inquiry_vpd_84(unsigned char *arr)
1328 {
1329 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1330 	return sizeof(vpd84_data);
1331 }
1332 
1333 /* Management network addresses VPD page */
1334 static int inquiry_vpd_85(unsigned char *arr)
1335 {
1336 	int num = 0;
1337 	const char *na1 = "https://www.kernel.org/config";
1338 	const char *na2 = "http://www.kernel.org/log";
1339 	int plen, olen;
1340 
1341 	arr[num++] = 0x1;	/* lu, storage config */
1342 	arr[num++] = 0x0;	/* reserved */
1343 	arr[num++] = 0x0;
1344 	olen = strlen(na1);
1345 	plen = olen + 1;
1346 	if (plen % 4)
1347 		plen = ((plen / 4) + 1) * 4;
1348 	arr[num++] = plen;	/* length, null terminated, padded */
1349 	memcpy(arr + num, na1, olen);
1350 	memset(arr + num + olen, 0, plen - olen);
1351 	num += plen;
1352 
1353 	arr[num++] = 0x4;	/* lu, logging */
1354 	arr[num++] = 0x0;	/* reserved */
1355 	arr[num++] = 0x0;
1356 	olen = strlen(na2);
1357 	plen = olen + 1;
1358 	if (plen % 4)
1359 		plen = ((plen / 4) + 1) * 4;
1360 	arr[num++] = plen;	/* length, null terminated, padded */
1361 	memcpy(arr + num, na2, olen);
1362 	memset(arr + num + olen, 0, plen - olen);
1363 	num += plen;
1364 
1365 	return num;
1366 }
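/*
 * Worked example of the padding above: na1 is 29 characters, so plen is
 * 30 after the trailing NUL and rounds up to 32, the next multiple of 4;
 * the memset() zeroes the 3 bytes (NUL plus 2 pad) past the string.
 */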
1367 
1368 /* SCSI ports VPD page */
1369 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1370 {
1371 	int num = 0;
1372 	int port_a, port_b;
1373 
1374 	port_a = target_dev_id + 1;
1375 	port_b = port_a + 1;
1376 	arr[num++] = 0x0;	/* reserved */
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;
1379 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1380 	memset(arr + num, 0, 6);
1381 	num += 6;
1382 	arr[num++] = 0x0;
1383 	arr[num++] = 12;	/* length tp descriptor */
1384 	/* naa-5 target port identifier (A) */
1385 	arr[num++] = 0x61;	/* proto=sas, binary */
1386 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1387 	arr[num++] = 0x0;	/* reserved */
1388 	arr[num++] = 0x8;	/* length */
1389 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1390 	num += 8;
1391 	arr[num++] = 0x0;	/* reserved */
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x0;
1394 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1395 	memset(arr + num, 0, 6);
1396 	num += 6;
1397 	arr[num++] = 0x0;
1398 	arr[num++] = 12;	/* length tp descriptor */
1399 	/* naa-5 target port identifier (B) */
1400 	arr[num++] = 0x61;	/* proto=sas, binary */
1401 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1402 	arr[num++] = 0x0;	/* reserved */
1403 	arr[num++] = 0x8;	/* length */
1404 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1405 	num += 8;
1406 
1407 	return num;
1408 }
1409 
1410 
1411 static unsigned char vpd89_data[] = {
1412 /* from 4th byte */ 0,0,0,0,
1413 'l','i','n','u','x',' ',' ',' ',
1414 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1415 '1','2','3','4',
1416 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1417 0xec,0,0,0,
1418 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1419 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1420 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1421 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1422 0x53,0x41,
1423 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1424 0x20,0x20,
1425 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0x10,0x80,
1427 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1428 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1429 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1431 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1432 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1433 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1438 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1439 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1440 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1453 };
1454 
1455 /* ATA Information VPD page */
1456 static int inquiry_vpd_89(unsigned char *arr)
1457 {
1458 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1459 	return sizeof(vpd89_data);
1460 }
1461 
1462 
1463 static unsigned char vpdb0_data[] = {
1464 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1465 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 };
1469 
1470 /* Block limits VPD page (SBC-3) */
1471 static int inquiry_vpd_b0(unsigned char *arr)
1472 {
1473 	unsigned int gran;
1474 
1475 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1476 
1477 	/* Optimal transfer length granularity */
1478 	if (sdebug_opt_xferlen_exp != 0 &&
1479 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1480 		gran = 1 << sdebug_opt_xferlen_exp;
1481 	else
1482 		gran = 1 << sdebug_physblk_exp;
1483 	put_unaligned_be16(gran, arr + 2);
1484 
1485 	/* Maximum Transfer Length */
1486 	if (sdebug_store_sectors > 0x400)
1487 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1488 
1489 	/* Optimal Transfer Length */
1490 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1491 
1492 	if (sdebug_lbpu) {
1493 		/* Maximum Unmap LBA Count */
1494 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1495 
1496 		/* Maximum Unmap Block Descriptor Count */
1497 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1498 	}
1499 
1500 	/* Unmap Granularity Alignment */
1501 	if (sdebug_unmap_alignment) {
1502 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1503 		arr[28] |= 0x80; /* UGAVALID */
1504 	}
1505 
1506 	/* Optimal Unmap Granularity */
1507 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1508 
1509 	/* Maximum WRITE SAME Length */
1510 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1511 
1512 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1515 }
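
/*
 * Note that arr points at byte 4 of the final page (the caller adds the
 * 4 byte VPD header), so the offsets above line up with the SBC-3
 * layout: e.g. &arr[8] fills page bytes 12..15 (OPTIMAL TRANSFER
 * LENGTH) and &arr[32] fills page bytes 36..43 (MAXIMUM WRITE SAME
 * LENGTH).
 */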
1516 
1517 /* Block device characteristics VPD page (SBC-3) */
1518 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1519 {
1520 	memset(arr, 0, 0x3c);
1521 	arr[0] = 0;
1522 	arr[1] = 1;	/* non-rotating medium (e.g. solid state) */
1523 	arr[2] = 0;
1524 	arr[3] = 5;	/* less than 1.8" */
1525 	if (devip->zmodel == BLK_ZONED_HA)
1526 		arr[4] = 1 << 4;	/* zoned field = 01b */
1527 
1528 	return 0x3c;
1529 }
1530 
1531 /* Logical block provisioning VPD page (SBC-4) */
1532 static int inquiry_vpd_b2(unsigned char *arr)
1533 {
1534 	memset(arr, 0, 0x4);
1535 	arr[0] = 0;			/* threshold exponent */
1536 	if (sdebug_lbpu)
1537 		arr[1] = 1 << 7;
1538 	if (sdebug_lbpws)
1539 		arr[1] |= 1 << 6;
1540 	if (sdebug_lbpws10)
1541 		arr[1] |= 1 << 5;
1542 	if (sdebug_lbprz && scsi_debug_lbp())
1543 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1544 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1545 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1546 	/* threshold_percentage=0 */
1547 	return 0x4;
1548 }
1549 
1550 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1551 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1552 {
1553 	memset(arr, 0, 0x3c);
1554 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1555 	/*
1556 	 * Set Optimal number of open sequential write preferred zones and
1557 	 * Optimal number of non-sequentially written sequential write
1558 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1559 	 * fields set to zero, apart from Max. number of open swrz_s field.
1560 	 */
1561 	put_unaligned_be32(0xffffffff, &arr[4]);
1562 	put_unaligned_be32(0xffffffff, &arr[8]);
1563 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1564 		put_unaligned_be32(devip->max_open, &arr[12]);
1565 	else
1566 		put_unaligned_be32(0xffffffff, &arr[12]);
1567 	return 0x3c;
1568 }
1569 
1570 #define SDEBUG_LONG_INQ_SZ 96
1571 #define SDEBUG_MAX_INQ_ARR_SZ 584
1572 
1573 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1574 {
1575 	unsigned char pq_pdt;
1576 	unsigned char *arr;
1577 	unsigned char *cmd = scp->cmnd;
1578 	u32 alloc_len, n;
1579 	int ret;
1580 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1581 
1582 	alloc_len = get_unaligned_be16(cmd + 3);
1583 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1584 	if (!arr)
1585 		return DID_REQUEUE << 16;
1586 	is_disk = (sdebug_ptype == TYPE_DISK);
1587 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1588 	is_disk_zbc = (is_disk || is_zbc);
1589 	have_wlun = scsi_is_wlun(scp->device->lun);
1590 	if (have_wlun)
1591 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1592 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1593 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1594 	else
1595 		pq_pdt = (sdebug_ptype & 0x1f);
1596 	arr[0] = pq_pdt;
1597 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1598 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1599 		kfree(arr);
1600 		return check_condition_result;
1601 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1602 		int lu_id_num, port_group_id, target_dev_id;
1603 		u32 len;
1604 		char lu_id_str[6];
1605 		int host_no = devip->sdbg_host->shost->host_no;
1606 
1607 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1608 		    (devip->channel & 0x7f);
1609 		if (sdebug_vpd_use_hostno == 0)
1610 			host_no = 0;
1611 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1612 			    (devip->target * 1000) + devip->lun);
1613 		target_dev_id = ((host_no + 1) * 2000) +
1614 				 (devip->target * 1000) - 3;
1615 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1616 		if (0 == cmd[2]) { /* supported vital product data pages */
1617 			arr[1] = cmd[2];	/*sanity */
1618 			n = 4;
1619 			arr[n++] = 0x0;   /* this page */
1620 			arr[n++] = 0x80;  /* unit serial number */
1621 			arr[n++] = 0x83;  /* device identification */
1622 			arr[n++] = 0x84;  /* software interface ident. */
1623 			arr[n++] = 0x85;  /* management network addresses */
1624 			arr[n++] = 0x86;  /* extended inquiry */
1625 			arr[n++] = 0x87;  /* mode page policy */
1626 			arr[n++] = 0x88;  /* SCSI ports */
1627 			if (is_disk_zbc) {	  /* SBC or ZBC */
1628 				arr[n++] = 0x89;  /* ATA information */
1629 				arr[n++] = 0xb0;  /* Block limits */
1630 				arr[n++] = 0xb1;  /* Block characteristics */
1631 				if (is_disk)
1632 					arr[n++] = 0xb2;  /* LB Provisioning */
1633 				if (is_zbc)
1634 					arr[n++] = 0xb6;  /* ZB dev. char. */
1635 			}
1636 			arr[3] = n - 4;	  /* number of supported VPD pages */
1637 		} else if (0x80 == cmd[2]) { /* unit serial number */
1638 			arr[1] = cmd[2];	/*sanity */
1639 			arr[3] = len;
1640 			memcpy(&arr[4], lu_id_str, len);
1641 		} else if (0x83 == cmd[2]) { /* device identification */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1644 						target_dev_id, lu_id_num,
1645 						lu_id_str, len,
1646 						&devip->lu_name);
1647 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1648 			arr[1] = cmd[2];	/*sanity */
1649 			arr[3] = inquiry_vpd_84(&arr[4]);
1650 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1651 			arr[1] = cmd[2];	/*sanity */
1652 			arr[3] = inquiry_vpd_85(&arr[4]);
1653 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1654 			arr[1] = cmd[2];	/*sanity */
1655 			arr[3] = 0x3c;	/* number of following entries */
1656 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1657 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1658 			else if (have_dif_prot)
1659 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1660 			else
1661 				arr[4] = 0x0;   /* no protection stuff */
1662 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1663 		} else if (0x87 == cmd[2]) { /* mode page policy */
1664 			arr[1] = cmd[2];	/*sanity */
1665 			arr[3] = 0x8;	/* number of following entries */
1666 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1667 			arr[6] = 0x80;	/* mlus, shared */
1668 			arr[8] = 0x18;	 /* protocol specific lu */
1669 			arr[10] = 0x82;	 /* mlus, per initiator port */
1670 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1671 			arr[1] = cmd[2];	/*sanity */
1672 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1673 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1674 			arr[1] = cmd[2];        /*sanity */
1675 			n = inquiry_vpd_89(&arr[4]);
1676 			put_unaligned_be16(n, arr + 2);
1677 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1678 			arr[1] = cmd[2];        /*sanity */
1679 			arr[3] = inquiry_vpd_b0(&arr[4]);
1680 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1683 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1684 			arr[1] = cmd[2];        /*sanity */
1685 			arr[3] = inquiry_vpd_b2(&arr[4]);
1686 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1687 			arr[1] = cmd[2];        /*sanity */
1688 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1689 		} else {
1690 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1691 			kfree(arr);
1692 			return check_condition_result;
1693 		}
1694 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1695 		ret = fill_from_dev_buffer(scp, arr,
1696 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1697 		kfree(arr);
1698 		return ret;
1699 	}
1700 	/* drops through here for a standard inquiry */
1701 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1702 	arr[2] = sdebug_scsi_level;
1703 	arr[3] = 2;    /* response_data_format==2 */
1704 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1705 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1706 	if (sdebug_vpd_use_hostno == 0)
1707 		arr[5] |= 0x10; /* claim: implicit TPGS */
1708 	arr[6] = 0x10; /* claim: MultiP */
1709 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1710 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1711 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1712 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1713 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1714 	/* Use Vendor Specific area to place driver date in ASCII hex */
1715 	memcpy(&arr[36], sdebug_version_date, 8);
1716 	/* version descriptors (2 bytes each) follow */
1717 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1718 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1719 	n = 62;
1720 	if (is_disk) {		/* SBC-4 no version claimed */
1721 		put_unaligned_be16(0x600, arr + n);
1722 		n += 2;
1723 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1724 		put_unaligned_be16(0x525, arr + n);
1725 		n += 2;
1726 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1727 		put_unaligned_be16(0x624, arr + n);
1728 		n += 2;
1729 	}
1730 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1731 	ret = fill_from_dev_buffer(scp, arr,
1732 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1733 	kfree(arr);
1734 	return ret;
1735 }
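
/*
 * For the EVPD cases above, the length sent back is the page's own
 * length field plus the 4 byte header, capped by the ALLOCATION LENGTH
 * in CDB bytes 3..4; e.g. page 0xb0 stores 0x3c in arr[3], so at most
 * 0x40 (64) bytes are returned however large alloc_len is. A hedged
 * sketch of that clamp (the name is illustrative, not used here):
 */
static inline u32 sdeb_vpd_len_sketch(const u8 *vpd, u32 alloc_len)
{
	/* PAGE LENGTH field + 4 header bytes, capped by ALLOCATION LENGTH */
	return min_t(u32, get_unaligned_be16(vpd + 2) + 4, alloc_len);
}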
1736 
1737 /* See resp_iec_m_pg() for how this data is manipulated */
1738 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1739 				   0, 0, 0x0, 0x0};
1740 
1741 static int resp_requests(struct scsi_cmnd *scp,
1742 			 struct sdebug_dev_info *devip)
1743 {
1744 	unsigned char *cmd = scp->cmnd;
1745 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1746 	bool dsense = !!(cmd[1] & 1);
1747 	u32 alloc_len = cmd[4];
1748 	u32 len = 18;
1749 	int stopped_state = atomic_read(&devip->stopped);
1750 
1751 	memset(arr, 0, sizeof(arr));
1752 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1753 		if (dsense) {
1754 			arr[0] = 0x72;
1755 			arr[1] = NOT_READY;
1756 			arr[2] = LOGICAL_UNIT_NOT_READY;
1757 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1758 			len = 8;
1759 		} else {
1760 			arr[0] = 0x70;
1761 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
1762 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1763 			arr[12] = LOGICAL_UNIT_NOT_READY;
1764 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1765 		}
1766 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1767 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1768 		if (dsense) {
1769 			arr[0] = 0x72;
1770 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1771 			arr[2] = THRESHOLD_EXCEEDED;
1772 			arr[3] = 0xff;		/* Failure prediction(false) */
1773 			len = 8;
1774 		} else {
1775 			arr[0] = 0x70;
1776 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1777 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1778 			arr[12] = THRESHOLD_EXCEEDED;
1779 			arr[13] = 0xff;		/* Failure prediction(false) */
1780 		}
1781 	} else {	/* nothing to report */
1782 		if (dsense) {
1783 			len = 8;
1784 			memset(arr, 0, len);
1785 			arr[0] = 0x72;
1786 		} else {
1787 			memset(arr, 0, len);
1788 			arr[0] = 0x70;
1789 			arr[7] = 0xa;
1790 		}
1791 	}
1792 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1793 }
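
/*
 * The two layouts built above follow the SPC sense data formats: fixed
 * format (response code 0x70) keeps the sense key in byte 2, the
 * additional sense length in byte 7 (0xa for an 18 byte buffer) and
 * ASC/ASCQ in bytes 12 and 13; descriptor format (0x72) keeps the sense
 * key, ASC and ASCQ in bytes 1, 2 and 3 of an 8 byte header with no
 * descriptors appended here.
 */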
1794 
1795 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1796 {
1797 	unsigned char *cmd = scp->cmnd;
1798 	int power_cond, want_stop, stopped_state;
1799 	bool changing;
1800 
1801 	power_cond = (cmd[4] & 0xf0) >> 4;
1802 	if (power_cond) {
1803 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1804 		return check_condition_result;
1805 	}
1806 	want_stop = !(cmd[4] & 1);
1807 	stopped_state = atomic_read(&devip->stopped);
1808 	if (stopped_state == 2) {
1809 		ktime_t now_ts = ktime_get_boottime();
1810 
1811 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1812 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1813 
1814 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1815 				/* tur_ms_to_ready timer expired */
1816 				atomic_set(&devip->stopped, 0);
1817 				stopped_state = 0;
1818 			}
1819 		}
1820 		if (stopped_state == 2) {
1821 			if (want_stop) {
1822 				stopped_state = 1;	/* dummy up success */
1823 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1824 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1825 				return check_condition_result;
1826 			}
1827 		}
1828 	}
1829 	changing = (stopped_state != want_stop);
1830 	if (changing)
1831 		atomic_xchg(&devip->stopped, want_stop);
1832 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1833 		return SDEG_RES_IMMED_MASK;
1834 	else
1835 		return 0;
1836 }
1837 
1838 static sector_t get_sdebug_capacity(void)
1839 {
1840 	static const unsigned int gibibyte = 1073741824;
1841 
1842 	if (sdebug_virtual_gb > 0)
1843 		return (sector_t)sdebug_virtual_gb *
1844 			(gibibyte / sdebug_sector_size);
1845 	else
1846 		return sdebug_store_sectors;
1847 }
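
/*
 * Worked example: virtual_gb=4 with the default 512 byte sectors yields
 * 4 * (1073741824 / 512) = 8388608 logical blocks, regardless of how
 * many sectors the backing store really has; accesses beyond the store
 * wrap, see do_device_access().
 */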
1848 
1849 #define SDEBUG_READCAP_ARR_SZ 8
1850 static int resp_readcap(struct scsi_cmnd *scp,
1851 			struct sdebug_dev_info *devip)
1852 {
1853 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1854 	unsigned int capac;
1855 
1856 	/* following just in case virtual_gb changed */
1857 	sdebug_capacity = get_sdebug_capacity();
1858 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1859 	if (sdebug_capacity < 0xffffffff) {
1860 		capac = (unsigned int)sdebug_capacity - 1;
1861 		put_unaligned_be32(capac, arr + 0);
1862 	} else
1863 		put_unaligned_be32(0xffffffff, arr + 0);
1864 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1865 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1866 }
1867 
1868 #define SDEBUG_READCAP16_ARR_SZ 32
1869 static int resp_readcap16(struct scsi_cmnd *scp,
1870 			  struct sdebug_dev_info *devip)
1871 {
1872 	unsigned char *cmd = scp->cmnd;
1873 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1874 	u32 alloc_len;
1875 
1876 	alloc_len = get_unaligned_be32(cmd + 10);
1877 	/* following just in case virtual_gb changed */
1878 	sdebug_capacity = get_sdebug_capacity();
1879 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1880 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1881 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1882 	arr[13] = sdebug_physblk_exp & 0xf;
1883 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1884 
1885 	if (scsi_debug_lbp()) {
1886 		arr[14] |= 0x80; /* LBPME */
1887 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1888 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1889 		 * in the wider field maps to 0 in this field.
1890 		 */
1891 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1892 			arr[14] |= 0x40;
1893 	}
1894 
1895 	arr[15] = sdebug_lowest_aligned & 0xff;
1896 
1897 	if (have_dif_prot) {
1898 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1899 		arr[12] |= 1; /* PROT_EN */
1900 	}
1901 
1902 	return fill_from_dev_buffer(scp, arr,
1903 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1904 }
1905 
1906 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1907 
1908 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1909 			      struct sdebug_dev_info *devip)
1910 {
1911 	unsigned char *cmd = scp->cmnd;
1912 	unsigned char *arr;
1913 	int host_no = devip->sdbg_host->shost->host_no;
1914 	int port_group_a, port_group_b, port_a, port_b;
1915 	u32 alen, n, rlen;
1916 	int ret;
1917 
1918 	alen = get_unaligned_be32(cmd + 6);
1919 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1920 	if (!arr)
1921 		return DID_REQUEUE << 16;
1922 	/*
1923 	 * EVPD page 0x88 states we have two ports, one
1924 	 * real and a fake port with no device connected.
1925 	 * So we create two port groups with one port each
1926 	 * and set the group with port B to unavailable.
1927 	 */
1928 	port_a = 0x1; /* relative port A */
1929 	port_b = 0x2; /* relative port B */
1930 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1931 			(devip->channel & 0x7f);
1932 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1933 			(devip->channel & 0x7f) + 0x80;
1934 
1935 	/*
1936 	 * The asymmetric access state is cycled according to the host_no.
1937 	 */
1938 	n = 4;
1939 	if (sdebug_vpd_use_hostno == 0) {
1940 		arr[n++] = host_no % 3; /* Asymm access state */
1941 		arr[n++] = 0x0F; /* claim: all states are supported */
1942 	} else {
1943 		arr[n++] = 0x0; /* Active/Optimized path */
1944 		arr[n++] = 0x01; /* only support active/optimized paths */
1945 	}
1946 	put_unaligned_be16(port_group_a, arr + n);
1947 	n += 2;
1948 	arr[n++] = 0;    /* Reserved */
1949 	arr[n++] = 0;    /* Status code */
1950 	arr[n++] = 0;    /* Vendor unique */
1951 	arr[n++] = 0x1;  /* One port per group */
1952 	arr[n++] = 0;    /* Reserved */
1953 	arr[n++] = 0;    /* Reserved */
1954 	put_unaligned_be16(port_a, arr + n);
1955 	n += 2;
1956 	arr[n++] = 3;    /* Port unavailable */
1957 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1958 	put_unaligned_be16(port_group_b, arr + n);
1959 	n += 2;
1960 	arr[n++] = 0;    /* Reserved */
1961 	arr[n++] = 0;    /* Status code */
1962 	arr[n++] = 0;    /* Vendor unique */
1963 	arr[n++] = 0x1;  /* One port per group */
1964 	arr[n++] = 0;    /* Reserved */
1965 	arr[n++] = 0;    /* Reserved */
1966 	put_unaligned_be16(port_b, arr + n);
1967 	n += 2;
1968 
1969 	rlen = n - 4;
1970 	put_unaligned_be32(rlen, arr + 0);
1971 
1972 	/*
1973 	 * Return the smallest value of either
1974 	 * - The allocated length
1975 	 * - The constructed command length
1976 	 * - The maximum array size
1977 	 */
1978 	rlen = min(alen, n);
1979 	ret = fill_from_dev_buffer(scp, arr,
1980 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1981 	kfree(arr);
1982 	return ret;
1983 }
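
/*
 * Example of the group encoding above: host_no=1, channel=0 gives
 * port_group_a = (2 << 8) + 0 = 0x0200 and port_group_b = 0x0280; the
 * 0x80 offset keeps the two single-port groups distinct since the
 * channel contribution is always below 0x80.
 */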
1984 
1985 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1986 			     struct sdebug_dev_info *devip)
1987 {
1988 	bool rctd;
1989 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1990 	u16 req_sa, u;
1991 	u32 alloc_len, a_len;
1992 	int k, offset, len, errsts, count, bump, na;
1993 	const struct opcode_info_t *oip;
1994 	const struct opcode_info_t *r_oip;
1995 	u8 *arr;
1996 	u8 *cmd = scp->cmnd;
1997 
1998 	rctd = !!(cmd[2] & 0x80);
1999 	reporting_opts = cmd[2] & 0x7;
2000 	req_opcode = cmd[3];
2001 	req_sa = get_unaligned_be16(cmd + 4);
2002 	alloc_len = get_unaligned_be32(cmd + 6);
2003 	if (alloc_len < 4 || alloc_len > 0xffff) {
2004 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2005 		return check_condition_result;
2006 	}
2007 	if (alloc_len > 8192)
2008 		a_len = 8192;
2009 	else
2010 		a_len = alloc_len;
2011 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2012 	if (NULL == arr) {
2013 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2014 				INSUFF_RES_ASCQ);
2015 		return check_condition_result;
2016 	}
2017 	switch (reporting_opts) {
2018 	case 0:	/* all commands */
2019 		/* count number of commands */
2020 		for (count = 0, oip = opcode_info_arr;
2021 		     oip->num_attached != 0xff; ++oip) {
2022 			if (F_INV_OP & oip->flags)
2023 				continue;
2024 			count += (oip->num_attached + 1);
2025 		}
2026 		bump = rctd ? 20 : 8;
2027 		put_unaligned_be32(count * bump, arr);
2028 		for (offset = 4, oip = opcode_info_arr;
2029 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2030 			if (F_INV_OP & oip->flags)
2031 				continue;
2032 			na = oip->num_attached;
2033 			arr[offset] = oip->opcode;
2034 			put_unaligned_be16(oip->sa, arr + offset + 2);
2035 			if (rctd)
2036 				arr[offset + 5] |= 0x2;
2037 			if (FF_SA & oip->flags)
2038 				arr[offset + 5] |= 0x1;
2039 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2040 			if (rctd)
2041 				put_unaligned_be16(0xa, arr + offset + 8);
2042 			r_oip = oip;
2043 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2044 				if (F_INV_OP & oip->flags)
2045 					continue;
2046 				offset += bump;
2047 				arr[offset] = oip->opcode;
2048 				put_unaligned_be16(oip->sa, arr + offset + 2);
2049 				if (rctd)
2050 					arr[offset + 5] |= 0x2;
2051 				if (FF_SA & oip->flags)
2052 					arr[offset + 5] |= 0x1;
2053 				put_unaligned_be16(oip->len_mask[0],
2054 						   arr + offset + 6);
2055 				if (rctd)
2056 					put_unaligned_be16(0xa,
2057 							   arr + offset + 8);
2058 			}
2059 			oip = r_oip;
2060 			offset += bump;
2061 		}
2062 		break;
2063 	case 1:	/* one command: opcode only */
2064 	case 2:	/* one command: opcode plus service action */
2065 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2066 		sdeb_i = opcode_ind_arr[req_opcode];
2067 		oip = &opcode_info_arr[sdeb_i];
2068 		if (F_INV_OP & oip->flags) {
2069 			supp = 1;
2070 			offset = 4;
2071 		} else {
2072 			if (1 == reporting_opts) {
2073 				if (FF_SA & oip->flags) {
2074 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2075 							     2, 2);
2076 					kfree(arr);
2077 					return check_condition_result;
2078 				}
2079 				req_sa = 0;
2080 			} else if (2 == reporting_opts &&
2081 				   0 == (FF_SA & oip->flags)) {
2082 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2083 				kfree(arr);
2084 				return check_condition_result;
2085 			}
2086 			if (0 == (FF_SA & oip->flags) &&
2087 			    req_opcode == oip->opcode)
2088 				supp = 3;
2089 			else if (0 == (FF_SA & oip->flags)) {
2090 				na = oip->num_attached;
2091 				for (k = 0, oip = oip->arrp; k < na;
2092 				     ++k, ++oip) {
2093 					if (req_opcode == oip->opcode)
2094 						break;
2095 				}
2096 				supp = (k >= na) ? 1 : 3;
2097 			} else if (req_sa != oip->sa) {
2098 				na = oip->num_attached;
2099 				for (k = 0, oip = oip->arrp; k < na;
2100 				     ++k, ++oip) {
2101 					if (req_sa == oip->sa)
2102 						break;
2103 				}
2104 				supp = (k >= na) ? 1 : 3;
2105 			} else
2106 				supp = 3;
2107 			if (3 == supp) {
2108 				u = oip->len_mask[0];
2109 				put_unaligned_be16(u, arr + 2);
2110 				arr[4] = oip->opcode;
2111 				for (k = 1; k < u; ++k)
2112 					arr[4 + k] = (k < 16) ?
2113 						 oip->len_mask[k] : 0xff;
2114 				offset = 4 + u;
2115 			} else
2116 				offset = 4;
2117 		}
2118 		arr[1] = (rctd ? 0x80 : 0) | supp;
2119 		if (rctd) {
2120 			put_unaligned_be16(0xa, arr + offset);
2121 			offset += 12;
2122 		}
2123 		break;
2124 	default:
2125 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2126 		kfree(arr);
2127 		return check_condition_result;
2128 	}
2129 	offset = (offset < a_len) ? offset : a_len;
2130 	len = (offset < alloc_len) ? offset : alloc_len;
2131 	errsts = fill_from_dev_buffer(scp, arr, len);
2132 	kfree(arr);
2133 	return errsts;
2134 }
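
/*
 * The bump of 8 vs 20 above reflects the parameter data format: each
 * command descriptor in the all_commands list is 8 bytes, and setting
 * RCTD appends a 12 byte command timeouts descriptor (a 0x0a length
 * field plus the 2 length bytes) to each one.
 */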
2135 
2136 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2137 			  struct sdebug_dev_info *devip)
2138 {
2139 	bool repd;
2140 	u32 alloc_len, len;
2141 	u8 arr[16];
2142 	u8 *cmd = scp->cmnd;
2143 
2144 	memset(arr, 0, sizeof(arr));
2145 	repd = !!(cmd[2] & 0x80);
2146 	alloc_len = get_unaligned_be32(cmd + 6);
2147 	if (alloc_len < 4) {
2148 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2149 		return check_condition_result;
2150 	}
2151 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2152 	arr[1] = 0x1;		/* ITNRS */
2153 	if (repd) {
2154 		arr[3] = 0xc;
2155 		len = 16;
2156 	} else
2157 		len = 4;
2158 
2159 	len = (len < alloc_len) ? len : alloc_len;
2160 	return fill_from_dev_buffer(scp, arr, len);
2161 }
2162 
2163 /* <<Following mode page info copied from ST318451LW>> */
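/*
 * In the resp_*_pg() helpers below, pcontrol carries the PC field from
 * the MODE SENSE CDB: 0 returns current values, 1 the changeable mask
 * and 2 the defaults; 3 (saved values) is rejected up front in
 * resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */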
2164 
2165 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2166 {	/* Read-Write Error Recovery page for mode_sense */
2167 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2168 					5, 0, 0xff, 0xff};
2169 
2170 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2171 	if (1 == pcontrol)
2172 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2173 	return sizeof(err_recov_pg);
2174 }
2175 
2176 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2177 { 	/* Disconnect-Reconnect page for mode_sense */
2178 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2179 					 0, 0, 0, 0, 0, 0, 0, 0};
2180 
2181 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2182 	if (1 == pcontrol)
2183 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2184 	return sizeof(disconnect_pg);
2185 }
2186 
2187 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2188 {       /* Format device page for mode_sense */
2189 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2190 				     0, 0, 0, 0, 0, 0, 0, 0,
2191 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2192 
2193 	memcpy(p, format_pg, sizeof(format_pg));
2194 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2195 	put_unaligned_be16(sdebug_sector_size, p + 12);
2196 	if (sdebug_removable)
2197 		p[20] |= 0x20; /* should agree with INQUIRY */
2198 	if (1 == pcontrol)
2199 		memset(p + 2, 0, sizeof(format_pg) - 2);
2200 	return sizeof(format_pg);
2201 }
2202 
2203 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2204 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2205 				     0, 0, 0, 0};
2206 
2207 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2208 { 	/* Caching page for mode_sense */
2209 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2210 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2211 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2212 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2213 
2214 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2215 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2216 	memcpy(p, caching_pg, sizeof(caching_pg));
2217 	if (1 == pcontrol)
2218 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2219 	else if (2 == pcontrol)
2220 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2221 	return sizeof(caching_pg);
2222 }
2223 
2224 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2225 				    0, 0, 0x2, 0x4b};
2226 
2227 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2228 { 	/* Control mode page for mode_sense */
2229 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2230 					0, 0, 0, 0};
2231 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2232 				     0, 0, 0x2, 0x4b};
2233 
2234 	if (sdebug_dsense)
2235 		ctrl_m_pg[2] |= 0x4;
2236 	else
2237 		ctrl_m_pg[2] &= ~0x4;
2238 
2239 	if (sdebug_ato)
2240 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2241 
2242 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2243 	if (1 == pcontrol)
2244 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2245 	else if (2 == pcontrol)
2246 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2247 	return sizeof(ctrl_m_pg);
2248 }
2249 
2250 
2251 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2252 {	/* Informational Exceptions control mode page for mode_sense */
2253 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2254 				       0, 0, 0x0, 0x0};
2255 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2256 				      0, 0, 0x0, 0x0};
2257 
2258 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2259 	if (1 == pcontrol)
2260 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2261 	else if (2 == pcontrol)
2262 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2263 	return sizeof(iec_m_pg);
2264 }
2265 
2266 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2267 {	/* SAS SSP mode page - short format for mode_sense */
2268 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2269 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2270 
2271 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2272 	if (1 == pcontrol)
2273 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2274 	return sizeof(sas_sf_m_pg);
2275 }
2276 
2277 
2278 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2279 			      int target_dev_id)
2280 {	/* SAS phy control and discover mode page for mode_sense */
2281 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2282 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2283 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2284 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2285 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2286 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2287 		    0, 0, 0, 0, 0, 0, 0, 0,
2288 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2289 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2290 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2291 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2292 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2293 		    0, 0, 0, 0, 0, 0, 0, 0,
2294 		};
2295 	int port_a, port_b;
2296 
2297 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2298 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2299 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2300 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2301 	port_a = target_dev_id + 1;
2302 	port_b = port_a + 1;
2303 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2304 	put_unaligned_be32(port_a, p + 20);
2305 	put_unaligned_be32(port_b, p + 48 + 20);
2306 	if (1 == pcontrol)
2307 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2308 	return sizeof(sas_pcd_m_pg);
2309 }
2310 
2311 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2312 {	/* SAS SSP shared protocol specific port mode subpage */
2313 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2314 		    0, 0, 0, 0, 0, 0, 0, 0,
2315 		};
2316 
2317 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2318 	if (1 == pcontrol)
2319 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2320 	return sizeof(sas_sha_m_pg);
2321 }
2322 
2323 #define SDEBUG_MAX_MSENSE_SZ 256
2324 
2325 static int resp_mode_sense(struct scsi_cmnd *scp,
2326 			   struct sdebug_dev_info *devip)
2327 {
2328 	int pcontrol, pcode, subpcode, bd_len;
2329 	unsigned char dev_spec;
2330 	u32 alloc_len, offset, len;
2331 	int target_dev_id;
2332 	int target = scp->device->id;
2333 	unsigned char *ap;
2334 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2335 	unsigned char *cmd = scp->cmnd;
2336 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2337 
2338 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2339 	pcontrol = (cmd[2] & 0xc0) >> 6;
2340 	pcode = cmd[2] & 0x3f;
2341 	subpcode = cmd[3];
2342 	msense_6 = (MODE_SENSE == cmd[0]);
2343 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2344 	is_disk = (sdebug_ptype == TYPE_DISK);
2345 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2346 	if ((is_disk || is_zbc) && !dbd)
2347 		bd_len = llbaa ? 16 : 8;
2348 	else
2349 		bd_len = 0;
2350 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2351 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2352 	if (0x3 == pcontrol) {  /* Saving values not supported */
2353 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2354 		return check_condition_result;
2355 	}
2356 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2357 			(devip->target * 1000) - 3;
2358 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2359 	if (is_disk || is_zbc) {
2360 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2361 		if (sdebug_wp)
2362 			dev_spec |= 0x80;
2363 	} else
2364 		dev_spec = 0x0;
2365 	if (msense_6) {
2366 		arr[2] = dev_spec;
2367 		arr[3] = bd_len;
2368 		offset = 4;
2369 	} else {
2370 		arr[3] = dev_spec;
2371 		if (16 == bd_len)
2372 			arr[4] = 0x1;	/* set LONGLBA bit */
2373 		arr[7] = bd_len;	/* assume 255 or less */
2374 		offset = 8;
2375 	}
2376 	ap = arr + offset;
2377 	if ((bd_len > 0) && (!sdebug_capacity))
2378 		sdebug_capacity = get_sdebug_capacity();
2379 
2380 	if (8 == bd_len) {
2381 		if (sdebug_capacity > 0xfffffffe)
2382 			put_unaligned_be32(0xffffffff, ap + 0);
2383 		else
2384 			put_unaligned_be32(sdebug_capacity, ap + 0);
2385 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2386 		offset += bd_len;
2387 		ap = arr + offset;
2388 	} else if (16 == bd_len) {
2389 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2390 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2391 		offset += bd_len;
2392 		ap = arr + offset;
2393 	}
2394 
2395 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2396 		/* TODO: Control Extension page */
2397 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2398 		return check_condition_result;
2399 	}
2400 	bad_pcode = false;
2401 
2402 	switch (pcode) {
2403 	case 0x1:	/* Read-Write error recovery page, direct access */
2404 		len = resp_err_recov_pg(ap, pcontrol, target);
2405 		offset += len;
2406 		break;
2407 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2408 		len = resp_disconnect_pg(ap, pcontrol, target);
2409 		offset += len;
2410 		break;
2411 	case 0x3:       /* Format device page, direct access */
2412 		if (is_disk) {
2413 			len = resp_format_pg(ap, pcontrol, target);
2414 			offset += len;
2415 		} else
2416 			bad_pcode = true;
2417 		break;
2418 	case 0x8:	/* Caching page, direct access */
2419 		if (is_disk || is_zbc) {
2420 			len = resp_caching_pg(ap, pcontrol, target);
2421 			offset += len;
2422 		} else
2423 			bad_pcode = true;
2424 		break;
2425 	case 0xa:	/* Control Mode page, all devices */
2426 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2427 		offset += len;
2428 		break;
2429 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2430 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2431 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2432 			return check_condition_result;
2433 		}
2434 		len = 0;
2435 		if ((0x0 == subpcode) || (0xff == subpcode))
2436 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2437 		if ((0x1 == subpcode) || (0xff == subpcode))
2438 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2439 						  target_dev_id);
2440 		if ((0x2 == subpcode) || (0xff == subpcode))
2441 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2442 		offset += len;
2443 		break;
2444 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2445 		len = resp_iec_m_pg(ap, pcontrol, target);
2446 		offset += len;
2447 		break;
2448 	case 0x3f:	/* Read all Mode pages */
2449 		if ((0 == subpcode) || (0xff == subpcode)) {
2450 			len = resp_err_recov_pg(ap, pcontrol, target);
2451 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2452 			if (is_disk) {
2453 				len += resp_format_pg(ap + len, pcontrol,
2454 						      target);
2455 				len += resp_caching_pg(ap + len, pcontrol,
2456 						       target);
2457 			} else if (is_zbc) {
2458 				len += resp_caching_pg(ap + len, pcontrol,
2459 						       target);
2460 			}
2461 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2462 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2463 			if (0xff == subpcode) {
2464 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2465 						  target, target_dev_id);
2466 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2467 			}
2468 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2469 			offset += len;
2470 		} else {
2471 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2472 			return check_condition_result;
2473 		}
2474 		break;
2475 	default:
2476 		bad_pcode = true;
2477 		break;
2478 	}
2479 	if (bad_pcode) {
2480 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2481 		return check_condition_result;
2482 	}
2483 	if (msense_6)
2484 		arr[0] = offset - 1;
2485 	else
2486 		put_unaligned_be16((offset - 2), arr + 0);
2487 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2488 }
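
/*
 * Worked example of the final length fix-up: a MODE SENSE(6) for the
 * caching page on a disk builds a 4 byte header, an 8 byte block
 * descriptor and a 20 byte page, so offset ends at 32 and arr[0] (MODE
 * DATA LENGTH, which excludes itself) becomes 31; the 10 byte CDB
 * variant has an 8 byte header and stores offset - 2 instead.
 */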
2489 
2490 #define SDEBUG_MAX_MSELECT_SZ 512
2491 
2492 static int resp_mode_select(struct scsi_cmnd *scp,
2493 			    struct sdebug_dev_info *devip)
2494 {
2495 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2496 	int param_len, res, mpage;
2497 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2498 	unsigned char *cmd = scp->cmnd;
2499 	int mselect6 = (MODE_SELECT == cmd[0]);
2500 
2501 	memset(arr, 0, sizeof(arr));
2502 	pf = cmd[1] & 0x10;
2503 	sp = cmd[1] & 0x1;
2504 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2505 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2506 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2507 		return check_condition_result;
2508 	}
2509 	res = fetch_to_dev_buffer(scp, arr, param_len);
2510 	if (-1 == res)
2511 		return DID_ERROR << 16;
2512 	else if (sdebug_verbose && (res < param_len))
2513 		sdev_printk(KERN_INFO, scp->device,
2514 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2515 			    __func__, param_len, res);
2516 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2517 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2518 	off = bd_len + (mselect6 ? 4 : 8);
2519 	if (md_len > 2 || off >= res) {
2520 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2521 		return check_condition_result;
2522 	}
2523 	mpage = arr[off] & 0x3f;
2524 	ps = !!(arr[off] & 0x80);
2525 	if (ps) {
2526 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2527 		return check_condition_result;
2528 	}
2529 	spf = !!(arr[off] & 0x40);
2530 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2531 		       (arr[off + 1] + 2);
2532 	if ((pg_len + off) > param_len) {
2533 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2534 				PARAMETER_LIST_LENGTH_ERR, 0);
2535 		return check_condition_result;
2536 	}
2537 	switch (mpage) {
2538 	case 0x8:      /* Caching Mode page */
2539 		if (caching_pg[1] == arr[off + 1]) {
2540 			memcpy(caching_pg + 2, arr + off + 2,
2541 			       sizeof(caching_pg) - 2);
2542 			goto set_mode_changed_ua;
2543 		}
2544 		break;
2545 	case 0xa:      /* Control Mode page */
2546 		if (ctrl_m_pg[1] == arr[off + 1]) {
2547 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2548 			       sizeof(ctrl_m_pg) - 2);
2549 			if (ctrl_m_pg[4] & 0x8)
2550 				sdebug_wp = true;
2551 			else
2552 				sdebug_wp = false;
2553 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2554 			goto set_mode_changed_ua;
2555 		}
2556 		break;
2557 	case 0x1c:      /* Informational Exceptions Mode page */
2558 		if (iec_m_pg[1] == arr[off + 1]) {
2559 			memcpy(iec_m_pg + 2, arr + off + 2,
2560 			       sizeof(iec_m_pg) - 2);
2561 			goto set_mode_changed_ua;
2562 		}
2563 		break;
2564 	default:
2565 		break;
2566 	}
2567 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2568 	return check_condition_result;
2569 set_mode_changed_ua:
2570 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2571 	return 0;
2572 }
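
/*
 * The pg_len computation above distinguishes the two page formats via
 * the SPF bit (0x40): sub_page format has a 16-bit PAGE LENGTH at bytes
 * 2..3 plus a 4 byte header, page_0 format an 8-bit length at byte 1
 * plus a 2 byte header; e.g. a caching page with arr[off + 1] == 18
 * spans pg_len = 20 bytes.
 */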
2573 
2574 static int resp_temp_l_pg(unsigned char *arr)
2575 {
2576 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2577 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2578 		};
2579 
2580 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2581 	return sizeof(temp_l_pg);
2582 }
2583 
2584 static int resp_ie_l_pg(unsigned char *arr)
2585 {
2586 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2587 		};
2588 
2589 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2590 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2591 		arr[4] = THRESHOLD_EXCEEDED;
2592 		arr[5] = 0xff;
2593 	}
2594 	return sizeof(ie_l_pg);
2595 }
2596 
2597 static int resp_env_rep_l_spg(unsigned char *arr)
2598 {
2599 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2600 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2601 					 0x1, 0x0, 0x23, 0x8,
2602 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2603 		};
2604 
2605 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2606 	return sizeof(env_rep_l_spg);
2607 }
2608 
2609 #define SDEBUG_MAX_LSENSE_SZ 512
2610 
2611 static int resp_log_sense(struct scsi_cmnd *scp,
2612 			  struct sdebug_dev_info *devip)
2613 {
2614 	int ppc, sp, pcode, subpcode;
2615 	u32 alloc_len, len, n;
2616 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2617 	unsigned char *cmd = scp->cmnd;
2618 
2619 	memset(arr, 0, sizeof(arr));
2620 	ppc = cmd[1] & 0x2;
2621 	sp = cmd[1] & 0x1;
2622 	if (ppc || sp) {
2623 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2624 		return check_condition_result;
2625 	}
2626 	pcode = cmd[2] & 0x3f;
2627 	subpcode = cmd[3] & 0xff;
2628 	alloc_len = get_unaligned_be16(cmd + 7);
2629 	arr[0] = pcode;
2630 	if (0 == subpcode) {
2631 		switch (pcode) {
2632 		case 0x0:	/* Supported log pages log page */
2633 			n = 4;
2634 			arr[n++] = 0x0;		/* this page */
2635 			arr[n++] = 0xd;		/* Temperature */
2636 			arr[n++] = 0x2f;	/* Informational exceptions */
2637 			arr[3] = n - 4;
2638 			break;
2639 		case 0xd:	/* Temperature log page */
2640 			arr[3] = resp_temp_l_pg(arr + 4);
2641 			break;
2642 		case 0x2f:	/* Informational exceptions log page */
2643 			arr[3] = resp_ie_l_pg(arr + 4);
2644 			break;
2645 		default:
2646 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2647 			return check_condition_result;
2648 		}
2649 	} else if (0xff == subpcode) {
2650 		arr[0] |= 0x40;
2651 		arr[1] = subpcode;
2652 		switch (pcode) {
2653 		case 0x0:	/* Supported log pages and subpages log page */
2654 			n = 4;
2655 			arr[n++] = 0x0;
2656 			arr[n++] = 0x0;		/* 0,0 page */
2657 			arr[n++] = 0x0;
2658 			arr[n++] = 0xff;	/* this page */
2659 			arr[n++] = 0xd;
2660 			arr[n++] = 0x0;		/* Temperature */
2661 			arr[n++] = 0xd;
2662 			arr[n++] = 0x1;		/* Environment reporting */
2663 			arr[n++] = 0xd;
2664 			arr[n++] = 0xff;	/* all 0xd subpages */
2665 			arr[n++] = 0x2f;
2666 			arr[n++] = 0x0;	/* Informational exceptions */
2667 			arr[n++] = 0x2f;
2668 			arr[n++] = 0xff;	/* all 0x2f subpages */
2669 			arr[3] = n - 4;
2670 			break;
2671 		case 0xd:	/* Temperature subpages */
2672 			n = 4;
2673 			arr[n++] = 0xd;
2674 			arr[n++] = 0x0;		/* Temperature */
2675 			arr[n++] = 0xd;
2676 			arr[n++] = 0x1;		/* Environment reporting */
2677 			arr[n++] = 0xd;
2678 			arr[n++] = 0xff;	/* these subpages */
2679 			arr[3] = n - 4;
2680 			break;
2681 		case 0x2f:	/* Informational exceptions subpages */
2682 			n = 4;
2683 			arr[n++] = 0x2f;
2684 			arr[n++] = 0x0;		/* Informational exceptions */
2685 			arr[n++] = 0x2f;
2686 			arr[n++] = 0xff;	/* these subpages */
2687 			arr[3] = n - 4;
2688 			break;
2689 		default:
2690 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2691 			return check_condition_result;
2692 		}
2693 	} else if (subpcode > 0) {
2694 		arr[0] |= 0x40;
2695 		arr[1] = subpcode;
2696 		if (pcode == 0xd && subpcode == 1)
2697 			arr[3] = resp_env_rep_l_spg(arr + 4);
2698 		else {
2699 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2700 			return check_condition_result;
2701 		}
2702 	} else {
2703 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2704 		return check_condition_result;
2705 	}
2706 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2707 	return fill_from_dev_buffer(scp, arr,
2708 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2709 }
2710 
2711 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2712 {
2713 	return devip->nr_zones != 0;
2714 }
2715 
2716 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2717 					unsigned long long lba)
2718 {
2719 	return &devip->zstate[lba >> devip->zsize_shift];
2720 }
2721 
2722 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2723 {
2724 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2725 }
2726 
2727 static void zbc_close_zone(struct sdebug_dev_info *devip,
2728 			   struct sdeb_zone_state *zsp)
2729 {
2730 	enum sdebug_z_cond zc;
2731 
2732 	if (zbc_zone_is_conv(zsp))
2733 		return;
2734 
2735 	zc = zsp->z_cond;
2736 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2737 		return;
2738 
2739 	if (zc == ZC2_IMPLICIT_OPEN)
2740 		devip->nr_imp_open--;
2741 	else
2742 		devip->nr_exp_open--;
2743 
2744 	if (zsp->z_wp == zsp->z_start) {
2745 		zsp->z_cond = ZC1_EMPTY;
2746 	} else {
2747 		zsp->z_cond = ZC4_CLOSED;
2748 		devip->nr_closed++;
2749 	}
2750 }
2751 
2752 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2753 {
2754 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2755 	unsigned int i;
2756 
2757 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2758 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2759 			zbc_close_zone(devip, zsp);
2760 			return;
2761 		}
2762 	}
2763 }
2764 
2765 static void zbc_open_zone(struct sdebug_dev_info *devip,
2766 			  struct sdeb_zone_state *zsp, bool explicit)
2767 {
2768 	enum sdebug_z_cond zc;
2769 
2770 	if (zbc_zone_is_conv(zsp))
2771 		return;
2772 
2773 	zc = zsp->z_cond;
2774 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2775 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2776 		return;
2777 
2778 	/* Close an implicit open zone if necessary */
2779 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2780 		zbc_close_zone(devip, zsp);
2781 	else if (devip->max_open &&
2782 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2783 		zbc_close_imp_open_zone(devip);
2784 
2785 	if (zsp->z_cond == ZC4_CLOSED)
2786 		devip->nr_closed--;
2787 	if (explicit) {
2788 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2789 		devip->nr_exp_open++;
2790 	} else {
2791 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2792 		devip->nr_imp_open++;
2793 	}
2794 }
2795 
2796 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2797 		       unsigned long long lba, unsigned int num)
2798 {
2799 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2800 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2801 
2802 	if (zbc_zone_is_conv(zsp))
2803 		return;
2804 
2805 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2806 		zsp->z_wp += num;
2807 		if (zsp->z_wp >= zend)
2808 			zsp->z_cond = ZC5_FULL;
2809 		return;
2810 	}
2811 
2812 	while (num) {
2813 		if (lba != zsp->z_wp)
2814 			zsp->z_non_seq_resource = true;
2815 
2816 		end = lba + num;
2817 		if (end >= zend) {
2818 			n = zend - lba;
2819 			zsp->z_wp = zend;
2820 		} else if (end > zsp->z_wp) {
2821 			n = num;
2822 			zsp->z_wp = end;
2823 		} else {
2824 			n = num;
2825 		}
2826 		if (zsp->z_wp >= zend)
2827 			zsp->z_cond = ZC5_FULL;
2828 
2829 		num -= n;
2830 		lba += n;
2831 		if (num) {
2832 			zsp++;
2833 			zend = zsp->z_start + zsp->z_size;
2834 		}
2835 	}
2836 }
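
/*
 * Example for the loop above (sequential write preferred zones): in a
 * zone of size 0x80 whose write pointer sits at z_start + 0x10, a write
 * of 0x10 blocks at z_start + 0x20 ends beyond the pointer, so z_wp
 * moves to z_start + 0x30 and z_non_seq_resource is set because the
 * write did not start at the pointer. Sequential write required zones
 * take the short path at the top and simply advance z_wp by num.
 */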
2837 
2838 static int check_zbc_access_params(struct scsi_cmnd *scp,
2839 			unsigned long long lba, unsigned int num, bool write)
2840 {
2841 	struct scsi_device *sdp = scp->device;
2842 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2843 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2844 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2845 
2846 	if (!write) {
2847 		if (devip->zmodel == BLK_ZONED_HA)
2848 			return 0;
2849 		/* For host-managed, reads cannot cross zone type boundaries */
2850 		if (zsp_end != zsp &&
2851 		    zbc_zone_is_conv(zsp) &&
2852 		    !zbc_zone_is_conv(zsp_end)) {
2853 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2854 					LBA_OUT_OF_RANGE,
2855 					READ_INVDATA_ASCQ);
2856 			return check_condition_result;
2857 		}
2858 		return 0;
2859 	}
2860 
2861 	/* No restrictions for writes within conventional zones */
2862 	if (zbc_zone_is_conv(zsp)) {
2863 		if (!zbc_zone_is_conv(zsp_end)) {
2864 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2865 					LBA_OUT_OF_RANGE,
2866 					WRITE_BOUNDARY_ASCQ);
2867 			return check_condition_result;
2868 		}
2869 		return 0;
2870 	}
2871 
2872 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2873 		/* Writes cannot cross sequential zone boundaries */
2874 		if (zsp_end != zsp) {
2875 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2876 					LBA_OUT_OF_RANGE,
2877 					WRITE_BOUNDARY_ASCQ);
2878 			return check_condition_result;
2879 		}
2880 		/* Cannot write full zones */
2881 		if (zsp->z_cond == ZC5_FULL) {
2882 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2883 					INVALID_FIELD_IN_CDB, 0);
2884 			return check_condition_result;
2885 		}
2886 		/* Writes must be aligned to the zone WP */
2887 		if (lba != zsp->z_wp) {
2888 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2889 					LBA_OUT_OF_RANGE,
2890 					UNALIGNED_WRITE_ASCQ);
2891 			return check_condition_result;
2892 		}
2893 	}
2894 
2895 	/* Handle implicit open of closed and empty zones */
2896 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2897 		if (devip->max_open &&
2898 		    devip->nr_exp_open >= devip->max_open) {
2899 			mk_sense_buffer(scp, DATA_PROTECT,
2900 					INSUFF_RES_ASC,
2901 					INSUFF_ZONE_ASCQ);
2902 			return check_condition_result;
2903 		}
2904 		zbc_open_zone(devip, zsp, false);
2905 	}
2906 
2907 	return 0;
2908 }
2909 
2910 static inline int check_device_access_params
2911 			(struct scsi_cmnd *scp, unsigned long long lba,
2912 			 unsigned int num, bool write)
2913 {
2914 	struct scsi_device *sdp = scp->device;
2915 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2916 
2917 	if (lba + num > sdebug_capacity) {
2918 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2919 		return check_condition_result;
2920 	}
2921 	/* transfer length excessive (tie in to block limits VPD page) */
2922 	if (num > sdebug_store_sectors) {
2923 		/* needs work to find which cdb byte 'num' comes from */
2924 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2925 		return check_condition_result;
2926 	}
2927 	if (write && unlikely(sdebug_wp)) {
2928 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2929 		return check_condition_result;
2930 	}
2931 	if (sdebug_dev_is_zoned(devip))
2932 		return check_zbc_access_params(scp, lba, num, write);
2933 
2934 	return 0;
2935 }
2936 
2937 /*
2938  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2939  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2940  * that access any of the "stores" in struct sdeb_store_info should call this
2941  * function with bug_if_fake_rw set to true.
2942  */
2943 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2944 						bool bug_if_fake_rw)
2945 {
2946 	if (sdebug_fake_rw) {
2947 		BUG_ON(bug_if_fake_rw);	/* See note above */
2948 		return NULL;
2949 	}
2950 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2951 }
2952 
2953 /* Returns number of bytes copied or -1 if error. */
2954 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2955 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2956 {
2957 	int ret;
2958 	u64 block, rest = 0;
2959 	enum dma_data_direction dir;
2960 	struct scsi_data_buffer *sdb = &scp->sdb;
2961 	u8 *fsp;
2962 
2963 	if (do_write) {
2964 		dir = DMA_TO_DEVICE;
2965 		write_since_sync = true;
2966 	} else {
2967 		dir = DMA_FROM_DEVICE;
2968 	}
2969 
2970 	if (!sdb->length || !sip)
2971 		return 0;
2972 	if (scp->sc_data_direction != dir)
2973 		return -1;
2974 	fsp = sip->storep;
2975 
2976 	block = do_div(lba, sdebug_store_sectors);
2977 	if (block + num > sdebug_store_sectors)
2978 		rest = block + num - sdebug_store_sectors;
2979 
2980 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2981 		   fsp + (block * sdebug_sector_size),
2982 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2983 	if (ret != (num - rest) * sdebug_sector_size)
2984 		return ret;
2985 
2986 	if (rest) {
2987 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2988 			    fsp, rest * sdebug_sector_size,
2989 			    sg_skip + ((num - rest) * sdebug_sector_size),
2990 			    do_write);
2991 	}
2992 
2993 	return ret;
2994 }
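
/*
 * Wrap-around example: with sdebug_store_sectors = 0x1000, a 4 block
 * access at lba 0xffe maps to block 0xffe with rest = 2, so the first
 * sg_copy_buffer() moves 2 sectors at the end of the store and the
 * second moves the remaining 2 from offset 0; this is what lets a large
 * virtual_gb capacity sit on a small real store. A hedged sketch of
 * just the split (name illustrative, not used by the driver):
 */
static inline u64 sdeb_wrap_rest_sketch(u64 lba, u32 num, u32 store_blks)
{
	u64 block = do_div(lba, store_blks);	/* lba modulo store size */

	return block + num > store_blks ? block + num - store_blks : 0;
}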
2995 
2996 /* Returns number of bytes copied or -1 if error. */
2997 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2998 {
2999 	struct scsi_data_buffer *sdb = &scp->sdb;
3000 
3001 	if (!sdb->length)
3002 		return 0;
3003 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3004 		return -1;
3005 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3006 			      num * sdebug_sector_size, 0, true);
3007 }
3008 
3009 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3010  * arr into sip->storep+lba and return true. If comparison fails then
3011  * return false. */
3012 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3013 			      const u8 *arr, bool compare_only)
3014 {
3015 	bool res;
3016 	u64 block, rest = 0;
3017 	u32 store_blks = sdebug_store_sectors;
3018 	u32 lb_size = sdebug_sector_size;
3019 	u8 *fsp = sip->storep;
3020 
3021 	block = do_div(lba, store_blks);
3022 	if (block + num > store_blks)
3023 		rest = block + num - store_blks;
3024 
3025 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3026 	if (!res)
3027 		return res;
3028 	if (rest)
3029 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3030 			      rest * lb_size);
3031 	if (!res)
3032 		return res;
3033 	if (compare_only)
3034 		return true;
3035 	arr += num * lb_size;
3036 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3037 	if (rest)
3038 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3039 	return res;
3040 }
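
/*
 * For COMPARE AND WRITE the caller passes 2 * num blocks in arr: the
 * first num blocks are the verify data compared with the store, the
 * second num blocks (the "top half") are written back only when the
 * whole comparison matches and compare_only is false.
 */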
3041 
3042 static __be16 dif_compute_csum(const void *buf, int len)
3043 {
3044 	__be16 csum;
3045 
3046 	if (sdebug_guard)
3047 		csum = (__force __be16)ip_compute_csum(buf, len);
3048 	else
3049 		csum = cpu_to_be16(crc_t10dif(buf, len));
3050 
3051 	return csum;
3052 }
3053 
3054 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3055 		      sector_t sector, u32 ei_lba)
3056 {
3057 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3058 
3059 	if (sdt->guard_tag != csum) {
3060 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3061 			(unsigned long)sector,
3062 			be16_to_cpu(sdt->guard_tag),
3063 			be16_to_cpu(csum));
3064 		return 0x01;
3065 	}
3066 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3067 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3068 		pr_err("REF check failed on sector %lu\n",
3069 			(unsigned long)sector);
3070 		return 0x03;
3071 	}
3072 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3073 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3074 		pr_err("REF check failed on sector %lu\n",
3075 			(unsigned long)sector);
3076 		return 0x03;
3077 	}
3078 	return 0;
3079 }
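
/*
 * Checks above follow the T10 PI tuple: the 16-bit guard tag must match
 * the checksum of the sector data (IP checksum or CRC16 T10-DIF, chosen
 * by sdebug_guard in dif_compute_csum()); for type 1 protection the
 * 32-bit reference tag must equal the low 32 bits of the LBA, and for
 * type 2 it must equal ei_lba, the expected value seeded from the CDB.
 * The 0x01/0x03 return values are used by the callers as the ASCQ to
 * pair with ASC 0x10 (guard / reference tag check failed).
 */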
3080 
3081 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3082 			  unsigned int sectors, bool read)
3083 {
3084 	size_t resid;
3085 	void *paddr;
3086 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3087 						scp->device->hostdata, true);
3088 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3089 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3090 	struct sg_mapping_iter miter;
3091 
3092 	/* Bytes of protection data to copy into sgl */
3093 	resid = sectors * sizeof(*dif_storep);
3094 
3095 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3096 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3097 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3098 
3099 	while (sg_miter_next(&miter) && resid > 0) {
3100 		size_t len = min_t(size_t, miter.length, resid);
3101 		void *start = dif_store(sip, sector);
3102 		size_t rest = 0;
3103 
3104 		if (dif_store_end < start + len)
3105 			rest = start + len - dif_store_end;
3106 
3107 		paddr = miter.addr;
3108 
3109 		if (read)
3110 			memcpy(paddr, start, len - rest);
3111 		else
3112 			memcpy(start, paddr, len - rest);
3113 
3114 		if (rest) {
3115 			if (read)
3116 				memcpy(paddr + len - rest, dif_storep, rest);
3117 			else
3118 				memcpy(dif_storep, paddr + len - rest, rest);
3119 		}
3120 
3121 		sector += len / sizeof(*dif_storep);
3122 		resid -= len;
3123 	}
3124 	sg_miter_stop(&miter);
3125 }
3126 
3127 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3128 			    unsigned int sectors, u32 ei_lba)
3129 {
3130 	int ret = 0;
3131 	unsigned int i;
3132 	sector_t sector;
3133 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3134 						scp->device->hostdata, true);
3135 	struct t10_pi_tuple *sdt;
3136 
3137 	for (i = 0; i < sectors; i++, ei_lba++) {
3138 		sector = start_sec + i;
3139 		sdt = dif_store(sip, sector);
3140 
3141 		if (sdt->app_tag == cpu_to_be16(0xffff))
3142 			continue;
3143 
3144 		/*
3145 		 * Because scsi_debug acts as both initiator and
3146 		 * target we proceed to verify the PI even if
3147 		 * RDPROTECT=3. This is done so the "initiator" knows
3148 		 * which type of error to return. Otherwise we would
3149 		 * have to iterate over the PI twice.
3150 		 */
3151 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3152 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3153 					 sector, ei_lba);
3154 			if (ret) {
3155 				dif_errors++;
3156 				break;
3157 			}
3158 		}
3159 	}
3160 
3161 	dif_copy_prot(scp, start_sec, sectors, true);
3162 	dix_reads++;
3163 
3164 	return ret;
3165 }
3166 
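/* When the sdebug_no_rwlock module parameter is set, these helpers skip
 * the real rwlock; the __acquire()/__release() calls only keep sparse's
 * lock-context checking balanced. */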
3167 static inline void
3168 sdeb_read_lock(struct sdeb_store_info *sip)
3169 {
3170 	if (sdebug_no_rwlock) {
3171 		if (sip)
3172 			__acquire(&sip->macc_lck);
3173 		else
3174 			__acquire(&sdeb_fake_rw_lck);
3175 	} else {
3176 		if (sip)
3177 			read_lock(&sip->macc_lck);
3178 		else
3179 			read_lock(&sdeb_fake_rw_lck);
3180 	}
3181 }
3182 
3183 static inline void
3184 sdeb_read_unlock(struct sdeb_store_info *sip)
3185 {
3186 	if (sdebug_no_rwlock) {
3187 		if (sip)
3188 			__release(&sip->macc_lck);
3189 		else
3190 			__release(&sdeb_fake_rw_lck);
3191 	} else {
3192 		if (sip)
3193 			read_unlock(&sip->macc_lck);
3194 		else
3195 			read_unlock(&sdeb_fake_rw_lck);
3196 	}
3197 }
3198 
3199 static inline void
3200 sdeb_write_lock(struct sdeb_store_info *sip)
3201 {
3202 	if (sdebug_no_rwlock) {
3203 		if (sip)
3204 			__acquire(&sip->macc_lck);
3205 		else
3206 			__acquire(&sdeb_fake_rw_lck);
3207 	} else {
3208 		if (sip)
3209 			write_lock(&sip->macc_lck);
3210 		else
3211 			write_lock(&sdeb_fake_rw_lck);
3212 	}
3213 }
3214 
3215 static inline void
3216 sdeb_write_unlock(struct sdeb_store_info *sip)
3217 {
3218 	if (sdebug_no_rwlock) {
3219 		if (sip)
3220 			__release(&sip->macc_lck);
3221 		else
3222 			__release(&sdeb_fake_rw_lck);
3223 	} else {
3224 		if (sip)
3225 			write_unlock(&sip->macc_lck);
3226 		else
3227 			write_unlock(&sdeb_fake_rw_lck);
3228 	}
3229 }
3230 
3231 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3232 {
3233 	bool check_prot;
3234 	u32 num;
3235 	u32 ei_lba;
3236 	int ret;
3237 	u64 lba;
3238 	struct sdeb_store_info *sip = devip2sip(devip, true);
3239 	u8 *cmd = scp->cmnd;
3240 
3241 	switch (cmd[0]) {
3242 	case READ_16:
3243 		ei_lba = 0;
3244 		lba = get_unaligned_be64(cmd + 2);
3245 		num = get_unaligned_be32(cmd + 10);
3246 		check_prot = true;
3247 		break;
3248 	case READ_10:
3249 		ei_lba = 0;
3250 		lba = get_unaligned_be32(cmd + 2);
3251 		num = get_unaligned_be16(cmd + 7);
3252 		check_prot = true;
3253 		break;
3254 	case READ_6:
3255 		ei_lba = 0;
3256 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3257 		      (u32)(cmd[1] & 0x1f) << 16;
3258 		num = (0 == cmd[4]) ? 256 : cmd[4];
3259 		check_prot = true;
3260 		break;
3261 	case READ_12:
3262 		ei_lba = 0;
3263 		lba = get_unaligned_be32(cmd + 2);
3264 		num = get_unaligned_be32(cmd + 6);
3265 		check_prot = true;
3266 		break;
3267 	case XDWRITEREAD_10:
3268 		ei_lba = 0;
3269 		lba = get_unaligned_be32(cmd + 2);
3270 		num = get_unaligned_be16(cmd + 7);
3271 		check_prot = false;
3272 		break;
3273 	default:	/* assume READ(32) */
3274 		lba = get_unaligned_be64(cmd + 12);
3275 		ei_lba = get_unaligned_be32(cmd + 20);
3276 		num = get_unaligned_be32(cmd + 28);
3277 		check_prot = false;
3278 		break;
3279 	}
3280 	if (unlikely(have_dif_prot && check_prot)) {
3281 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3282 		    (cmd[1] & 0xe0)) {
3283 			mk_sense_invalid_opcode(scp);
3284 			return check_condition_result;
3285 		}
3286 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3287 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3288 		    (cmd[1] & 0xe0) == 0)
3289 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3290 				    "to DIF device\n");
3291 	}
3292 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3293 		     atomic_read(&sdeb_inject_pending))) {
3294 		num /= 2;
3295 		atomic_set(&sdeb_inject_pending, 0);
3296 	}
3297 
3298 	ret = check_device_access_params(scp, lba, num, false);
3299 	if (ret)
3300 		return ret;
3301 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3302 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3303 		     ((lba + num) > sdebug_medium_error_start))) {
3304 		/* claim unrecoverable read error */
3305 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3306 		/* set info field and valid bit for fixed descriptor */
3307 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3308 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3309 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3310 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3311 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3312 		}
3313 		scsi_set_resid(scp, scsi_bufflen(scp));
3314 		return check_condition_result;
3315 	}
3316 
3317 	sdeb_read_lock(sip);
3318 
3319 	/* DIX + T10 DIF */
3320 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3321 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3322 		case 1: /* Guard tag error */
3323 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3324 				sdeb_read_unlock(sip);
3325 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3326 				return check_condition_result;
3327 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3328 				sdeb_read_unlock(sip);
3329 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3330 				return illegal_condition_result;
3331 			}
3332 			break;
3333 		case 3: /* Reference tag error */
3334 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3335 				sdeb_read_unlock(sip);
3336 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3337 				return check_condition_result;
3338 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3339 				sdeb_read_unlock(sip);
3340 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3341 				return illegal_condition_result;
3342 			}
3343 			break;
3344 		}
3345 	}
3346 
3347 	ret = do_device_access(sip, scp, 0, lba, num, false);
3348 	sdeb_read_unlock(sip);
3349 	if (unlikely(ret == -1))
3350 		return DID_ERROR << 16;
3351 
3352 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3353 
3354 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3355 		     atomic_read(&sdeb_inject_pending))) {
3356 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3357 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3358 			atomic_set(&sdeb_inject_pending, 0);
3359 			return check_condition_result;
3360 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3361 			/* Logical block guard check failed */
3362 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3363 			atomic_set(&sdeb_inject_pending, 0);
3364 			return illegal_condition_result;
3365 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3366 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3367 			atomic_set(&sdeb_inject_pending, 0);
3368 			return illegal_condition_result;
3369 		}
3370 	}
3371 	return 0;
3372 }
3373 
3374 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3375 			     unsigned int sectors, u32 ei_lba)
3376 {
3377 	int ret;
3378 	struct t10_pi_tuple *sdt;
3379 	void *daddr;
3380 	sector_t sector = start_sec;
3381 	int ppage_offset;
3382 	int dpage_offset;
3383 	struct sg_mapping_iter diter;
3384 	struct sg_mapping_iter piter;
3385 
3386 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3387 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3388 
3389 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3390 			scsi_prot_sg_count(SCpnt),
3391 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3392 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3393 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3394 
3395 	/* For each protection page */
3396 	while (sg_miter_next(&piter)) {
3397 		dpage_offset = 0;
3398 		if (WARN_ON(!sg_miter_next(&diter))) {
3399 			ret = 0x01;
3400 			goto out;
3401 		}
3402 
3403 		for (ppage_offset = 0; ppage_offset < piter.length;
3404 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3405 			/* If we're at the end of the current
3406 			 * data page advance to the next one
3407 			 */
3408 			if (dpage_offset >= diter.length) {
3409 				if (WARN_ON(!sg_miter_next(&diter))) {
3410 					ret = 0x01;
3411 					goto out;
3412 				}
3413 				dpage_offset = 0;
3414 			}
3415 
3416 			sdt = piter.addr + ppage_offset;
3417 			daddr = diter.addr + dpage_offset;
3418 
3419 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3420 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3421 				if (ret)
3422 					goto out;
3423 			}
3424 
3425 			sector++;
3426 			ei_lba++;
3427 			dpage_offset += sdebug_sector_size;
3428 		}
3429 		diter.consumed = dpage_offset;
3430 		sg_miter_stop(&diter);
3431 	}
3432 	sg_miter_stop(&piter);
3433 
3434 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3435 	dix_writes++;
3436 
3437 	return 0;
3438 
3439 out:
3440 	dif_errors++;
3441 	sg_miter_stop(&diter);
3442 	sg_miter_stop(&piter);
3443 	return ret;
3444 }
3445 
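/* Map an LBA to a bit index in the provisioning (UNMAP) bitmap and back.
 * With granularity G and alignment A (0 < A < G), bit 0 covers the LBAs
 * below A and bit k > 0 covers LBAs [k*G - (G - A), (k+1)*G - (G - A)).
 * For example, G=4 and A=1 put LBA 0 in bit 0 and LBAs 1..4 in bit 1. */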
3446 static unsigned long lba_to_map_index(sector_t lba)
3447 {
3448 	if (sdebug_unmap_alignment)
3449 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3450 	sector_div(lba, sdebug_unmap_granularity);
3451 	return lba;
3452 }
3453 
3454 static sector_t map_index_to_lba(unsigned long index)
3455 {
3456 	sector_t lba = index * sdebug_unmap_granularity;
3457 
3458 	if (sdebug_unmap_alignment)
3459 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3460 	return lba;
3461 }
3462 
3463 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3464 			      unsigned int *num)
3465 {
3466 	sector_t end;
3467 	unsigned int mapped;
3468 	unsigned long index;
3469 	unsigned long next;
3470 
3471 	index = lba_to_map_index(lba);
3472 	mapped = test_bit(index, sip->map_storep);
3473 
3474 	if (mapped)
3475 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3476 	else
3477 		next = find_next_bit(sip->map_storep, map_size, index);
3478 
3479 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3480 	*num = end - lba;
3481 	return mapped;
3482 }
3483 
3484 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3485 		       unsigned int len)
3486 {
3487 	sector_t end = lba + len;
3488 
3489 	while (lba < end) {
3490 		unsigned long index = lba_to_map_index(lba);
3491 
3492 		if (index < map_size)
3493 			set_bit(index, sip->map_storep);
3494 
3495 		lba = map_index_to_lba(index + 1);
3496 	}
3497 }
3498 
3499 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3500 			 unsigned int len)
3501 {
3502 	sector_t end = lba + len;
3503 	u8 *fsp = sip->storep;
3504 
3505 	while (lba < end) {
3506 		unsigned long index = lba_to_map_index(lba);
3507 
3508 		if (lba == map_index_to_lba(index) &&
3509 		    lba + sdebug_unmap_granularity <= end &&
3510 		    index < map_size) {
3511 			clear_bit(index, sip->map_storep);
3512 			if (sdebug_lbprz) {  /* LBPRZ=1: zero fill, LBPRZ=2: 0xff fill */
3513 				memset(fsp + lba * sdebug_sector_size,
3514 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3515 				       sdebug_sector_size *
3516 				       sdebug_unmap_granularity);
3517 			}
3518 			if (sip->dif_storep) {
3519 				memset(sip->dif_storep + lba, 0xff,
3520 				       sizeof(*sip->dif_storep) *
3521 				       sdebug_unmap_granularity);
3522 			}
3523 		}
3524 		lba = map_index_to_lba(index + 1);
3525 	}
3526 }
3527 
3528 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3529 {
3530 	bool check_prot;
3531 	u32 num;
3532 	u32 ei_lba;
3533 	int ret;
3534 	u64 lba;
3535 	struct sdeb_store_info *sip = devip2sip(devip, true);
3536 	u8 *cmd = scp->cmnd;
3537 
3538 	switch (cmd[0]) {
3539 	case WRITE_16:
3540 		ei_lba = 0;
3541 		lba = get_unaligned_be64(cmd + 2);
3542 		num = get_unaligned_be32(cmd + 10);
3543 		check_prot = true;
3544 		break;
3545 	case WRITE_10:
3546 		ei_lba = 0;
3547 		lba = get_unaligned_be32(cmd + 2);
3548 		num = get_unaligned_be16(cmd + 7);
3549 		check_prot = true;
3550 		break;
3551 	case WRITE_6:
3552 		ei_lba = 0;
3553 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3554 		      (u32)(cmd[1] & 0x1f) << 16;
3555 		num = (0 == cmd[4]) ? 256 : cmd[4];
3556 		check_prot = true;
3557 		break;
3558 	case WRITE_12:
3559 		ei_lba = 0;
3560 		lba = get_unaligned_be32(cmd + 2);
3561 		num = get_unaligned_be32(cmd + 6);
3562 		check_prot = true;
3563 		break;
3564 	case XDWRITEREAD_10:	/* XDWRITEREAD(10) */
3565 		ei_lba = 0;
3566 		lba = get_unaligned_be32(cmd + 2);
3567 		num = get_unaligned_be16(cmd + 7);
3568 		check_prot = false;
3569 		break;
3570 	default:	/* assume WRITE(32) */
3571 		lba = get_unaligned_be64(cmd + 12);
3572 		ei_lba = get_unaligned_be32(cmd + 20);
3573 		num = get_unaligned_be32(cmd + 28);
3574 		check_prot = false;
3575 		break;
3576 	}
3577 	if (unlikely(have_dif_prot && check_prot)) {
3578 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3579 		    (cmd[1] & 0xe0)) {
3580 			mk_sense_invalid_opcode(scp);
3581 			return check_condition_result;
3582 		}
3583 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3584 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3585 		    (cmd[1] & 0xe0) == 0)
3586 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3587 				    "to DIF device\n");
3588 	}
3589 
3590 	sdeb_write_lock(sip);
3591 	ret = check_device_access_params(scp, lba, num, true);
3592 	if (ret) {
3593 		sdeb_write_unlock(sip);
3594 		return ret;
3595 	}
3596 
3597 	/* DIX + T10 DIF */
3598 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3599 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3600 		case 1: /* Guard tag error */
3601 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3602 				sdeb_write_unlock(sip);
3603 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3604 				return illegal_condition_result;
3605 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3606 				sdeb_write_unlock(sip);
3607 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3608 				return check_condition_result;
3609 			}
3610 			break;
3611 		case 3: /* Reference tag error */
3612 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3613 				sdeb_write_unlock(sip);
3614 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3615 				return illegal_condition_result;
3616 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3617 				sdeb_write_unlock(sip);
3618 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3619 				return check_condition_result;
3620 			}
3621 			break;
3622 		}
3623 	}
3624 
3625 	ret = do_device_access(sip, scp, 0, lba, num, true);
3626 	if (unlikely(scsi_debug_lbp()))
3627 		map_region(sip, lba, num);
3628 	/* If ZBC zone then bump its write pointer */
3629 	if (sdebug_dev_is_zoned(devip))
3630 		zbc_inc_wp(devip, lba, num);
3631 	sdeb_write_unlock(sip);
3632 	if (unlikely(-1 == ret))
3633 		return DID_ERROR << 16;
3634 	else if (unlikely(sdebug_verbose &&
3635 			  (ret < (num * sdebug_sector_size))))
3636 		sdev_printk(KERN_INFO, scp->device,
3637 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3638 			    my_name, num * sdebug_sector_size, ret);
3639 
3640 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3641 		     atomic_read(&sdeb_inject_pending))) {
3642 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3643 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3644 			atomic_set(&sdeb_inject_pending, 0);
3645 			return check_condition_result;
3646 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3647 			/* Logical block guard check failed */
3648 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3649 			atomic_set(&sdeb_inject_pending, 0);
3650 			return illegal_condition_result;
3651 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3652 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3653 			atomic_set(&sdeb_inject_pending, 0);
3654 			return illegal_condition_result;
3655 		}
3656 	}
3657 	return 0;
3658 }
3659 
3660 /*
3661  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3662  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3663  */
3664 static int resp_write_scat(struct scsi_cmnd *scp,
3665 			   struct sdebug_dev_info *devip)
3666 {
3667 	u8 *cmd = scp->cmnd;
3668 	u8 *lrdp = NULL;
3669 	u8 *up;
3670 	struct sdeb_store_info *sip = devip2sip(devip, true);
3671 	u8 wrprotect;
3672 	u16 lbdof, num_lrd, k;
3673 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3674 	u32 lb_size = sdebug_sector_size;
3675 	u32 ei_lba;
3676 	u64 lba;
3677 	int ret, res;
3678 	bool is_16;
3679 	static const u32 lrd_size = 32; /* also the parameter list header size */
3680 
3681 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3682 		is_16 = false;
3683 		wrprotect = (cmd[10] >> 5) & 0x7;
3684 		lbdof = get_unaligned_be16(cmd + 12);
3685 		num_lrd = get_unaligned_be16(cmd + 16);
3686 		bt_len = get_unaligned_be32(cmd + 28);
3687 	} else {        /* that leaves WRITE SCATTERED(16) */
3688 		is_16 = true;
3689 		wrprotect = (cmd[2] >> 5) & 0x7;
3690 		lbdof = get_unaligned_be16(cmd + 4);
3691 		num_lrd = get_unaligned_be16(cmd + 8);
3692 		bt_len = get_unaligned_be32(cmd + 10);
3693 		if (unlikely(have_dif_prot)) {
3694 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3695 			    wrprotect) {
3696 				mk_sense_invalid_opcode(scp);
3697 				return illegal_condition_result;
3698 			}
3699 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3700 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3701 			     wrprotect == 0)
3702 				sdev_printk(KERN_ERR, scp->device,
3703 					    "Unprotected WR to DIF device\n");
3704 		}
3705 	}
3706 	if ((num_lrd == 0) || (bt_len == 0))
3707 		return 0;       /* T10 says these do-nothings are not errors */
3708 	if (lbdof == 0) {
3709 		if (sdebug_verbose)
3710 			sdev_printk(KERN_INFO, scp->device,
3711 				"%s: %s: LB Data Offset field bad\n",
3712 				my_name, __func__);
3713 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3714 		return illegal_condition_result;
3715 	}
3716 	lbdof_blen = lbdof * lb_size;
3717 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3718 		if (sdebug_verbose)
3719 			sdev_printk(KERN_INFO, scp->device,
3720 				"%s: %s: LBA range descriptors don't fit\n",
3721 				my_name, __func__);
3722 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3723 		return illegal_condition_result;
3724 	}
3725 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3726 	if (lrdp == NULL)
3727 		return SCSI_MLQUEUE_HOST_BUSY;
3728 	if (sdebug_verbose)
3729 		sdev_printk(KERN_INFO, scp->device,
3730 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3731 			my_name, __func__, lbdof_blen);
3732 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3733 	if (res == -1) {
3734 		ret = DID_ERROR << 16;
3735 		goto err_out;
3736 	}
3737 
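	/* Data-out buffer layout: a 32 byte header, then num_lrd LBA range
	 * descriptors (32 bytes each), padded out to lbdof logical blocks,
	 * followed by the write data for each descriptor in list order. */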
3738 	sdeb_write_lock(sip);
3739 	sg_off = lbdof_blen;
3740 	/* Spec gives the Buffer Transfer Length as a count of LBs in dout */
3741 	cum_lb = 0;
3742 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3743 		lba = get_unaligned_be64(up + 0);
3744 		num = get_unaligned_be32(up + 8);
3745 		if (sdebug_verbose)
3746 			sdev_printk(KERN_INFO, scp->device,
3747 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3748 				my_name, __func__, k, lba, num, sg_off);
3749 		if (num == 0)
3750 			continue;
3751 		ret = check_device_access_params(scp, lba, num, true);
3752 		if (ret)
3753 			goto err_out_unlock;
3754 		num_by = num * lb_size;
3755 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3756 
3757 		if ((cum_lb + num) > bt_len) {
3758 			if (sdebug_verbose)
3759 				sdev_printk(KERN_INFO, scp->device,
3760 				    "%s: %s: sum of blocks > data provided\n",
3761 				    my_name, __func__);
3762 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3763 					0);
3764 			ret = illegal_condition_result;
3765 			goto err_out_unlock;
3766 		}
3767 
3768 		/* DIX + T10 DIF */
3769 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3770 			int prot_ret = prot_verify_write(scp, lba, num,
3771 							 ei_lba);
3772 
3773 			if (prot_ret) {
3774 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3775 						prot_ret);
3776 				ret = illegal_condition_result;
3777 				goto err_out_unlock;
3778 			}
3779 		}
3780 
3781 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3782 		/* If ZBC zone then bump its write pointer */
3783 		if (sdebug_dev_is_zoned(devip))
3784 			zbc_inc_wp(devip, lba, num);
3785 		if (unlikely(scsi_debug_lbp()))
3786 			map_region(sip, lba, num);
3787 		if (unlikely(-1 == ret)) {
3788 			ret = DID_ERROR << 16;
3789 			goto err_out_unlock;
3790 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3791 			sdev_printk(KERN_INFO, scp->device,
3792 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3793 			    my_name, num_by, ret);
3794 
3795 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3796 			     atomic_read(&sdeb_inject_pending))) {
3797 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3798 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3799 				atomic_set(&sdeb_inject_pending, 0);
3800 				ret = check_condition_result;
3801 				goto err_out_unlock;
3802 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3803 				/* Logical block guard check failed */
3804 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3805 				atomic_set(&sdeb_inject_pending, 0);
3806 				ret = illegal_condition_result;
3807 				goto err_out_unlock;
3808 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3809 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3810 				atomic_set(&sdeb_inject_pending, 0);
3811 				ret = illegal_condition_result;
3812 				goto err_out_unlock;
3813 			}
3814 		}
3815 		sg_off += num_by;
3816 		cum_lb += num;
3817 	}
3818 	ret = 0;
3819 err_out_unlock:
3820 	sdeb_write_unlock(sip);
3821 err_out:
3822 	kfree(lrdp);
3823 	return ret;
3824 }
3825 
3826 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3827 			   u32 ei_lba, bool unmap, bool ndob)
3828 {
3829 	struct scsi_device *sdp = scp->device;
3830 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3831 	unsigned long long i;
3832 	u64 block, lbaa;
3833 	u32 lb_size = sdebug_sector_size;
3834 	int ret;
3835 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3836 						scp->device->hostdata, true);
3837 	u8 *fs1p;
3838 	u8 *fsp;
3839 
3840 	sdeb_write_lock(sip);
3841 
3842 	ret = check_device_access_params(scp, lba, num, true);
3843 	if (ret) {
3844 		sdeb_write_unlock(sip);
3845 		return ret;
3846 	}
3847 
3848 	if (unmap && scsi_debug_lbp()) {
3849 		unmap_region(sip, lba, num);
3850 		goto out;
3851 	}
3852 	lbaa = lba;
3853 	block = do_div(lbaa, sdebug_store_sectors);
3854 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3855 	fsp = sip->storep;
3856 	fs1p = fsp + (block * lb_size);
3857 	if (ndob) {
3858 		memset(fs1p, 0, lb_size);
3859 		ret = 0;
3860 	} else
3861 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3862 
3863 	if (-1 == ret) {
3864 		sdeb_write_unlock(sip);
3865 		return DID_ERROR << 16;
3866 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3867 		sdev_printk(KERN_INFO, scp->device,
3868 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3869 			    my_name, "write same", lb_size, ret);
3870 
3871 	/* Copy first sector to remaining blocks */
3872 	for (i = 1 ; i < num ; i++) {
3873 		lbaa = lba + i;
3874 		block = do_div(lbaa, sdebug_store_sectors);
3875 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3876 	}
3877 	if (scsi_debug_lbp())
3878 		map_region(sip, lba, num);
3879 	/* If ZBC zone then bump its write pointer */
3880 	if (sdebug_dev_is_zoned(devip))
3881 		zbc_inc_wp(devip, lba, num);
3882 out:
3883 	sdeb_write_unlock(sip);
3884 
3885 	return 0;
3886 }
3887 
3888 static int resp_write_same_10(struct scsi_cmnd *scp,
3889 			      struct sdebug_dev_info *devip)
3890 {
3891 	u8 *cmd = scp->cmnd;
3892 	u32 lba;
3893 	u16 num;
3894 	u32 ei_lba = 0;
3895 	bool unmap = false;
3896 
3897 	if (cmd[1] & 0x8) {
3898 		if (sdebug_lbpws10 == 0) {
3899 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3900 			return check_condition_result;
3901 		} else
3902 			unmap = true;
3903 	}
3904 	lba = get_unaligned_be32(cmd + 2);
3905 	num = get_unaligned_be16(cmd + 7);
3906 	if (num > sdebug_write_same_length) {
3907 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3908 		return check_condition_result;
3909 	}
3910 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3911 }
3912 
3913 static int resp_write_same_16(struct scsi_cmnd *scp,
3914 			      struct sdebug_dev_info *devip)
3915 {
3916 	u8 *cmd = scp->cmnd;
3917 	u64 lba;
3918 	u32 num;
3919 	u32 ei_lba = 0;
3920 	bool unmap = false;
3921 	bool ndob = false;
3922 
3923 	if (cmd[1] & 0x8) {	/* UNMAP */
3924 		if (sdebug_lbpws == 0) {
3925 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3926 			return check_condition_result;
3927 		} else
3928 			unmap = true;
3929 	}
3930 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3931 		ndob = true;
3932 	lba = get_unaligned_be64(cmd + 2);
3933 	num = get_unaligned_be32(cmd + 10);
3934 	if (num > sdebug_write_same_length) {
3935 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3936 		return check_condition_result;
3937 	}
3938 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3939 }
3940 
3941 /* Note the mode field is in the same position as the (lower) service action
3942  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3943  * that each mode of this command be reported separately; left for the future. */
3944 static int resp_write_buffer(struct scsi_cmnd *scp,
3945 			     struct sdebug_dev_info *devip)
3946 {
3947 	u8 *cmd = scp->cmnd;
3948 	struct scsi_device *sdp = scp->device;
3949 	struct sdebug_dev_info *dp;
3950 	u8 mode;
3951 
3952 	mode = cmd[1] & 0x1f;
3953 	switch (mode) {
3954 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3955 		/* set UAs on this device only */
3956 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3957 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3958 		break;
3959 	case 0x5:	/* download MC, save and ACT */
3960 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3961 		break;
3962 	case 0x6:	/* download MC with offsets and ACT */
3963 		/* set UAs on most devices (LUs) in this target */
3964 		list_for_each_entry(dp,
3965 				    &devip->sdbg_host->dev_info_list,
3966 				    dev_list)
3967 			if (dp->target == sdp->id) {
3968 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3969 				if (devip != dp)
3970 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3971 						dp->uas_bm);
3972 			}
3973 		break;
3974 	case 0x7:	/* download MC with offsets, save, and ACT */
3975 		/* set UA on all devices (LUs) in this target */
3976 		list_for_each_entry(dp,
3977 				    &devip->sdbg_host->dev_info_list,
3978 				    dev_list)
3979 			if (dp->target == sdp->id)
3980 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3981 					dp->uas_bm);
3982 		break;
3983 	default:
3984 		/* do nothing for this command for other mode values */
3985 		break;
3986 	}
3987 	return 0;
3988 }
3989 
3990 static int resp_comp_write(struct scsi_cmnd *scp,
3991 			   struct sdebug_dev_info *devip)
3992 {
3993 	u8 *cmd = scp->cmnd;
3994 	u8 *arr;
3995 	struct sdeb_store_info *sip = devip2sip(devip, true);
3996 	u64 lba;
3997 	u32 dnum;
3998 	u32 lb_size = sdebug_sector_size;
3999 	u8 num;
4000 	int ret;
4001 	int retval = 0;
4002 
4003 	lba = get_unaligned_be64(cmd + 2);
4004 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4005 	if (0 == num)
4006 		return 0;	/* degenerate case, not an error */
4007 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4008 	    (cmd[1] & 0xe0)) {
4009 		mk_sense_invalid_opcode(scp);
4010 		return check_condition_result;
4011 	}
4012 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4013 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4014 	    (cmd[1] & 0xe0) == 0)
4015 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4016 			    "to DIF device\n");
4017 	ret = check_device_access_params(scp, lba, num, false);
4018 	if (ret)
4019 		return ret;
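	/* COMPARE AND WRITE: the data-out buffer carries num blocks of verify
	 * data followed by num blocks of write data, hence dnum below. */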
4020 	dnum = 2 * num;
4021 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4022 	if (NULL == arr) {
4023 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4024 				INSUFF_RES_ASCQ);
4025 		return check_condition_result;
4026 	}
4027 
4028 	sdeb_write_lock(sip);
4029 
4030 	ret = do_dout_fetch(scp, dnum, arr);
4031 	if (ret == -1) {
4032 		retval = DID_ERROR << 16;
4033 		goto cleanup;
4034 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4035 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4036 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4037 			    dnum * lb_size, ret);
4038 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4039 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4040 		retval = check_condition_result;
4041 		goto cleanup;
4042 	}
4043 	if (scsi_debug_lbp())
4044 		map_region(sip, lba, num);
4045 cleanup:
4046 	sdeb_write_unlock(sip);
4047 	kfree(arr);
4048 	return retval;
4049 }
4050 
4051 struct unmap_block_desc {
4052 	__be64	lba;
4053 	__be32	blocks;
4054 	__be32	__reserved;
4055 };
4056 
4057 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4058 {
4059 	unsigned char *buf;
4060 	struct unmap_block_desc *desc;
4061 	struct sdeb_store_info *sip = devip2sip(devip, true);
4062 	unsigned int i, payload_len, descriptors;
4063 	int ret;
4064 
4065 	if (!scsi_debug_lbp())
4066 		return 0;	/* fib and say it's done */
4067 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4068 	BUG_ON(scsi_bufflen(scp) != payload_len);
4069 
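	/* UNMAP parameter list: an 8 byte header followed by 16 byte block descriptors */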
4070 	descriptors = (payload_len - 8) / 16;
4071 	if (descriptors > sdebug_unmap_max_desc) {
4072 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4073 		return check_condition_result;
4074 	}
4075 
4076 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4077 	if (!buf) {
4078 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4079 				INSUFF_RES_ASCQ);
4080 		return check_condition_result;
4081 	}
4082 
4083 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4084 
4085 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4086 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4087 
4088 	desc = (void *)&buf[8];
4089 
4090 	sdeb_write_lock(sip);
4091 
4092 	for (i = 0 ; i < descriptors ; i++) {
4093 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4094 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4095 
4096 		ret = check_device_access_params(scp, lba, num, true);
4097 		if (ret)
4098 			goto out;
4099 
4100 		unmap_region(sip, lba, num);
4101 	}
4102 
4103 	ret = 0;
4104 
4105 out:
4106 	sdeb_write_unlock(sip);
4107 	kfree(buf);
4108 
4109 	return ret;
4110 }
4111 
4112 #define SDEBUG_GET_LBA_STATUS_LEN 32
4113 
4114 static int resp_get_lba_status(struct scsi_cmnd *scp,
4115 			       struct sdebug_dev_info *devip)
4116 {
4117 	u8 *cmd = scp->cmnd;
4118 	u64 lba;
4119 	u32 alloc_len, mapped, num;
4120 	int ret;
4121 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4122 
4123 	lba = get_unaligned_be64(cmd + 2);
4124 	alloc_len = get_unaligned_be32(cmd + 10);
4125 
4126 	if (alloc_len < 24)
4127 		return 0;
4128 
4129 	ret = check_device_access_params(scp, lba, 1, false);
4130 	if (ret)
4131 		return ret;
4132 
4133 	if (scsi_debug_lbp()) {
4134 		struct sdeb_store_info *sip = devip2sip(devip, true);
4135 
4136 		mapped = map_state(sip, lba, &num);
4137 	} else {
4138 		mapped = 1;
4139 		/* following just in case virtual_gb changed */
4140 		sdebug_capacity = get_sdebug_capacity();
4141 		if (sdebug_capacity - lba <= 0xffffffff)
4142 			num = sdebug_capacity - lba;
4143 		else
4144 			num = 0xffffffff;
4145 	}
4146 
4147 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4148 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4149 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4150 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4151 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4152 
4153 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4154 }
4155 
4156 static int resp_sync_cache(struct scsi_cmnd *scp,
4157 			   struct sdebug_dev_info *devip)
4158 {
4159 	int res = 0;
4160 	u64 lba;
4161 	u32 num_blocks;
4162 	u8 *cmd = scp->cmnd;
4163 
4164 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4165 		lba = get_unaligned_be32(cmd + 2);
4166 		num_blocks = get_unaligned_be16(cmd + 7);
4167 	} else {				/* SYNCHRONIZE_CACHE(16) */
4168 		lba = get_unaligned_be64(cmd + 2);
4169 		num_blocks = get_unaligned_be32(cmd + 10);
4170 	}
4171 	if (lba + num_blocks > sdebug_capacity) {
4172 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4173 		return check_condition_result;
4174 	}
4175 	if (!write_since_sync || (cmd[1] & 0x2))
4176 		res = SDEG_RES_IMMED_MASK;
4177 	else		/* delay if write_since_sync and IMMED clear */
4178 		write_since_sync = false;
4179 	return res;
4180 }
4181 
4182 /*
4183  * Assuming LBA+num_blocks is not out-of-range, this function returns
4184  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4185  * cache, and GOOD status otherwise. Model a disk with a big cache and
4186  * yield CONDITION MET. It actually tries to bring the range of main
4187  * memory into the cache associated with the CPU(s).
4188  */
4189 static int resp_pre_fetch(struct scsi_cmnd *scp,
4190 			  struct sdebug_dev_info *devip)
4191 {
4192 	int res = 0;
4193 	u64 lba;
4194 	u64 block, rest = 0;
4195 	u32 nblks;
4196 	u8 *cmd = scp->cmnd;
4197 	struct sdeb_store_info *sip = devip2sip(devip, true);
4198 	u8 *fsp = sip->storep;
4199 
4200 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4201 		lba = get_unaligned_be32(cmd + 2);
4202 		nblks = get_unaligned_be16(cmd + 7);
4203 	} else {			/* PRE-FETCH(16) */
4204 		lba = get_unaligned_be64(cmd + 2);
4205 		nblks = get_unaligned_be32(cmd + 10);
4206 	}
4207 	if (lba + nblks > sdebug_capacity) {
4208 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4209 		return check_condition_result;
4210 	}
4211 	if (!fsp)
4212 		goto fini;
4213 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4214 	block = do_div(lba, sdebug_store_sectors);
4215 	if (block + nblks > sdebug_store_sectors)
4216 		rest = block + nblks - sdebug_store_sectors;
4217 
4218 	/* Try to bring the PRE-FETCH range into CPU's cache */
4219 	sdeb_read_lock(sip);
4220 	prefetch_range(fsp + (sdebug_sector_size * block),
4221 		       (nblks - rest) * sdebug_sector_size);
4222 	if (rest)
4223 		prefetch_range(fsp, rest * sdebug_sector_size);
4224 	sdeb_read_unlock(sip);
4225 fini:
4226 	if (cmd[1] & 0x2)
4227 		res = SDEG_RES_IMMED_MASK;
4228 	return res | condition_met_result;
4229 }
4230 
4231 #define RL_BUCKET_ELEMS 8
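/* The REPORT LUNS response is built and sent in buckets of RL_BUCKET_ELEMS
 * (8) eight-byte entries, so only a small stack buffer is needed. */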
4232 
4233 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4234  * (W-LUN), the normal Linux scanning logic does not associate it with a
4235  * device (e.g. /dev/sg7). The following magic will make that association:
4236  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4237  * where <n> is a host number. If there are multiple targets in a host then
4238  * the above will associate a W-LUN to each target. To only get a W-LUN
4239  * for target 2, then use "echo '- 2 49409' > scan" .
4240  */
4241 static int resp_report_luns(struct scsi_cmnd *scp,
4242 			    struct sdebug_dev_info *devip)
4243 {
4244 	unsigned char *cmd = scp->cmnd;
4245 	unsigned int alloc_len;
4246 	unsigned char select_report;
4247 	u64 lun;
4248 	struct scsi_lun *lun_p;
4249 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4250 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4251 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4252 	unsigned int tlun_cnt;	/* total LUN count */
4253 	unsigned int rlen;	/* response length (in bytes) */
4254 	int k, j, n, res;
4255 	unsigned int off_rsp = 0;
4256 	const int sz_lun = sizeof(struct scsi_lun);
4257 
4258 	clear_luns_changed_on_target(devip);
4259 
4260 	select_report = cmd[2];
4261 	alloc_len = get_unaligned_be32(cmd + 6);
4262 
4263 	if (alloc_len < 4) {
4264 		pr_err("alloc len too small %d\n", alloc_len);
4265 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4266 		return check_condition_result;
4267 	}
4268 
4269 	switch (select_report) {
4270 	case 0:		/* all LUNs apart from W-LUNs */
4271 		lun_cnt = sdebug_max_luns;
4272 		wlun_cnt = 0;
4273 		break;
4274 	case 1:		/* only W-LUNs */
4275 		lun_cnt = 0;
4276 		wlun_cnt = 1;
4277 		break;
4278 	case 2:		/* all LUNs */
4279 		lun_cnt = sdebug_max_luns;
4280 		wlun_cnt = 1;
4281 		break;
4282 	case 0x10:	/* only administrative LUs */
4283 	case 0x11:	/* see SPC-5 */
4284 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4285 	default:
4286 		pr_debug("select report invalid %d\n", select_report);
4287 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4288 		return check_condition_result;
4289 	}
4290 
4291 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4292 		--lun_cnt;
4293 
4294 	tlun_cnt = lun_cnt + wlun_cnt;
4295 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4296 	scsi_set_resid(scp, scsi_bufflen(scp));
4297 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4298 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4299 
4300 	/* loops rely on the response header having the same size as a LUN entry (both 8 bytes) */
4301 	lun = sdebug_no_lun_0 ? 1 : 0;
4302 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4303 		memset(arr, 0, sizeof(arr));
4304 		lun_p = (struct scsi_lun *)&arr[0];
4305 		if (k == 0) {
4306 			put_unaligned_be32(rlen, &arr[0]);
4307 			++lun_p;
4308 			j = 1;
4309 		}
4310 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4311 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4312 				break;
4313 			int_to_scsilun(lun++, lun_p);
4314 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4315 				lun_p->scsi_lun[0] |= 0x40;
4316 		}
4317 		if (j < RL_BUCKET_ELEMS)
4318 			break;
4319 		n = j * sz_lun;
4320 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4321 		if (res)
4322 			return res;
4323 		off_rsp += n;
4324 	}
4325 	if (wlun_cnt) {
4326 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4327 		++j;
4328 	}
4329 	if (j > 0)
4330 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4331 	return res;
4332 }
4333 
4334 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4335 {
4336 	bool is_bytchk3 = false;
4337 	u8 bytchk;
4338 	int ret, j;
4339 	u32 vnum, a_num, off;
4340 	const u32 lb_size = sdebug_sector_size;
4341 	u64 lba;
4342 	u8 *arr;
4343 	u8 *cmd = scp->cmnd;
4344 	struct sdeb_store_info *sip = devip2sip(devip, true);
4345 
4346 	bytchk = (cmd[1] >> 1) & 0x3;
4347 	if (bytchk == 0) {
4348 		return 0;	/* always claim internal verify okay */
4349 	} else if (bytchk == 2) {
4350 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4351 		return check_condition_result;
4352 	} else if (bytchk == 3) {
4353 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4354 	}
4355 	switch (cmd[0]) {
4356 	case VERIFY_16:
4357 		lba = get_unaligned_be64(cmd + 2);
4358 		vnum = get_unaligned_be32(cmd + 10);
4359 		break;
4360 	case VERIFY:		/* is VERIFY(10) */
4361 		lba = get_unaligned_be32(cmd + 2);
4362 		vnum = get_unaligned_be16(cmd + 7);
4363 		break;
4364 	default:
4365 		mk_sense_invalid_opcode(scp);
4366 		return check_condition_result;
4367 	}
4368 	if (vnum == 0)
4369 		return 0;	/* not an error */
4370 	a_num = is_bytchk3 ? 1 : vnum;
4371 	/* Treat following check like one for read (i.e. no write) access */
4372 	ret = check_device_access_params(scp, lba, a_num, false);
4373 	if (ret)
4374 		return ret;
4375 
4376 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4377 	if (!arr) {
4378 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4379 				INSUFF_RES_ASCQ);
4380 		return check_condition_result;
4381 	}
4382 	/* Not changing store, so only need read access */
4383 	sdeb_read_lock(sip);
4384 
4385 	ret = do_dout_fetch(scp, a_num, arr);
4386 	if (ret == -1) {
4387 		ret = DID_ERROR << 16;
4388 		goto cleanup;
4389 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4390 		sdev_printk(KERN_INFO, scp->device,
4391 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4392 			    my_name, __func__, a_num * lb_size, ret);
4393 	}
4394 	if (is_bytchk3) {
4395 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4396 			memcpy(arr + off, arr, lb_size);
4397 	}
4398 	ret = 0;
4399 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4400 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4401 		ret = check_condition_result;
4402 		goto cleanup;
4403 	}
4404 cleanup:
4405 	sdeb_read_unlock(sip);
4406 	kfree(arr);
4407 	return ret;
4408 }
4409 
4410 #define RZONES_DESC_HD 64
4411 
4412 /* Report zones depending on start LBA and reporting options */
4413 static int resp_report_zones(struct scsi_cmnd *scp,
4414 			     struct sdebug_dev_info *devip)
4415 {
4416 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4417 	int ret = 0;
4418 	u32 alloc_len, rep_opts, rep_len;
4419 	bool partial;
4420 	u64 lba, zs_lba;
4421 	u8 *arr = NULL, *desc;
4422 	u8 *cmd = scp->cmnd;
4423 	struct sdeb_zone_state *zsp;
4424 	struct sdeb_store_info *sip = devip2sip(devip, false);
4425 
4426 	if (!sdebug_dev_is_zoned(devip)) {
4427 		mk_sense_invalid_opcode(scp);
4428 		return check_condition_result;
4429 	}
4430 	zs_lba = get_unaligned_be64(cmd + 2);
4431 	alloc_len = get_unaligned_be32(cmd + 10);
4432 	if (alloc_len == 0)
4433 		return 0;	/* not an error */
4434 	rep_opts = cmd[14] & 0x3f;
4435 	partial = cmd[14] & 0x80;
4436 
4437 	if (zs_lba >= sdebug_capacity) {
4438 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4439 		return check_condition_result;
4440 	}
4441 
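	/* Response layout: a 64 byte header followed by 64 byte zone descriptors */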
4442 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4443 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4444 			    max_zones);
4445 
4446 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4447 	if (!arr) {
4448 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4449 				INSUFF_RES_ASCQ);
4450 		return check_condition_result;
4451 	}
4452 
4453 	sdeb_read_lock(sip);
4454 
4455 	desc = arr + 64;
4456 	for (i = 0; i < max_zones; i++) {
4457 		lba = zs_lba + devip->zsize * i;
4458 		if (lba > sdebug_capacity)
4459 			break;
4460 		zsp = zbc_zone(devip, lba);
4461 		switch (rep_opts) {
4462 		case 0x00:
4463 			/* All zones */
4464 			break;
4465 		case 0x01:
4466 			/* Empty zones */
4467 			if (zsp->z_cond != ZC1_EMPTY)
4468 				continue;
4469 			break;
4470 		case 0x02:
4471 			/* Implicit open zones */
4472 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4473 				continue;
4474 			break;
4475 		case 0x03:
4476 			/* Explicit open zones */
4477 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4478 				continue;
4479 			break;
4480 		case 0x04:
4481 			/* Closed zones */
4482 			if (zsp->z_cond != ZC4_CLOSED)
4483 				continue;
4484 			break;
4485 		case 0x05:
4486 			/* Full zones */
4487 			if (zsp->z_cond != ZC5_FULL)
4488 				continue;
4489 			break;
4490 		case 0x06:
4491 		case 0x07:
4492 		case 0x10:
4493 			/*
4494 			 * Read-only, offline and reset-WP-recommended are
4495 			 * not emulated: no zones to report.
4496 			 */
4497 			continue;
4498 		case 0x11:
4499 			/* non-seq-resource set */
4500 			if (!zsp->z_non_seq_resource)
4501 				continue;
4502 			break;
4503 		case 0x3f:
4504 			/* Not write pointer (conventional) zones */
4505 			if (!zbc_zone_is_conv(zsp))
4506 				continue;
4507 			break;
4508 		default:
4509 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4510 					INVALID_FIELD_IN_CDB, 0);
4511 			ret = check_condition_result;
4512 			goto fini;
4513 		}
4514 
4515 		if (nrz < rep_max_zones) {
4516 			/* Fill zone descriptor */
4517 			desc[0] = zsp->z_type;
4518 			desc[1] = zsp->z_cond << 4;
4519 			if (zsp->z_non_seq_resource)
4520 				desc[1] |= 1 << 1;
4521 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4522 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4523 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4524 			desc += 64;
4525 		}
4526 
4527 		if (partial && nrz >= rep_max_zones)
4528 			break;
4529 
4530 		nrz++;
4531 	}
4532 
4533 	/* Report header */
4534 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4535 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4536 
4537 	rep_len = (unsigned long)desc - (unsigned long)arr;
4538 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4539 
4540 fini:
4541 	sdeb_read_unlock(sip);
4542 	kfree(arr);
4543 	return ret;
4544 }
4545 
4546 /* Logic transplanted from tcmu-runner, file_zbc.c */
4547 static void zbc_open_all(struct sdebug_dev_info *devip)
4548 {
4549 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4550 	unsigned int i;
4551 
4552 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4553 		if (zsp->z_cond == ZC4_CLOSED)
4554 			zbc_open_zone(devip, &devip->zstate[i], true);
4555 	}
4556 }
4557 
4558 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4559 {
4560 	int res = 0;
4561 	u64 z_id;
4562 	enum sdebug_z_cond zc;
4563 	u8 *cmd = scp->cmnd;
4564 	struct sdeb_zone_state *zsp;
4565 	bool all = cmd[14] & 0x01;
4566 	struct sdeb_store_info *sip = devip2sip(devip, false);
4567 
4568 	if (!sdebug_dev_is_zoned(devip)) {
4569 		mk_sense_invalid_opcode(scp);
4570 		return check_condition_result;
4571 	}
4572 
4573 	sdeb_write_lock(sip);
4574 
4575 	if (all) {
4576 		/* Check if all closed zones can be opened */
4577 		if (devip->max_open &&
4578 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4579 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4580 					INSUFF_ZONE_ASCQ);
4581 			res = check_condition_result;
4582 			goto fini;
4583 		}
4584 		/* Open all closed zones */
4585 		zbc_open_all(devip);
4586 		goto fini;
4587 	}
4588 
4589 	/* Open the specified zone */
4590 	z_id = get_unaligned_be64(cmd + 2);
4591 	if (z_id >= sdebug_capacity) {
4592 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4593 		res = check_condition_result;
4594 		goto fini;
4595 	}
4596 
4597 	zsp = zbc_zone(devip, z_id);
4598 	if (z_id != zsp->z_start) {
4599 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4600 		res = check_condition_result;
4601 		goto fini;
4602 	}
4603 	if (zbc_zone_is_conv(zsp)) {
4604 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4605 		res = check_condition_result;
4606 		goto fini;
4607 	}
4608 
4609 	zc = zsp->z_cond;
4610 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4611 		goto fini;
4612 
4613 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4614 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4615 				INSUFF_ZONE_ASCQ);
4616 		res = check_condition_result;
4617 		goto fini;
4618 	}
4619 
4620 	zbc_open_zone(devip, zsp, true);
4621 fini:
4622 	sdeb_write_unlock(sip);
4623 	return res;
4624 }
4625 
4626 static void zbc_close_all(struct sdebug_dev_info *devip)
4627 {
4628 	unsigned int i;
4629 
4630 	for (i = 0; i < devip->nr_zones; i++)
4631 		zbc_close_zone(devip, &devip->zstate[i]);
4632 }
4633 
4634 static int resp_close_zone(struct scsi_cmnd *scp,
4635 			   struct sdebug_dev_info *devip)
4636 {
4637 	int res = 0;
4638 	u64 z_id;
4639 	u8 *cmd = scp->cmnd;
4640 	struct sdeb_zone_state *zsp;
4641 	bool all = cmd[14] & 0x01;
4642 	struct sdeb_store_info *sip = devip2sip(devip, false);
4643 
4644 	if (!sdebug_dev_is_zoned(devip)) {
4645 		mk_sense_invalid_opcode(scp);
4646 		return check_condition_result;
4647 	}
4648 
4649 	sdeb_write_lock(sip);
4650 
4651 	if (all) {
4652 		zbc_close_all(devip);
4653 		goto fini;
4654 	}
4655 
4656 	/* Close specified zone */
4657 	z_id = get_unaligned_be64(cmd + 2);
4658 	if (z_id >= sdebug_capacity) {
4659 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4660 		res = check_condition_result;
4661 		goto fini;
4662 	}
4663 
4664 	zsp = zbc_zone(devip, z_id);
4665 	if (z_id != zsp->z_start) {
4666 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4667 		res = check_condition_result;
4668 		goto fini;
4669 	}
4670 	if (zbc_zone_is_conv(zsp)) {
4671 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4672 		res = check_condition_result;
4673 		goto fini;
4674 	}
4675 
4676 	zbc_close_zone(devip, zsp);
4677 fini:
4678 	sdeb_write_unlock(sip);
4679 	return res;
4680 }
4681 
4682 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4683 			    struct sdeb_zone_state *zsp, bool empty)
4684 {
4685 	enum sdebug_z_cond zc = zsp->z_cond;
4686 
4687 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4688 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4689 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4690 			zbc_close_zone(devip, zsp);
4691 		if (zsp->z_cond == ZC4_CLOSED)
4692 			devip->nr_closed--;
4693 		zsp->z_wp = zsp->z_start + zsp->z_size;
4694 		zsp->z_cond = ZC5_FULL;
4695 	}
4696 }
4697 
4698 static void zbc_finish_all(struct sdebug_dev_info *devip)
4699 {
4700 	unsigned int i;
4701 
4702 	for (i = 0; i < devip->nr_zones; i++)
4703 		zbc_finish_zone(devip, &devip->zstate[i], false);
4704 }
4705 
4706 static int resp_finish_zone(struct scsi_cmnd *scp,
4707 			    struct sdebug_dev_info *devip)
4708 {
4709 	struct sdeb_zone_state *zsp;
4710 	int res = 0;
4711 	u64 z_id;
4712 	u8 *cmd = scp->cmnd;
4713 	bool all = cmd[14] & 0x01;
4714 	struct sdeb_store_info *sip = devip2sip(devip, false);
4715 
4716 	if (!sdebug_dev_is_zoned(devip)) {
4717 		mk_sense_invalid_opcode(scp);
4718 		return check_condition_result;
4719 	}
4720 
4721 	sdeb_write_lock(sip);
4722 
4723 	if (all) {
4724 		zbc_finish_all(devip);
4725 		goto fini;
4726 	}
4727 
4728 	/* Finish the specified zone */
4729 	z_id = get_unaligned_be64(cmd + 2);
4730 	if (z_id >= sdebug_capacity) {
4731 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4732 		res = check_condition_result;
4733 		goto fini;
4734 	}
4735 
4736 	zsp = zbc_zone(devip, z_id);
4737 	if (z_id != zsp->z_start) {
4738 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4739 		res = check_condition_result;
4740 		goto fini;
4741 	}
4742 	if (zbc_zone_is_conv(zsp)) {
4743 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4744 		res = check_condition_result;
4745 		goto fini;
4746 	}
4747 
4748 	zbc_finish_zone(devip, zsp, true);
4749 fini:
4750 	sdeb_write_unlock(sip);
4751 	return res;
4752 }
4753 
4754 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4755 			 struct sdeb_zone_state *zsp)
4756 {
4757 	enum sdebug_z_cond zc;
4758 	struct sdeb_store_info *sip = devip2sip(devip, false);
4759 
4760 	if (zbc_zone_is_conv(zsp))
4761 		return;
4762 
4763 	zc = zsp->z_cond;
4764 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4765 		zbc_close_zone(devip, zsp);
4766 
4767 	if (zsp->z_cond == ZC4_CLOSED)
4768 		devip->nr_closed--;
4769 
4770 	if (zsp->z_wp > zsp->z_start)
4771 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4772 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4773 
4774 	zsp->z_non_seq_resource = false;
4775 	zsp->z_wp = zsp->z_start;
4776 	zsp->z_cond = ZC1_EMPTY;
4777 }
4778 
4779 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4780 {
4781 	unsigned int i;
4782 
4783 	for (i = 0; i < devip->nr_zones; i++)
4784 		zbc_rwp_zone(devip, &devip->zstate[i]);
4785 }
4786 
4787 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4788 {
4789 	struct sdeb_zone_state *zsp;
4790 	int res = 0;
4791 	u64 z_id;
4792 	u8 *cmd = scp->cmnd;
4793 	bool all = cmd[14] & 0x01;
4794 	struct sdeb_store_info *sip = devip2sip(devip, false);
4795 
4796 	if (!sdebug_dev_is_zoned(devip)) {
4797 		mk_sense_invalid_opcode(scp);
4798 		return check_condition_result;
4799 	}
4800 
4801 	sdeb_write_lock(sip);
4802 
4803 	if (all) {
4804 		zbc_rwp_all(devip);
4805 		goto fini;
4806 	}
4807 
4808 	z_id = get_unaligned_be64(cmd + 2);
4809 	if (z_id >= sdebug_capacity) {
4810 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4811 		res = check_condition_result;
4812 		goto fini;
4813 	}
4814 
4815 	zsp = zbc_zone(devip, z_id);
4816 	if (z_id != zsp->z_start) {
4817 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4818 		res = check_condition_result;
4819 		goto fini;
4820 	}
4821 	if (zbc_zone_is_conv(zsp)) {
4822 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4823 		res = check_condition_result;
4824 		goto fini;
4825 	}
4826 
4827 	zbc_rwp_zone(devip, zsp);
4828 fini:
4829 	sdeb_write_unlock(sip);
4830 	return res;
4831 }
4832 
4833 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4834 {
4835 	u16 hwq;
4836 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4837 
4838 	hwq = blk_mq_unique_tag_to_hwq(tag);
4839 
4840 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4841 	if (WARN_ON_ONCE(hwq >= submit_queues))
4842 		hwq = 0;
4843 
4844 	return sdebug_q_arr + hwq;
4845 }
4846 
4847 static u32 get_tag(struct scsi_cmnd *cmnd)
4848 {
4849 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4850 }
4851 
4852 /* Queued (deferred) command completions converge here. */
4853 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4854 {
4855 	bool aborted = sd_dp->aborted;
4856 	int qc_idx;
4857 	int retiring = 0;
4858 	unsigned long iflags;
4859 	struct sdebug_queue *sqp;
4860 	struct sdebug_queued_cmd *sqcp;
4861 	struct scsi_cmnd *scp;
4862 	struct sdebug_dev_info *devip;
4863 
4864 	if (unlikely(aborted))
4865 		sd_dp->aborted = false;
4866 	qc_idx = sd_dp->qc_idx;
4867 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4868 	if (sdebug_statistics) {
4869 		atomic_inc(&sdebug_completions);
4870 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4871 			atomic_inc(&sdebug_miss_cpus);
4872 	}
4873 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4874 		pr_err("wild qc_idx=%d\n", qc_idx);
4875 		return;
4876 	}
4877 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4878 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4879 	sqcp = &sqp->qc_arr[qc_idx];
4880 	scp = sqcp->a_cmnd;
4881 	if (unlikely(scp == NULL)) {
4882 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4883 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4884 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4885 		return;
4886 	}
4887 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4888 	if (likely(devip))
4889 		atomic_dec(&devip->num_in_q);
4890 	else
4891 		pr_err("devip=NULL\n");
4892 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4893 		retiring = 1;
4894 
4895 	sqcp->a_cmnd = NULL;
4896 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4897 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4898 		pr_err("Unexpected completion\n");
4899 		return;
4900 	}
4901 
4902 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4903 		int k, retval;
4904 
4905 		retval = atomic_read(&retired_max_queue);
4906 		if (qc_idx >= retval) {
4907 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4908 			pr_err("index %d too large\n", retval);
4909 			return;
4910 		}
4911 		k = find_last_bit(sqp->in_use_bm, retval);
4912 		if ((k < sdebug_max_queue) || (k == retval))
4913 			atomic_set(&retired_max_queue, 0);
4914 		else
4915 			atomic_set(&retired_max_queue, k + 1);
4916 	}
4917 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4918 	if (unlikely(aborted)) {
4919 		if (sdebug_verbose)
4920 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4921 		return;
4922 	}
4923 	scsi_done(scp); /* callback to mid level */
4924 }
4925 
4926 /* When high resolution timer goes off this function is called. */
4927 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4928 {
4929 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4930 						  hrt);
4931 	sdebug_q_cmd_complete(sd_dp);
4932 	return HRTIMER_NORESTART;
4933 }
4934 
4935 /* When work queue schedules work, it calls this function. */
4936 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4937 {
4938 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4939 						  ew.work);
4940 	sdebug_q_cmd_complete(sd_dp);
4941 }
4942 
4943 static bool got_shared_uuid;
4944 static uuid_t shared_uuid;
4945 
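/*
 * Build the zone state array for a ZBC device. Any conventional zones
 * come first, followed by sequential write required (host-managed) or
 * sequential write preferred (host-aware) zones. The last zone may be
 * smaller than devip->zsize when the capacity is not a multiple of the
 * zone size. For example, zone_size_mb=128 with a 512 byte sector size
 * gives zsize = (128 << 20) >> 9 = 262144 blocks.
 */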
4946 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4947 {
4948 	struct sdeb_zone_state *zsp;
4949 	sector_t capacity = get_sdebug_capacity();
4950 	sector_t zstart = 0;
4951 	unsigned int i;
4952 
4953 	/*
4954 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4955 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4956 	 * use the specified zone size checking that at least 2 zones can be
4957 	 * created for the device.
4958 	 */
4959 	if (!sdeb_zbc_zone_size_mb) {
4960 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4961 			>> ilog2(sdebug_sector_size);
4962 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4963 			devip->zsize >>= 1;
4964 		if (devip->zsize < 2) {
4965 			pr_err("Device capacity too small\n");
4966 			return -EINVAL;
4967 		}
4968 	} else {
4969 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4970 			pr_err("Zone size is not a power of 2\n");
4971 			return -EINVAL;
4972 		}
4973 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4974 			>> ilog2(sdebug_sector_size);
4975 		if (devip->zsize >= capacity) {
4976 			pr_err("Zone size too large for device capacity\n");
4977 			return -EINVAL;
4978 		}
4979 	}
4980 
4981 	devip->zsize_shift = ilog2(devip->zsize);
4982 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4983 
4984 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4985 		pr_err("Number of conventional zones too large\n");
4986 		return -EINVAL;
4987 	}
4988 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4989 
4990 	if (devip->zmodel == BLK_ZONED_HM) {
4991 		/* sdeb_zbc_max_open can be 0, meaning "not reported" (no limit) */
4992 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4993 			devip->max_open = (devip->nr_zones - 1) / 2;
4994 		else
4995 			devip->max_open = sdeb_zbc_max_open;
4996 	}
4997 
4998 	devip->zstate = kcalloc(devip->nr_zones,
4999 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5000 	if (!devip->zstate)
5001 		return -ENOMEM;
5002 
5003 	for (i = 0; i < devip->nr_zones; i++) {
5004 		zsp = &devip->zstate[i];
5005 
5006 		zsp->z_start = zstart;
5007 
5008 		if (i < devip->nr_conv_zones) {
5009 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
5010 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5011 			zsp->z_wp = (sector_t)-1;
5012 		} else {
5013 			if (devip->zmodel == BLK_ZONED_HM)
5014 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
5015 			else
5016 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
5017 			zsp->z_cond = ZC1_EMPTY;
5018 			zsp->z_wp = zsp->z_start;
5019 		}
5020 
5021 		if (zsp->z_start + devip->zsize < capacity)
5022 			zsp->z_size = devip->zsize;
5023 		else
5024 			zsp->z_size = capacity - zsp->z_start;
5025 
5026 		zstart += zsp->z_size;
5027 	}
5028 
5029 	return 0;
5030 }
5031 
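/*
 * Allocate and initialize per-LUN state. uuid_ctl=1 generates a unique
 * LU name per device while uuid_ctl=2 makes all devices share a single
 * generated UUID. When tur_ms_to_ready > 0 the device starts "stopped"
 * (stopped == 2) so TEST UNIT READY is not immediately successful.
 */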
5032 static struct sdebug_dev_info *sdebug_device_create(
5033 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5034 {
5035 	struct sdebug_dev_info *devip;
5036 
5037 	devip = kzalloc(sizeof(*devip), flags);
5038 	if (devip) {
5039 		if (sdebug_uuid_ctl == 1)
5040 			uuid_gen(&devip->lu_name);
5041 		else if (sdebug_uuid_ctl == 2) {
5042 			if (got_shared_uuid)
5043 				devip->lu_name = shared_uuid;
5044 			else {
5045 				uuid_gen(&shared_uuid);
5046 				got_shared_uuid = true;
5047 				devip->lu_name = shared_uuid;
5048 			}
5049 		}
5050 		devip->sdbg_host = sdbg_host;
5051 		if (sdeb_zbc_in_use) {
5052 			devip->zmodel = sdeb_zbc_model;
5053 			if (sdebug_device_create_zones(devip)) {
5054 				kfree(devip);
5055 				return NULL;
5056 			}
5057 		} else {
5058 			devip->zmodel = BLK_ZONED_NONE;
5059 		}
5061 		devip->create_ts = ktime_get_boottime();
5062 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5063 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5064 	}
5065 	return devip;
5066 }
5067 
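/*
 * Return the devip matching <channel, target, lun> of @sdev; failing
 * that, reuse an unused entry or allocate a fresh one with GFP_ATOMIC.
 * A POWER ON OCCURRED unit attention is queued on the claimed entry.
 */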
5068 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5069 {
5070 	struct sdebug_host_info *sdbg_host;
5071 	struct sdebug_dev_info *open_devip = NULL;
5072 	struct sdebug_dev_info *devip;
5073 
5074 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5075 	if (!sdbg_host) {
5076 		pr_err("Host info NULL\n");
5077 		return NULL;
5078 	}
5079 
5080 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5081 		if (devip->used && devip->channel == sdev->channel &&
5082 		    devip->target == sdev->id && devip->lun == sdev->lun)
5083 			return devip;
5084 		if (!devip->used && !open_devip)
5085 			open_devip = devip;
5089 	}
5090 	if (!open_devip) { /* try and make a new one */
5091 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5092 		if (!open_devip) {
5093 			pr_err("out of memory at line %d\n", __LINE__);
5094 			return NULL;
5095 		}
5096 	}
5097 
5098 	open_devip->channel = sdev->channel;
5099 	open_devip->target = sdev->id;
5100 	open_devip->lun = sdev->lun;
5101 	open_devip->sdbg_host = sdbg_host;
5102 	atomic_set(&open_devip->num_in_q, 0);
5103 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5104 	open_devip->used = true;
5105 	return open_devip;
5106 }
5107 
5108 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5109 {
5110 	if (sdebug_verbose)
5111 		pr_info("slave_alloc <%u %u %u %llu>\n",
5112 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5113 	return 0;
5114 }
5115 
5116 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5117 {
5118 	struct sdebug_dev_info *devip =
5119 			(struct sdebug_dev_info *)sdp->hostdata;
5120 
5121 	if (sdebug_verbose)
5122 		pr_info("slave_configure <%u %u %u %llu>\n",
5123 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5124 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5125 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5126 	if (smp_load_acquire(&sdebug_deflect_incoming)) {
5127 		pr_info("Exit early due to deflect_incoming\n");
5128 		return 1;
5129 	}
5130 	if (devip == NULL) {
5131 		devip = find_build_dev_info(sdp);
5132 		if (devip == NULL)
5133 			return 1;  /* no resources, will be marked offline */
5134 	}
5135 	sdp->hostdata = devip;
5136 	if (sdebug_no_uld)
5137 		sdp->no_uld_attach = 1;
5138 	config_cdb_len(sdp);
5139 	return 0;
5140 }
5141 
5142 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5143 {
5144 	struct sdebug_dev_info *devip =
5145 		(struct sdebug_dev_info *)sdp->hostdata;
5146 
5147 	if (sdebug_verbose)
5148 		pr_info("slave_destroy <%u %u %u %llu>\n",
5149 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5150 	if (devip) {
5151 		/* make this slot available for re-use */
5152 		devip->used = false;
5153 		sdp->hostdata = NULL;
5154 	}
5155 }
5156 
5157 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5158 			   enum sdeb_defer_type defer_t)
5159 {
5160 	if (!sd_dp)
5161 		return;
5162 	if (defer_t == SDEB_DEFER_HRT)
5163 		hrtimer_cancel(&sd_dp->hrt);
5164 	else if (defer_t == SDEB_DEFER_WQ)
5165 		cancel_work_sync(&sd_dp->ew.work);
5166 }
5167 
5168 /* If @cmnd is found, delete its timer or work queue and return true;
5169    else return false. */
5170 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5171 {
5172 	unsigned long iflags;
5173 	int j, k, qmax, r_qmax;
5174 	enum sdeb_defer_type l_defer_t;
5175 	struct sdebug_queue *sqp;
5176 	struct sdebug_queued_cmd *sqcp;
5177 	struct sdebug_dev_info *devip;
5178 	struct sdebug_defer *sd_dp;
5179 
5180 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5181 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5182 		qmax = sdebug_max_queue;
5183 		r_qmax = atomic_read(&retired_max_queue);
5184 		if (r_qmax > qmax)
5185 			qmax = r_qmax;
5186 		for (k = 0; k < qmax; ++k) {
5187 			if (test_bit(k, sqp->in_use_bm)) {
5188 				sqcp = &sqp->qc_arr[k];
5189 				if (cmnd != sqcp->a_cmnd)
5190 					continue;
5191 				/* found */
5192 				devip = (struct sdebug_dev_info *)
5193 						cmnd->device->hostdata;
5194 				if (devip)
5195 					atomic_dec(&devip->num_in_q);
5196 				sqcp->a_cmnd = NULL;
5197 				sd_dp = sqcp->sd_dp;
5198 				if (sd_dp) {
5199 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5200 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5201 				} else
5202 					l_defer_t = SDEB_DEFER_NONE;
5203 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5204 				stop_qc_helper(sd_dp, l_defer_t);
5205 				clear_bit(k, sqp->in_use_bm);
5206 				return true;
5207 			}
5208 		}
5209 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5210 	}
5211 	return false;
5212 }
5213 
5214 /* Deletes (stops) timers or work queues of all queued commands */
5215 static void stop_all_queued(bool done_with_no_conn)
5216 {
5217 	unsigned long iflags;
5218 	int j, k;
5219 	enum sdeb_defer_type l_defer_t;
5220 	struct sdebug_queue *sqp;
5221 	struct sdebug_queued_cmd *sqcp;
5222 	struct sdebug_dev_info *devip;
5223 	struct sdebug_defer *sd_dp;
5224 	struct scsi_cmnd *scp;
5225 
5226 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5227 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5228 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5229 			if (test_bit(k, sqp->in_use_bm)) {
5230 				sqcp = &sqp->qc_arr[k];
5231 				scp = sqcp->a_cmnd;
5232 				if (!scp)
5233 					continue;
5234 				devip = (struct sdebug_dev_info *)
5235 					sqcp->a_cmnd->device->hostdata;
5236 				if (devip)
5237 					atomic_dec(&devip->num_in_q);
5238 				sqcp->a_cmnd = NULL;
5239 				sd_dp = sqcp->sd_dp;
5240 				if (sd_dp) {
5241 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5242 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5243 				} else
5244 					l_defer_t = SDEB_DEFER_NONE;
5245 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5246 				stop_qc_helper(sd_dp, l_defer_t);
5247 				if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
5248 					scp->result = DID_NO_CONNECT << 16;
5249 					scsi_done(scp);
5250 				}
5251 				clear_bit(k, sqp->in_use_bm);
5252 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5253 			}
5254 		}
5255 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5256 	}
5257 }
5258 
5259 /* Free queued command memory on heap */
5260 static void free_all_queued(void)
5261 {
5262 	int j, k;
5263 	struct sdebug_queue *sqp;
5264 	struct sdebug_queued_cmd *sqcp;
5265 
5266 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5267 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5268 			sqcp = &sqp->qc_arr[k];
5269 			kfree(sqcp->sd_dp);
5270 			sqcp->sd_dp = NULL;
5271 		}
5272 	}
5273 }
5274 
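/*
 * Error handler (EH) callbacks: each increments its counter, optionally
 * logs, and always returns SUCCESS so recovery is never seen to fail.
 * The reset handlers also raise unit attentions on affected devices.
 */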
5275 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5276 {
5277 	bool ok;
5278 
5279 	++num_aborts;
5280 	if (SCpnt) {
5281 		ok = stop_queued_cmnd(SCpnt);
5282 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5283 			sdev_printk(KERN_INFO, SCpnt->device,
5284 				    "%s: command%s found\n", __func__,
5285 				    ok ? "" : " not");
5286 	}
5287 	return SUCCESS;
5288 }
5289 
5290 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5291 {
5292 	++num_dev_resets;
5293 	if (SCpnt && SCpnt->device) {
5294 		struct scsi_device *sdp = SCpnt->device;
5295 		struct sdebug_dev_info *devip =
5296 				(struct sdebug_dev_info *)sdp->hostdata;
5297 
5298 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5299 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5300 		if (devip)
5301 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5302 	}
5303 	return SUCCESS;
5304 }
5305 
5306 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5307 {
5308 	struct sdebug_host_info *sdbg_host;
5309 	struct sdebug_dev_info *devip;
5310 	struct scsi_device *sdp;
5311 	struct Scsi_Host *hp;
5312 	int k = 0;
5313 
5314 	++num_target_resets;
5315 	if (!SCpnt)
5316 		goto lie;
5317 	sdp = SCpnt->device;
5318 	if (!sdp)
5319 		goto lie;
5320 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5321 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5322 	hp = sdp->host;
5323 	if (!hp)
5324 		goto lie;
5325 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5326 	if (sdbg_host) {
5327 		list_for_each_entry(devip,
5328 				    &sdbg_host->dev_info_list,
5329 				    dev_list)
5330 			if (devip->target == sdp->id) {
5331 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5332 				++k;
5333 			}
5334 	}
5335 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5336 		sdev_printk(KERN_INFO, sdp,
5337 			    "%s: %d device(s) found in target\n", __func__, k);
5338 lie:
5339 	return SUCCESS;
5340 }
5341 
5342 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5343 {
5344 	struct sdebug_host_info *sdbg_host;
5345 	struct sdebug_dev_info *devip;
5346 	struct scsi_device *sdp;
5347 	struct Scsi_Host *hp;
5348 	int k = 0;
5349 
5350 	++num_bus_resets;
5351 	if (!(SCpnt && SCpnt->device))
5352 		goto lie;
5353 	sdp = SCpnt->device;
5354 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5355 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5356 	hp = sdp->host;
5357 	if (hp) {
5358 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5359 		if (sdbg_host) {
5360 			list_for_each_entry(devip,
5361 					    &sdbg_host->dev_info_list,
5362 					    dev_list) {
5363 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5364 				++k;
5365 			}
5366 		}
5367 	}
5368 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5369 		sdev_printk(KERN_INFO, sdp,
5370 			    "%s: %d device(s) found in host\n", __func__, k);
5371 lie:
5372 	return SUCCESS;
5373 }
5374 
5375 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5376 {
5377 	struct sdebug_host_info *sdbg_host;
5378 	struct sdebug_dev_info *devip;
5379 	int k = 0;
5380 
5381 	++num_host_resets;
5382 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5383 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5384 	spin_lock(&sdebug_host_list_lock);
5385 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5386 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5387 				    dev_list) {
5388 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5389 			++k;
5390 		}
5391 	}
5392 	spin_unlock(&sdebug_host_list_lock);
5393 	stop_all_queued(false);
5394 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5395 		sdev_printk(KERN_INFO, SCpnt->device,
5396 			    "%s: %d device(s) found\n", __func__, k);
5397 	return SUCCESS;
5398 }
5399 
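/*
 * Write an MBR-style partition table into the first block of the ram
 * store: the 0x55 0xAA signature at offsets 510/511 and up to
 * SDEBUG_MAX_PARTS equal-sized type 0x83 (Linux) entries at offset
 * 0x1be. Requires at least a 1 MiB store and num_parts > 0.
 */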
5400 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5401 {
5402 	struct msdos_partition *pp;
5403 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5404 	int sectors_per_part, num_sectors, k;
5405 	int heads_by_sects, start_sec, end_sec;
5406 
5407 	/* assume partition table already zeroed */
5408 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5409 		return;
5410 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5411 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5412 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5413 	}
5414 	num_sectors = (int)get_sdebug_capacity();
5415 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5416 			   / sdebug_num_parts;
5417 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5418 	starts[0] = sdebug_sectors_per;
5419 	max_part_secs = sectors_per_part;
5420 	for (k = 1; k < sdebug_num_parts; ++k) {
5421 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5422 			    * heads_by_sects;
5423 		if (starts[k] - starts[k - 1] < max_part_secs)
5424 			max_part_secs = starts[k] - starts[k - 1];
5425 	}
5426 	starts[sdebug_num_parts] = num_sectors;
5427 	starts[sdebug_num_parts + 1] = 0;
5428 
5429 	ramp[510] = 0x55;	/* magic partition markings */
5430 	ramp[511] = 0xAA;
5431 	pp = (struct msdos_partition *)(ramp + 0x1be);
5432 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5433 		start_sec = starts[k];
5434 		end_sec = starts[k] + max_part_secs - 1;
5435 		pp->boot_ind = 0;
5436 
5437 		pp->cyl = start_sec / heads_by_sects;
5438 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5439 			   / sdebug_sectors_per;
5440 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5441 
5442 		pp->end_cyl = end_sec / heads_by_sects;
5443 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5444 			       / sdebug_sectors_per;
5445 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5446 
5447 		pp->start_sect = cpu_to_le32(start_sec);
5448 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5449 		pp->sys_ind = 0x83;	/* plain Linux partition */
5450 	}
5451 }
5452 
5453 static void sdeb_block_all_queues(void)
5454 {
5455 	int j;
5456 	struct sdebug_queue *sqp;
5457 
5458 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5459 		atomic_set(&sqp->blocked, (int)true);
5460 }
5461 
5462 static void sdeb_unblock_all_queues(void)
5463 {
5464 	int j;
5465 	struct sdebug_queue *sqp;
5466 
5467 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5468 		atomic_set(&sqp->blocked, (int)false);
5469 }
5470 
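/*
 * Add @num_hosts simulated hosts. With per_host_store active (and
 * fake_rw off) a store previously marked SDEB_XA_NOT_IN_USE is reused
 * when available; otherwise each new host gets a new store.
 */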
5471 static void
5472 sdeb_add_n_hosts(int num_hosts)
5473 {
5474 	if (num_hosts < 1)
5475 		return;
5476 	do {
5477 		bool found;
5478 		unsigned long idx;
5479 		struct sdeb_store_info *sip;
5480 		bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
5481 
5482 		found = false;
5483 		if (want_phs) {
5484 			xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
5485 				sdeb_most_recent_idx = (int)idx;
5486 				found = true;
5487 				break;
5488 			}
5489 			if (found)	/* re-use case */
5490 				sdebug_add_host_helper((int)idx);
5491 			else
5492 				sdebug_do_add_host(true	/* make new store */);
5493 		} else {
5494 			sdebug_do_add_host(false);
5495 		}
5496 	} while (--num_hosts);
5497 }
5498 
5499 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5500  * commands will be processed normally before triggers occur.
5501  */
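/* e.g. with every_nth=100, a sdebug_cmnd_count of 250 is rounded to 200 */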
5502 static void tweak_cmnd_count(void)
5503 {
5504 	int count, modulo;
5505 
5506 	modulo = abs(sdebug_every_nth);
5507 	if (modulo < 2)
5508 		return;
5509 	sdeb_block_all_queues();
5510 	count = atomic_read(&sdebug_cmnd_count);
5511 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5512 	sdeb_unblock_all_queues();
5513 }
5514 
5515 static void clear_queue_stats(void)
5516 {
5517 	atomic_set(&sdebug_cmnd_count, 0);
5518 	atomic_set(&sdebug_completions, 0);
5519 	atomic_set(&sdebug_miss_cpus, 0);
5520 	atomic_set(&sdebug_a_tsf, 0);
5521 }
5522 
5523 static bool inject_on_this_cmd(void)
5524 {
5525 	if (sdebug_every_nth == 0)
5526 		return false;
5527 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5528 }
5529 
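/*
 * While hosts are being removed, new commands are failed with
 * DID_NO_CONNECT, except cache flushes (SYNCHRONIZE CACHE (10) and
 * (16)) which are reported as successful, presumably so upper-layer
 * teardown can proceed cleanly.
 */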
5530 static int process_deflect_incoming(struct scsi_cmnd *scp)
5531 {
5532 	u8 opcode = scp->cmnd[0];
5533 
5534 	if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
5535 		return 0;
5536 	return DID_NO_CONNECT << 16;
5537 }
5538 
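/*
 * If an ndelay is shorter than this, schedule_resp() deducts the time
 * already consumed by command processing and, when the deadline has
 * already passed, completes the command inline rather than arming an
 * hrtimer.
 */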
5539 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5540 
5541 /* Complete the processing of the thread that queued a SCSI command to this
5542  * driver. It either completes the command by calling scsi_done() or
5543  * schedules an hrtimer or work queue item and then returns 0. Returns
5544  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5545  */
5546 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5547 			 int scsi_result,
5548 			 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
5549 			 int delta_jiff, int ndelay)
5550 {
5551 	bool new_sd_dp;
5552 	bool inject = false;
5553 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5554 	int k, num_in_q, qdepth;
5555 	unsigned long iflags;
5556 	u64 ns_from_boot = 0;
5557 	struct sdebug_queue *sqp;
5558 	struct sdebug_queued_cmd *sqcp;
5559 	struct scsi_device *sdp;
5560 	struct sdebug_defer *sd_dp;
5561 
5562 	if (unlikely(devip == NULL)) {
5563 		if (scsi_result == 0)
5564 			scsi_result = DID_NO_CONNECT << 16;
5565 		goto respond_in_thread;
5566 	}
5567 	sdp = cmnd->device;
5568 
5569 	if (delta_jiff == 0) {
5570 		sqp = get_queue(cmnd);
5571 		if (atomic_read(&sqp->blocked)) {
5572 			if (smp_load_acquire(&sdebug_deflect_incoming))
5573 				return process_deflect_incoming(cmnd);
5574 			else
5575 				return SCSI_MLQUEUE_HOST_BUSY;
5576 		}
5577 		goto respond_in_thread;
5578 	}
5579 
5580 	sqp = get_queue(cmnd);
5581 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5582 	if (unlikely(atomic_read(&sqp->blocked))) {
5583 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5584 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
5585 			scsi_result = process_deflect_incoming(cmnd);
5586 			goto respond_in_thread;
5587 		}
5588 		if (sdebug_verbose)
5589 			pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
5590 		return SCSI_MLQUEUE_HOST_BUSY;
5591 	}
5592 	num_in_q = atomic_read(&devip->num_in_q);
5593 	qdepth = cmnd->device->queue_depth;
5594 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5595 		if (scsi_result) {
5596 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5597 			goto respond_in_thread;
5598 		} else
5599 			scsi_result = device_qfull_result;
5600 	} else if (unlikely(sdebug_every_nth &&
5601 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5602 			    (scsi_result == 0))) {
5603 		if ((num_in_q == (qdepth - 1)) &&
5604 		    (atomic_inc_return(&sdebug_a_tsf) >=
5605 		     abs(sdebug_every_nth))) {
5606 			atomic_set(&sdebug_a_tsf, 0);
5607 			inject = true;
5608 			scsi_result = device_qfull_result;
5609 		}
5610 	}
5611 
5612 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5613 	if (unlikely(k >= sdebug_max_queue)) {
5614 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5615 		if (scsi_result)
5616 			goto respond_in_thread;
5617 		scsi_result = device_qfull_result;
5618 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5619 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5620 				    __func__, sdebug_max_queue);
5621 		goto respond_in_thread;
5622 	}
5623 	set_bit(k, sqp->in_use_bm);
5624 	atomic_inc(&devip->num_in_q);
5625 	sqcp = &sqp->qc_arr[k];
5626 	sqcp->a_cmnd = cmnd;
5627 	cmnd->host_scribble = (unsigned char *)sqcp;
5628 	sd_dp = sqcp->sd_dp;
5629 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5630 
5631 	if (!sd_dp) {
5632 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5633 		if (!sd_dp) {
5634 			atomic_dec(&devip->num_in_q);
5635 			clear_bit(k, sqp->in_use_bm);
5636 			return SCSI_MLQUEUE_HOST_BUSY;
5637 		}
5638 		new_sd_dp = true;
5639 	} else {
5640 		new_sd_dp = false;
5641 	}
5642 
5643 	/* Set the hostwide tag */
5644 	if (sdebug_host_max_queue)
5645 		sd_dp->hc_idx = get_tag(cmnd);
5646 
5647 	if (polled)
5648 		ns_from_boot = ktime_get_boottime_ns();
5649 
5650 	/* one of the resp_*() response functions is called here */
5651 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5652 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5653 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5654 		delta_jiff = ndelay = 0;
5655 	}
5656 	if (cmnd->result == 0 && scsi_result != 0)
5657 		cmnd->result = scsi_result;
5658 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5659 		if (atomic_read(&sdeb_inject_pending)) {
5660 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5661 			atomic_set(&sdeb_inject_pending, 0);
5662 			cmnd->result = check_condition_result;
5663 		}
5664 	}
5665 
5666 	if (unlikely(sdebug_verbose && cmnd->result))
5667 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5668 			    __func__, cmnd->result);
5669 
5670 	if (delta_jiff > 0 || ndelay > 0) {
5671 		ktime_t kt;
5672 
5673 		if (delta_jiff > 0) {
5674 			u64 ns = jiffies_to_nsecs(delta_jiff);
5675 
5676 			if (sdebug_random && ns < U32_MAX) {
5677 				ns = prandom_u32_max((u32)ns);
5678 			} else if (sdebug_random) {
5679 				ns >>= 12;	/* scale to 4 usec precision */
5680 				if (ns < U32_MAX)	/* over 4 hours max */
5681 					ns = prandom_u32_max((u32)ns);
5682 				ns <<= 12;
5683 			}
5684 			kt = ns_to_ktime(ns);
5685 		} else {	/* ndelay has a 4.2 second max */
5686 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5687 					     (u32)ndelay;
5688 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5689 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5690 
5691 				if (kt <= d) {	/* elapsed duration >= kt */
5692 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5693 					sqcp->a_cmnd = NULL;
5694 					atomic_dec(&devip->num_in_q);
5695 					clear_bit(k, sqp->in_use_bm);
5696 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5697 					if (new_sd_dp)
5698 						kfree(sd_dp);
5699 					/* call scsi_done() from this thread */
5700 					scsi_done(cmnd);
5701 					return 0;
5702 				}
5703 				/* otherwise reduce kt by elapsed time */
5704 				kt -= d;
5705 			}
5706 		}
5707 		if (polled) {
5708 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5709 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5710 			if (!sd_dp->init_poll) {
5711 				sd_dp->init_poll = true;
5712 				sqcp->sd_dp = sd_dp;
5713 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5714 				sd_dp->qc_idx = k;
5715 			}
5716 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5717 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5718 		} else {
5719 			if (!sd_dp->init_hrt) {
5720 				sd_dp->init_hrt = true;
5721 				sqcp->sd_dp = sd_dp;
5722 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5723 					     HRTIMER_MODE_REL_PINNED);
5724 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5725 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5726 				sd_dp->qc_idx = k;
5727 			}
5728 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5729 			/* schedule the invocation of scsi_done() for a later time */
5730 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5731 		}
5732 		if (sdebug_statistics)
5733 			sd_dp->issuing_cpu = raw_smp_processor_id();
5734 	} else {	/* delta_jiff < 0, use work queue */
5735 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5736 			     atomic_read(&sdeb_inject_pending)))
5737 			sd_dp->aborted = true;
5738 		if (polled) {
5739 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5740 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5741 			if (!sd_dp->init_poll) {
5742 				sd_dp->init_poll = true;
5743 				sqcp->sd_dp = sd_dp;
5744 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5745 				sd_dp->qc_idx = k;
5746 			}
5747 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5748 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5749 		} else {
5750 			if (!sd_dp->init_wq) {
5751 				sd_dp->init_wq = true;
5752 				sqcp->sd_dp = sd_dp;
5753 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5754 				sd_dp->qc_idx = k;
5755 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5756 			}
5757 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5758 			schedule_work(&sd_dp->ew.work);
5759 		}
5760 		if (sdebug_statistics)
5761 			sd_dp->issuing_cpu = raw_smp_processor_id();
5762 		if (unlikely(sd_dp->aborted)) {
5763 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5764 				    scsi_cmd_to_rq(cmnd)->tag);
5765 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5766 			atomic_set(&sdeb_inject_pending, 0);
5767 			sd_dp->aborted = false;
5768 		}
5769 	}
5770 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5771 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5772 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5773 	return 0;
5774 
5775 respond_in_thread:	/* call back to mid-layer using invocation thread */
5776 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5777 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5778 	if (cmnd->result == 0 && scsi_result != 0) {
5779 		cmnd->result = scsi_result;
5780 		if (sdebug_verbose)
5781 			pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
5782 				blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
5783 	}
5784 	scsi_done(cmnd);
5785 	return 0;
5786 }
5787 
5788 /* Note: The following macros create attribute files in the
5789    /sys/module/scsi_debug/parameters directory. Unfortunately this
5790    driver is unaware of changes made via those files and cannot trigger
5791    the auxiliary actions that it performs when the corresponding
5792    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5793  */
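/*
 * Example invocation (parameter values are illustrative only):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 num_parts=1
 */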
5794 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5795 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5796 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5797 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5798 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5799 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5800 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5801 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5802 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5803 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5804 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5805 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5806 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5807 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5808 module_param_string(inq_product, sdebug_inq_product_id,
5809 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5810 module_param_string(inq_rev, sdebug_inq_product_rev,
5811 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5812 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5813 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5814 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5815 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5816 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5817 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5818 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5819 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5820 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5821 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5822 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5823 		   S_IRUGO | S_IWUSR);
5824 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5825 		   S_IRUGO | S_IWUSR);
5826 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5827 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5828 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5829 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5830 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5831 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5832 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5833 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5834 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5835 module_param_named(per_host_store, sdebug_per_host_store, bool,
5836 		   S_IRUGO | S_IWUSR);
5837 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5838 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5839 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5840 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5841 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5842 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5843 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5845 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5846 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5847 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5848 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5849 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5850 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5851 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5852 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5853 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5854 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5855 		   S_IRUGO | S_IWUSR);
5856 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5857 module_param_named(write_same_length, sdebug_write_same_length, int,
5858 		   S_IRUGO | S_IWUSR);
5859 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5860 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5861 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5862 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5863 
5864 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5865 MODULE_DESCRIPTION("SCSI debug adapter driver");
5866 MODULE_LICENSE("GPL");
5867 MODULE_VERSION(SDEBUG_VERSION);
5868 
5869 MODULE_PARM_DESC(add_host, "add n hosts; in sysfs a negative value removes host(s) (def=1)");
5870 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5871 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5872 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5873 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5874 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5875 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5876 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5877 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5878 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5879 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5880 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5881 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5882 MODULE_PARM_DESC(host_max_queue,
5883 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5884 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5885 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5886 		 SDEBUG_VERSION "\")");
5887 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5888 MODULE_PARM_DESC(lbprz,
5889 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5890 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5891 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5892 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5893 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5894 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5895 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5896 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5897 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5898 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5899 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5900 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5901 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5902 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5903 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5904 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5905 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5906 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5907 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5908 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5909 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5910 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5911 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5912 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5913 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5914 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5915 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5916 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5917 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5918 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5919 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5920 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5921 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5922 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5923 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5924 MODULE_PARM_DESC(uuid_ctl,
5925 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5926 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5927 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5928 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5929 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5930 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5931 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5932 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5933 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5934 
5935 #define SDEBUG_INFO_LEN 256
5936 static char sdebug_info[SDEBUG_INFO_LEN];
5937 
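/*
 * Build and return the mid-layer's info string for this driver. Note
 * that sdebug_info is a single static buffer shared by all hosts.
 */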
5938 static const char *scsi_debug_info(struct Scsi_Host *shp)
5939 {
5940 	int k;
5941 
5942 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5943 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5944 	if (k >= (SDEBUG_INFO_LEN - 1))
5945 		return sdebug_info;
5946 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5947 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5948 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5949 		  "statistics", (int)sdebug_statistics);
5950 	return sdebug_info;
5951 }
5952 
5953 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5954 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5955 				 int length)
5956 {
5957 	char arr[16];
5958 	int opts;
5959 	int min_len = length > 15 ? 15 : length;
5960 
5961 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5962 		return -EACCES;
5963 	memcpy(arr, buffer, min_len);
5964 	arr[min_len] = '\0';
5965 	if (1 != sscanf(arr, "%d", &opts))
5966 		return -EINVAL;
5967 	sdebug_opts = opts;
5968 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5969 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5970 	if (sdebug_every_nth != 0)
5971 		tweak_cmnd_count();
5972 	return length;
5973 }
5974 
5975 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5976  * same for each scsi_debug host (if more than one). Some of the counters
5977  * output are not atomic so they might be inaccurate on a busy system. */
5978 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5979 {
5980 	int f, j, l;
5981 	struct sdebug_queue *sqp;
5982 	struct sdebug_host_info *sdhp;
5983 
5984 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5985 		   SDEBUG_VERSION, sdebug_version_date);
5986 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5987 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5988 		   sdebug_opts, sdebug_every_nth);
5989 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5990 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5991 		   sdebug_sector_size, "bytes");
5992 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5993 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5994 		   num_aborts);
5995 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5996 		   num_dev_resets, num_target_resets, num_bus_resets,
5997 		   num_host_resets);
5998 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5999 		   dix_reads, dix_writes, dif_errors);
6000 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6001 		   sdebug_statistics);
6002 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6003 		   atomic_read(&sdebug_cmnd_count),
6004 		   atomic_read(&sdebug_completions),
6005 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6006 		   atomic_read(&sdebug_a_tsf),
6007 		   atomic_read(&sdeb_mq_poll_count));
6008 
6009 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6010 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6011 		seq_printf(m, "  queue %d:\n", j);
6012 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6013 		if (f != sdebug_max_queue) {
6014 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6015 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6016 				   "first,last bits", f, l);
6017 		}
6018 	}
6019 
6020 	seq_printf(m, "this host_no=%d\n", host->host_no);
6021 	if (!xa_empty(per_store_ap)) {
6022 		bool niu;
6023 		int idx;
6024 		unsigned long l_idx;
6025 		struct sdeb_store_info *sip;
6026 
6027 		seq_puts(m, "\nhost list:\n");
6028 		j = 0;
6029 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6030 			idx = sdhp->si_idx;
6031 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6032 				   sdhp->shost->host_no, idx);
6033 			++j;
6034 		}
6035 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6036 			   sdeb_most_recent_idx);
6037 		j = 0;
6038 		xa_for_each(per_store_ap, l_idx, sip) {
6039 			niu = xa_get_mark(per_store_ap, l_idx,
6040 					  SDEB_XA_NOT_IN_USE);
6041 			idx = (int)l_idx;
6042 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6043 				   (niu ? "  not_in_use" : ""));
6044 			++j;
6045 		}
6046 	}
6047 	return 0;
6048 }
6049 
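/*
 * These driver attributes can also be changed at run time, e.g.:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */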
6050 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6051 {
6052 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6053 }
6054 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6055  * of delay is jiffies.
6056  */
6057 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6058 			   size_t count)
6059 {
6060 	int jdelay, res;
6061 
6062 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6063 		res = count;
6064 		if (sdebug_jdelay != jdelay) {
6065 			int j, k;
6066 			struct sdebug_queue *sqp;
6067 
6068 			sdeb_block_all_queues();
6069 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6070 			     ++j, ++sqp) {
6071 				k = find_first_bit(sqp->in_use_bm,
6072 						   sdebug_max_queue);
6073 				if (k != sdebug_max_queue) {
6074 					res = -EBUSY;   /* queued commands */
6075 					break;
6076 				}
6077 			}
6078 			if (res > 0) {
6079 				sdebug_jdelay = jdelay;
6080 				sdebug_ndelay = 0;
6081 			}
6082 			sdeb_unblock_all_queues();
6083 		}
6084 		return res;
6085 	}
6086 	return -EINVAL;
6087 }
6088 static DRIVER_ATTR_RW(delay);
6089 
6090 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6091 {
6092 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6093 }
6094 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6095 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6096 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6097 			    size_t count)
6098 {
6099 	int ndelay, res;
6100 
6101 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6102 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6103 		res = count;
6104 		if (sdebug_ndelay != ndelay) {
6105 			int j, k;
6106 			struct sdebug_queue *sqp;
6107 
6108 			sdeb_block_all_queues();
6109 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6110 			     ++j, ++sqp) {
6111 				k = find_first_bit(sqp->in_use_bm,
6112 						   sdebug_max_queue);
6113 				if (k != sdebug_max_queue) {
6114 					res = -EBUSY;   /* queued commands */
6115 					break;
6116 				}
6117 			}
6118 			if (res > 0) {
6119 				sdebug_ndelay = ndelay;
6120 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6121 							: DEF_JDELAY;
6122 			}
6123 			sdeb_unblock_all_queues();
6124 		}
6125 		return res;
6126 	}
6127 	return -EINVAL;
6128 }
6129 static DRIVER_ATTR_RW(ndelay);
6130 
6131 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6132 {
6133 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6134 }
6135 
6136 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6137 			  size_t count)
6138 {
6139 	int opts;
6140 	char work[20];
6141 
6142 	if (sscanf(buf, "%10s", work) == 1) {
6143 		if (strncasecmp(work, "0x", 2) == 0) {
6144 			if (kstrtoint(work + 2, 16, &opts) == 0)
6145 				goto opts_done;
6146 		} else {
6147 			if (kstrtoint(work, 10, &opts) == 0)
6148 				goto opts_done;
6149 		}
6150 	}
6151 	return -EINVAL;
6152 opts_done:
6153 	sdebug_opts = opts;
6154 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6155 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6156 	tweak_cmnd_count();
6157 	return count;
6158 }
6159 static DRIVER_ATTR_RW(opts);
6160 
6161 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6162 {
6163 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6164 }
6165 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6166 			   size_t count)
6167 {
6168 	int n;
6169 
6170 	/* Cannot change from or to TYPE_ZBC with sysfs */
6171 	if (sdebug_ptype == TYPE_ZBC)
6172 		return -EINVAL;
6173 
6174 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6175 		if (n == TYPE_ZBC)
6176 			return -EINVAL;
6177 		sdebug_ptype = n;
6178 		return count;
6179 	}
6180 	return -EINVAL;
6181 }
6182 static DRIVER_ATTR_RW(ptype);
6183 
6184 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6185 {
6186 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6187 }
6188 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6189 			    size_t count)
6190 {
6191 	int n;
6192 
6193 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6194 		sdebug_dsense = n;
6195 		return count;
6196 	}
6197 	return -EINVAL;
6198 }
6199 static DRIVER_ATTR_RW(dsense);
6200 
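/*
 * fake_rw transitions: 1 --> 0 attaches (creating one if necessary) a
 * single store shared by all hosts; 0 --> 1 erases all stores apart
 * from the first. Writing the current value again is a no-op.
 */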
6201 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6202 {
6203 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6204 }
6205 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6206 			     size_t count)
6207 {
6208 	int n, idx;
6209 
6210 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6211 		bool want_store = (n == 0);
6212 		struct sdebug_host_info *sdhp;
6213 
6214 		n = (n > 0);
6215 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6216 		if (sdebug_fake_rw == n)
6217 			return count;	/* not transitioning so do nothing */
6218 
6219 		if (want_store) {	/* 1 --> 0 transition, set up store */
6220 			if (sdeb_first_idx < 0) {
6221 				idx = sdebug_add_store();
6222 				if (idx < 0)
6223 					return idx;
6224 			} else {
6225 				idx = sdeb_first_idx;
6226 				xa_clear_mark(per_store_ap, idx,
6227 					      SDEB_XA_NOT_IN_USE);
6228 			}
6229 			/* make all hosts use same store */
6230 			list_for_each_entry(sdhp, &sdebug_host_list,
6231 					    host_list) {
6232 				if (sdhp->si_idx != idx) {
6233 					xa_set_mark(per_store_ap, sdhp->si_idx,
6234 						    SDEB_XA_NOT_IN_USE);
6235 					sdhp->si_idx = idx;
6236 				}
6237 			}
6238 			sdeb_most_recent_idx = idx;
6239 		} else {	/* 0 --> 1 transition is trigger for shrink */
6240 			sdebug_erase_all_stores(true /* apart from first */);
6241 		}
6242 		sdebug_fake_rw = n;
6243 		return count;
6244 	}
6245 	return -EINVAL;
6246 }
6247 static DRIVER_ATTR_RW(fake_rw);
6248 
6249 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6250 {
6251 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6252 }
6253 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6254 			      size_t count)
6255 {
6256 	int n;
6257 
6258 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6259 		sdebug_no_lun_0 = n;
6260 		return count;
6261 	}
6262 	return -EINVAL;
6263 }
6264 static DRIVER_ATTR_RW(no_lun_0);
6265 
6266 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6267 {
6268 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6269 }
6270 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6271 			      size_t count)
6272 {
6273 	int n;
6274 
6275 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6276 		sdebug_num_tgts = n;
6277 		sdebug_max_tgts_luns();
6278 		return count;
6279 	}
6280 	return -EINVAL;
6281 }
6282 static DRIVER_ATTR_RW(num_tgts);
6283 
6284 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6285 {
6286 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6287 }
6288 static DRIVER_ATTR_RO(dev_size_mb);
6289 
6290 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6291 {
6292 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6293 }
6294 
6295 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6296 				    size_t count)
6297 {
6298 	bool v;
6299 
6300 	if (kstrtobool(buf, &v))
6301 		return -EINVAL;
6302 
6303 	sdebug_per_host_store = v;
6304 	return count;
6305 }
6306 static DRIVER_ATTR_RW(per_host_store);
6307 
6308 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6309 {
6310 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6311 }
6312 static DRIVER_ATTR_RO(num_parts);
6313 
6314 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6315 {
6316 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6317 }
6318 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6319 			       size_t count)
6320 {
6321 	int nth;
6322 	char work[20];
6323 
6324 	if (sscanf(buf, "%10s", work) == 1) {
6325 		if (strncasecmp(work, "0x", 2) == 0) {
6326 			if (kstrtoint(work + 2, 16, &nth) == 0)
6327 				goto every_nth_done;
6328 		} else {
6329 			if (kstrtoint(work, 10, &nth) == 0)
6330 				goto every_nth_done;
6331 		}
6332 	}
6333 	return -EINVAL;
6334 
6335 every_nth_done:
6336 	sdebug_every_nth = nth;
6337 	if (nth && !sdebug_statistics) {
6338 		pr_info("every_nth needs statistics=1, set it\n");
6339 		sdebug_statistics = true;
6340 	}
6341 	tweak_cmnd_count();
6342 	return count;
6343 }
6344 static DRIVER_ATTR_RW(every_nth);
6345 
6346 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6347 {
6348 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6349 }
6350 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6351 				size_t count)
6352 {
6353 	int n;
6354 	bool changed;
6355 
6356 	if (kstrtoint(buf, 0, &n))
6357 		return -EINVAL;
6358 	if (n >= 0) {
6359 		if (n > (int)SAM_LUN_AM_FLAT) {
6360 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6361 			return -EINVAL;
6362 		}
6363 		changed = ((int)sdebug_lun_am != n);
6364 		sdebug_lun_am = n;
6365 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6366 			struct sdebug_host_info *sdhp;
6367 			struct sdebug_dev_info *dp;
6368 
6369 			spin_lock(&sdebug_host_list_lock);
6370 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6371 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6372 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6373 				}
6374 			}
6375 			spin_unlock(&sdebug_host_list_lock);
6376 		}
6377 		return count;
6378 	}
6379 	return -EINVAL;
6380 }
6381 static DRIVER_ATTR_RW(lun_format);
6382 
6383 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6384 {
6385 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6386 }
6387 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6388 			      size_t count)
6389 {
6390 	int n;
6391 	bool changed;
6392 
6393 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6394 		if (n > 256) {
6395 			pr_warn("max_luns can be no more than 256\n");
6396 			return -EINVAL;
6397 		}
6398 		changed = (sdebug_max_luns != n);
6399 		sdebug_max_luns = n;
6400 		sdebug_max_tgts_luns();
6401 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6402 			struct sdebug_host_info *sdhp;
6403 			struct sdebug_dev_info *dp;
6404 
6405 			spin_lock(&sdebug_host_list_lock);
6406 			list_for_each_entry(sdhp, &sdebug_host_list,
6407 					    host_list) {
6408 				list_for_each_entry(dp, &sdhp->dev_info_list,
6409 						    dev_list) {
6410 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6411 						dp->uas_bm);
6412 				}
6413 			}
6414 			spin_unlock(&sdebug_host_list_lock);
6415 		}
6416 		return count;
6417 	}
6418 	return -EINVAL;
6419 }
6420 static DRIVER_ATTR_RW(max_luns);
6421 
6422 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6423 {
6424 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6425 }
6426 /* N.B. max_queue can be changed while there are queued commands. In-flight
6427  * commands beyond the new max_queue will still be completed. */
6428 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6429 			       size_t count)
6430 {
6431 	int j, n, k, a;
6432 	struct sdebug_queue *sqp;
6433 
6434 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6435 	    (n <= SDEBUG_CANQUEUE) &&
6436 	    (sdebug_host_max_queue == 0)) {
6437 		sdeb_block_all_queues();
6438 		k = 0;
6439 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6440 		     ++j, ++sqp) {
6441 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6442 			if (a > k)
6443 				k = a;
6444 		}
6445 		sdebug_max_queue = n;
6446 		if (k == SDEBUG_CANQUEUE)
6447 			atomic_set(&retired_max_queue, 0);
6448 		else if (k >= n)
6449 			atomic_set(&retired_max_queue, k + 1);
6450 		else
6451 			atomic_set(&retired_max_queue, 0);
6452 		sdeb_unblock_all_queues();
6453 		return count;
6454 	}
6455 	return -EINVAL;
6456 }
6457 static DRIVER_ATTR_RW(max_queue);
6458 
6459 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6460 {
6461 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6462 }
6463 
6464 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6465 {
6466 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6467 }
6468 
6469 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6470 {
6471 	bool v;
6472 
6473 	if (kstrtobool(buf, &v))
6474 		return -EINVAL;
6475 
6476 	sdebug_no_rwlock = v;
6477 	return count;
6478 }
6479 static DRIVER_ATTR_RW(no_rwlock);
6480 
6481 /*
6482  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6483  * in range [0, sdebug_host_max_queue), we can't change it.
6484  */
6485 static DRIVER_ATTR_RO(host_max_queue);
6486 
6487 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6488 {
6489 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6490 }
6491 static DRIVER_ATTR_RO(no_uld);
6492 
6493 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6494 {
6495 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6496 }
6497 static DRIVER_ATTR_RO(scsi_level);
6498 
6499 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6500 {
6501 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6502 }
6503 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6504 				size_t count)
6505 {
6506 	int n;
6507 	bool changed;
6508 
6509 	/* Ignore capacity change for ZBC drives for now */
6510 	if (sdeb_zbc_in_use)
6511 		return -EOPNOTSUPP;
6512 
6513 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6514 		changed = (sdebug_virtual_gb != n);
6515 		sdebug_virtual_gb = n;
6516 		sdebug_capacity = get_sdebug_capacity();
6517 		if (changed) {
6518 			struct sdebug_host_info *sdhp;
6519 			struct sdebug_dev_info *dp;
6520 
6521 			spin_lock(&sdebug_host_list_lock);
6522 			list_for_each_entry(sdhp, &sdebug_host_list,
6523 					    host_list) {
6524 				list_for_each_entry(dp, &sdhp->dev_info_list,
6525 						    dev_list) {
6526 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6527 						dp->uas_bm);
6528 				}
6529 			}
6530 			spin_unlock(&sdebug_host_list_lock);
6531 		}
6532 		return count;
6533 	}
6534 	return -EINVAL;
6535 }
6536 static DRIVER_ATTR_RW(virtual_gb);
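
/*
 * Illustrative usage (editor's sketch): "echo 8 > virtual_gb" under
 * /sys/bus/pseudo/drivers/scsi_debug makes the devices report an 8 GiB
 * capacity and, because the value changed, queues a "capacity data has
 * changed" unit attention for every device, as arranged in
 * virtual_gb_store() above.
 */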
6537 
6538 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6539 {
6540 	/* show the absolute number of hosts currently active */
6541 	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
6542 }
6543 
6544 /*
6545  * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
6546  * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
6547  * Returns -EBUSY if another add_host sysfs invocation is active.
6548  */
6549 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6550 			      size_t count)
6551 {
6552 	int delta_hosts;
6553 
6554 	if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
6555 		return -EINVAL;
6556 	if (sdebug_verbose)
6557 		pr_info("prior num_hosts=%d, num_to_add=%d\n",
6558 			atomic_read(&sdebug_num_hosts), delta_hosts);
6559 	if (delta_hosts == 0)
6560 		return count;
6561 	if (mutex_trylock(&add_host_mutex) == 0)
6562 		return -EBUSY;
6563 	if (delta_hosts > 0) {
6564 		sdeb_add_n_hosts(delta_hosts);
6565 	} else if (delta_hosts < 0) {
6566 		smp_store_release(&sdebug_deflect_incoming, true);
6567 		sdeb_block_all_queues();
6568 		if (-delta_hosts >= atomic_read(&sdebug_num_hosts))
6569 			stop_all_queued(true);
6570 		do {
6571 			if (atomic_read(&sdebug_num_hosts) < 1) {
6572 				free_all_queued();
6573 				break;
6574 			}
6575 			sdebug_do_remove_host(false);
6576 		} while (++delta_hosts);
6577 		sdeb_unblock_all_queues();
6578 		smp_store_release(&sdebug_deflect_incoming, false);
6579 	}
6580 	mutex_unlock(&add_host_mutex);
6581 	if (sdebug_verbose)
6582 		pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
6583 	return count;
6584 }
6585 static DRIVER_ATTR_RW(add_host);
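
/*
 * Illustrative usage of the attribute above (paths per the sysfs note
 * further below):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host      # add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host     # remove last
 *   echo -9999 > /sys/bus/pseudo/drivers/scsi_debug/add_host  # remove all
 *
 * A concurrent invocation fails with -EBUSY via the mutex_trylock() above.
 */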
6586 
6587 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6588 {
6589 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6590 }
6591 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6592 				    size_t count)
6593 {
6594 	int n;
6595 
6596 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6597 		sdebug_vpd_use_hostno = n;
6598 		return count;
6599 	}
6600 	return -EINVAL;
6601 }
6602 static DRIVER_ATTR_RW(vpd_use_hostno);
6603 
6604 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6605 {
6606 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6607 }
6608 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6609 				size_t count)
6610 {
6611 	int n;
6612 
6613 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6614 		if (n > 0) {
6615 			sdebug_statistics = true;
6616 		} else {
6617 			clear_queue_stats();
6618 			sdebug_statistics = false;
6619 		}
6620 		return count;
6621 	}
6622 	return -EINVAL;
6623 }
6624 static DRIVER_ATTR_RW(statistics);
6625 
6626 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6627 {
6628 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6629 }
6630 static DRIVER_ATTR_RO(sector_size);
6631 
6632 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6633 {
6634 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6635 }
6636 static DRIVER_ATTR_RO(submit_queues);
6637 
6638 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6639 {
6640 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6641 }
6642 static DRIVER_ATTR_RO(dix);
6643 
6644 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6645 {
6646 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6647 }
6648 static DRIVER_ATTR_RO(dif);
6649 
6650 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6651 {
6652 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6653 }
6654 static DRIVER_ATTR_RO(guard);
6655 
6656 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6657 {
6658 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6659 }
6660 static DRIVER_ATTR_RO(ato);
6661 
6662 static ssize_t map_show(struct device_driver *ddp, char *buf)
6663 {
6664 	ssize_t count = 0;
6665 
6666 	if (!scsi_debug_lbp())
6667 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6668 				 sdebug_store_sectors);
6669 
6670 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6671 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6672 
6673 		if (sip)
6674 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6675 					  (int)map_size, sip->map_storep);
6676 	}
6677 	buf[count++] = '\n';
6678 	buf[count] = '\0';
6679 
6680 	return count;
6681 }
6682 static DRIVER_ATTR_RO(map);
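
/*
 * Illustrative read of the attribute above (assuming LBP is enabled and
 * fake_rw=0): "cat /sys/bus/pseudo/drivers/scsi_debug/map" prints the set
 * bits of the provisioning bitmap as ranges via the %*pbl format, so
 * output resembling "0-1,64-127" shows which map indexes are mapped.
 */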
6683 
6684 static ssize_t random_show(struct device_driver *ddp, char *buf)
6685 {
6686 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6687 }
6688 
6689 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6690 			    size_t count)
6691 {
6692 	bool v;
6693 
6694 	if (kstrtobool(buf, &v))
6695 		return -EINVAL;
6696 
6697 	sdebug_random = v;
6698 	return count;
6699 }
6700 static DRIVER_ATTR_RW(random);
6701 
6702 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6703 {
6704 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6705 }
6706 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6707 			       size_t count)
6708 {
6709 	int n;
6710 
6711 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6712 		sdebug_removable = (n > 0);
6713 		return count;
6714 	}
6715 	return -EINVAL;
6716 }
6717 static DRIVER_ATTR_RW(removable);
6718 
6719 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6720 {
6721 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6722 }
6723 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6724 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6725 			       size_t count)
6726 {
6727 	int n;
6728 
6729 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6730 		sdebug_host_lock = (n > 0);
6731 		return count;
6732 	}
6733 	return -EINVAL;
6734 }
6735 static DRIVER_ATTR_RW(host_lock);
6736 
6737 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6738 {
6739 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6740 }
6741 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6742 			    size_t count)
6743 {
6744 	int n;
6745 
6746 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6747 		sdebug_strict = (n > 0);
6748 		return count;
6749 	}
6750 	return -EINVAL;
6751 }
6752 static DRIVER_ATTR_RW(strict);
6753 
6754 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6755 {
6756 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6757 }
6758 static DRIVER_ATTR_RO(uuid_ctl);
6759 
6760 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6761 {
6762 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6763 }
6764 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6765 			     size_t count)
6766 {
6767 	int ret, n;
6768 
6769 	ret = kstrtoint(buf, 0, &n);
6770 	if (ret)
6771 		return ret;
6772 	sdebug_cdb_len = n;
6773 	all_config_cdb_len();
6774 	return count;
6775 }
6776 static DRIVER_ATTR_RW(cdb_len);
6777 
6778 static const char * const zbc_model_strs_a[] = {
6779 	[BLK_ZONED_NONE] = "none",
6780 	[BLK_ZONED_HA]   = "host-aware",
6781 	[BLK_ZONED_HM]   = "host-managed",
6782 };
6783 
6784 static const char * const zbc_model_strs_b[] = {
6785 	[BLK_ZONED_NONE] = "no",
6786 	[BLK_ZONED_HA]   = "aware",
6787 	[BLK_ZONED_HM]   = "managed",
6788 };
6789 
6790 static const char * const zbc_model_strs_c[] = {
6791 	[BLK_ZONED_NONE] = "0",
6792 	[BLK_ZONED_HA]   = "1",
6793 	[BLK_ZONED_HM]   = "2",
6794 };
6795 
6796 static int sdeb_zbc_model_str(const char *cp)
6797 {
6798 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6799 
6800 	if (res < 0) {
6801 		res = sysfs_match_string(zbc_model_strs_b, cp);
6802 		if (res < 0) {
6803 			res = sysfs_match_string(zbc_model_strs_c, cp);
6804 			if (res < 0)
6805 				return -EINVAL;
6806 		}
6807 	}
6808 	return res;
6809 }
6810 
6811 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6812 {
6813 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6814 			 zbc_model_strs_a[sdeb_zbc_model]);
6815 }
6816 static DRIVER_ATTR_RO(zbc);
6817 
6818 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6819 {
6820 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6821 }
6822 static DRIVER_ATTR_RO(tur_ms_to_ready);
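
/*
 * Illustrative timeline (derived from resp_not_ready() further below):
 * with tur_ms_to_ready=5000 and a device created in stopped state 2, a
 * TEST UNIT READY within the first 5 seconds yields NOT READY ("in
 * process of becoming ready") with the remaining milliseconds placed in
 * the sense INFORMATION field; after that the device reports ready.
 */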
6823 
6824 /* Note: The following array creates attribute files in the
6825  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6826  * files (over those found in the /sys/module/scsi_debug/parameters
6827  * directory) is that auxiliary actions can be triggered when an attribute
6828  * is changed. For example see: add_host_store() above.
6829  */
6830 
6831 static struct attribute *sdebug_drv_attrs[] = {
6832 	&driver_attr_delay.attr,
6833 	&driver_attr_opts.attr,
6834 	&driver_attr_ptype.attr,
6835 	&driver_attr_dsense.attr,
6836 	&driver_attr_fake_rw.attr,
6837 	&driver_attr_host_max_queue.attr,
6838 	&driver_attr_no_lun_0.attr,
6839 	&driver_attr_num_tgts.attr,
6840 	&driver_attr_dev_size_mb.attr,
6841 	&driver_attr_num_parts.attr,
6842 	&driver_attr_every_nth.attr,
6843 	&driver_attr_lun_format.attr,
6844 	&driver_attr_max_luns.attr,
6845 	&driver_attr_max_queue.attr,
6846 	&driver_attr_no_rwlock.attr,
6847 	&driver_attr_no_uld.attr,
6848 	&driver_attr_scsi_level.attr,
6849 	&driver_attr_virtual_gb.attr,
6850 	&driver_attr_add_host.attr,
6851 	&driver_attr_per_host_store.attr,
6852 	&driver_attr_vpd_use_hostno.attr,
6853 	&driver_attr_sector_size.attr,
6854 	&driver_attr_statistics.attr,
6855 	&driver_attr_submit_queues.attr,
6856 	&driver_attr_dix.attr,
6857 	&driver_attr_dif.attr,
6858 	&driver_attr_guard.attr,
6859 	&driver_attr_ato.attr,
6860 	&driver_attr_map.attr,
6861 	&driver_attr_random.attr,
6862 	&driver_attr_removable.attr,
6863 	&driver_attr_host_lock.attr,
6864 	&driver_attr_ndelay.attr,
6865 	&driver_attr_strict.attr,
6866 	&driver_attr_uuid_ctl.attr,
6867 	&driver_attr_cdb_len.attr,
6868 	&driver_attr_tur_ms_to_ready.attr,
6869 	&driver_attr_zbc.attr,
6870 	NULL,
6871 };
6872 ATTRIBUTE_GROUPS(sdebug_drv);
6873 
6874 static struct device *pseudo_primary;
6875 
6876 static int __init scsi_debug_init(void)
6877 {
6878 	bool want_store = (sdebug_fake_rw == 0);
6879 	unsigned long sz;
6880 	int k, ret, hosts_to_add;
6881 	int idx = -1;
6882 
6883 	ramdisk_lck_a[0] = &atomic_rw;
6884 	ramdisk_lck_a[1] = &atomic_rw2;
6885 	atomic_set(&retired_max_queue, 0);
6886 
6887 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6888 		pr_warn("ndelay must be less than 1 second, ignored\n");
6889 		sdebug_ndelay = 0;
6890 	} else if (sdebug_ndelay > 0)
6891 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6892 
6893 	switch (sdebug_sector_size) {
6894 	case  512:
6895 	case 1024:
6896 	case 2048:
6897 	case 4096:
6898 		break;
6899 	default:
6900 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6901 		return -EINVAL;
6902 	}
6903 
6904 	switch (sdebug_dif) {
6905 	case T10_PI_TYPE0_PROTECTION:
6906 		break;
6907 	case T10_PI_TYPE1_PROTECTION:
6908 	case T10_PI_TYPE2_PROTECTION:
6909 	case T10_PI_TYPE3_PROTECTION:
6910 		have_dif_prot = true;
6911 		break;
6912 
6913 	default:
6914 		pr_err("dif must be 0, 1, 2 or 3\n");
6915 		return -EINVAL;
6916 	}
6917 
6918 	if (sdebug_num_tgts < 0) {
6919 		pr_err("num_tgts must be >= 0\n");
6920 		return -EINVAL;
6921 	}
6922 
6923 	if (sdebug_guard > 1) {
6924 		pr_err("guard must be 0 or 1\n");
6925 		return -EINVAL;
6926 	}
6927 
6928 	if (sdebug_ato > 1) {
6929 		pr_err("ato must be 0 or 1\n");
6930 		return -EINVAL;
6931 	}
6932 
6933 	if (sdebug_physblk_exp > 15) {
6934 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6935 		return -EINVAL;
6936 	}
6937 
6938 	sdebug_lun_am = sdebug_lun_am_i;
6939 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6940 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6941 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6942 	}
6943 
6944 	if (sdebug_max_luns > 256) {
6945 		if (sdebug_max_luns > 16384) {
6946 			pr_warn("max_luns can be no more than 16384, using default\n");
6947 			sdebug_max_luns = DEF_MAX_LUNS;
6948 		}
6949 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6950 	}
6951 
6952 	if (sdebug_lowest_aligned > 0x3fff) {
6953 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6954 		return -EINVAL;
6955 	}
6956 
6957 	if (submit_queues < 1) {
6958 		pr_err("submit_queues must be 1 or more\n");
6959 		return -EINVAL;
6960 	}
6961 
6962 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6963 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6964 		return -EINVAL;
6965 	}
6966 
6967 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6968 	    (sdebug_host_max_queue < 0)) {
6969 		pr_err("host_max_queue must be in range [0, %d]\n",
6970 		       SDEBUG_CANQUEUE);
6971 		return -EINVAL;
6972 	}
6973 
6974 	if (sdebug_host_max_queue &&
6975 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6976 		sdebug_max_queue = sdebug_host_max_queue;
6977 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6978 			sdebug_max_queue);
6979 	}
6980 
6981 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6982 			       GFP_KERNEL);
6983 	if (!sdebug_q_arr)
6984 		return -ENOMEM;
6985 	for (k = 0; k < submit_queues; ++k)
6986 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6987 
6988 	/*
6989 	 * check for host managed zoned block device specified with
6990 	 * ptype=0x14 or zbc=XXX.
6991 	 */
6992 	if (sdebug_ptype == TYPE_ZBC) {
6993 		sdeb_zbc_model = BLK_ZONED_HM;
6994 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6995 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6996 		if (k < 0) {
6997 			ret = k;
6998 			goto free_q_arr;
6999 		}
7000 		sdeb_zbc_model = k;
7001 		switch (sdeb_zbc_model) {
7002 		case BLK_ZONED_NONE:
7003 		case BLK_ZONED_HA:
7004 			sdebug_ptype = TYPE_DISK;
7005 			break;
7006 		case BLK_ZONED_HM:
7007 			sdebug_ptype = TYPE_ZBC;
7008 			break;
7009 		default:
7010 			pr_err("Invalid ZBC model\n");
7011 			ret = -EINVAL;
7012 			goto free_q_arr;
7013 		}
7014 	}
7015 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7016 		sdeb_zbc_in_use = true;
7017 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7018 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7019 	}
7020 
7021 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7022 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7023 	if (sdebug_dev_size_mb < 1)
7024 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7025 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7026 	sdebug_store_sectors = sz / sdebug_sector_size;
7027 	sdebug_capacity = get_sdebug_capacity();
7028 
7029 	/* play around with geometry, don't waste too much on track 0 */
7030 	sdebug_heads = 8;
7031 	sdebug_sectors_per = 32;
7032 	if (sdebug_dev_size_mb >= 256)
7033 		sdebug_heads = 64;
7034 	else if (sdebug_dev_size_mb >= 16)
7035 		sdebug_heads = 32;
7036 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7037 			       (sdebug_sectors_per * sdebug_heads);
7038 	if (sdebug_cylinders_per >= 1024) {
7039 		/* other LLDs do this; implies >= 1GB ram disk ... */
7040 		sdebug_heads = 255;
7041 		sdebug_sectors_per = 63;
7042 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7043 			       (sdebug_sectors_per * sdebug_heads);
7044 	}
7045 	if (scsi_debug_lbp()) {
7046 		sdebug_unmap_max_blocks =
7047 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7048 
7049 		sdebug_unmap_max_desc =
7050 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7051 
7052 		sdebug_unmap_granularity =
7053 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7054 
7055 		if (sdebug_unmap_alignment &&
7056 		    sdebug_unmap_granularity <=
7057 		    sdebug_unmap_alignment) {
7058 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7059 			ret = -EINVAL;
7060 			goto free_q_arr;
7061 		}
7062 	}
7063 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7064 	if (want_store) {
7065 		idx = sdebug_add_store();
7066 		if (idx < 0) {
7067 			ret = idx;
7068 			goto free_q_arr;
7069 		}
7070 	}
7071 
7072 	pseudo_primary = root_device_register("pseudo_0");
7073 	if (IS_ERR(pseudo_primary)) {
7074 		pr_warn("root_device_register() error\n");
7075 		ret = PTR_ERR(pseudo_primary);
7076 		goto free_vm;
7077 	}
7078 	ret = bus_register(&pseudo_lld_bus);
7079 	if (ret < 0) {
7080 		pr_warn("bus_register error: %d\n", ret);
7081 		goto dev_unreg;
7082 	}
7083 	ret = driver_register(&sdebug_driverfs_driver);
7084 	if (ret < 0) {
7085 		pr_warn("driver_register error: %d\n", ret);
7086 		goto bus_unreg;
7087 	}
7088 
7089 	hosts_to_add = sdebug_add_host;
7090 	sdebug_add_host = 0;
7091 
7092 	for (k = 0; k < hosts_to_add; k++) {
7093 		if (smp_load_acquire(&sdebug_deflect_incoming)) {
7094 			pr_info("exit early as sdebug_deflect_incoming is set\n");
7095 			return 0;
7096 		}
7097 		if (want_store && k == 0) {
7098 			ret = sdebug_add_host_helper(idx);
7099 			if (ret < 0) {
7100 				pr_err("add_host_helper k=%d, error=%d\n",
7101 				       k, -ret);
7102 				break;
7103 			}
7104 		} else {
7105 			ret = sdebug_do_add_host(want_store &&
7106 						 sdebug_per_host_store);
7107 			if (ret < 0) {
7108 				pr_err("add_host k=%d error=%d\n", k, -ret);
7109 				break;
7110 			}
7111 		}
7112 	}
7113 	if (sdebug_verbose)
7114 		pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
7115 
7116 	/*
7117 	 * Even though all the hosts have been established, due to async device (LU) scanning
7118 	 * by the scsi mid-level, there may still be devices (LUs) being set up.
7119 	 */
7120 	return 0;
7121 
7122 bus_unreg:
7123 	bus_unregister(&pseudo_lld_bus);
7124 dev_unreg:
7125 	root_device_unregister(pseudo_primary);
7126 free_vm:
7127 	sdebug_erase_store(idx, NULL);
7128 free_q_arr:
7129 	kfree(sdebug_q_arr);
7130 	return ret;
7131 }
7132 
7133 static void __exit scsi_debug_exit(void)
7134 {
7135 	int k;
7136 
7137 	/* Possible race with LUs still being set up; stop them asap */
7138 	sdeb_block_all_queues();
7139 	smp_store_release(&sdebug_deflect_incoming, true);
7140 	stop_all_queued(false);
7141 	for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
7142 		sdebug_do_remove_host(true);
7143 	free_all_queued();
7144 	if (sdebug_verbose)
7145 		pr_info("removed %d hosts\n", k);
7146 	driver_unregister(&sdebug_driverfs_driver);
7147 	bus_unregister(&pseudo_lld_bus);
7148 	root_device_unregister(pseudo_primary);
7149 
7150 	sdebug_erase_all_stores(false);
7151 	xa_destroy(per_store_ap);
7152 	kfree(sdebug_q_arr);
7153 }
7154 
7155 device_initcall(scsi_debug_init);
7156 module_exit(scsi_debug_exit);
7157 
7158 static void sdebug_release_adapter(struct device *dev)
7159 {
7160 	struct sdebug_host_info *sdbg_host;
7161 
7162 	sdbg_host = to_sdebug_host(dev);
7163 	kfree(sdbg_host);
7164 }
7165 
7166 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7167 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7168 {
7169 	if (idx < 0)
7170 		return;
7171 	if (!sip) {
7172 		if (xa_empty(per_store_ap))
7173 			return;
7174 		sip = xa_load(per_store_ap, idx);
7175 		if (!sip)
7176 			return;
7177 	}
7178 	vfree(sip->map_storep);
7179 	vfree(sip->dif_storep);
7180 	vfree(sip->storep);
7181 	xa_erase(per_store_ap, idx);
7182 	kfree(sip);
7183 }
7184 
7185 /* Assume apart_from_first==false only in shutdown case. */
7186 static void sdebug_erase_all_stores(bool apart_from_first)
7187 {
7188 	unsigned long idx;
7189 	struct sdeb_store_info *sip = NULL;
7190 
7191 	xa_for_each(per_store_ap, idx, sip) {
7192 		if (apart_from_first)
7193 			apart_from_first = false;
7194 		else
7195 			sdebug_erase_store(idx, sip);
7196 	}
7197 	if (apart_from_first)
7198 		sdeb_most_recent_idx = sdeb_first_idx;
7199 }
7200 
7201 /*
7202  * Returns store xarray new element index (idx) if >=0 else negated errno.
7203  * Limit the number of stores to 65536.
7204  */
7205 static int sdebug_add_store(void)
7206 {
7207 	int res;
7208 	u32 n_idx;
7209 	unsigned long iflags;
7210 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7211 	struct sdeb_store_info *sip = NULL;
7212 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7213 
7214 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7215 	if (!sip)
7216 		return -ENOMEM;
7217 
7218 	xa_lock_irqsave(per_store_ap, iflags);
7219 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7220 	if (unlikely(res < 0)) {
7221 		xa_unlock_irqrestore(per_store_ap, iflags);
7222 		kfree(sip);
7223 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7224 		return res;
7225 	}
7226 	sdeb_most_recent_idx = n_idx;
7227 	if (sdeb_first_idx < 0)
7228 		sdeb_first_idx = n_idx;
7229 	xa_unlock_irqrestore(per_store_ap, iflags);
7230 
7231 	res = -ENOMEM;
7232 	sip->storep = vzalloc(sz);
7233 	if (!sip->storep) {
7234 		pr_err("user data oom\n");
7235 		goto err;
7236 	}
7237 	if (sdebug_num_parts > 0)
7238 		sdebug_build_parts(sip->storep, sz);
7239 
7240 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7241 	if (sdebug_dix) {
7242 		int dif_size;
7243 
7244 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7245 		sip->dif_storep = vmalloc(dif_size);
7246 
7247 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7248 			sip->dif_storep);
7249 
7250 		if (!sip->dif_storep) {
7251 			pr_err("DIX oom\n");
7252 			goto err;
7253 		}
7254 		memset(sip->dif_storep, 0xff, dif_size);
7255 	}
7256 	/* Logical Block Provisioning */
7257 	if (scsi_debug_lbp()) {
7258 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7259 		sip->map_storep = vmalloc(array_size(sizeof(long),
7260 						     BITS_TO_LONGS(map_size)));
7261 
7262 		pr_info("%lu provisioning blocks\n", map_size);
7263 
7264 		if (!sip->map_storep) {
7265 			pr_err("LBP map oom\n");
7266 			goto err;
7267 		}
7268 
7269 		bitmap_zero(sip->map_storep, map_size);
7270 
7271 		/* Map first 1KB for partition table */
7272 		if (sdebug_num_parts)
7273 			map_region(sip, 0, 2);
7274 	}
7275 
7276 	rwlock_init(&sip->macc_lck);
7277 	return (int)n_idx;
7278 err:
7279 	sdebug_erase_store((int)n_idx, sip);
7280 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7281 	return res;
7282 }
7283 
7284 static int sdebug_add_host_helper(int per_host_idx)
7285 {
7286 	int k, devs_per_host, idx;
7287 	int error = -ENOMEM;
7288 	struct sdebug_host_info *sdbg_host;
7289 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7290 
7291 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7292 	if (!sdbg_host)
7293 		return -ENOMEM;
7294 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7295 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7296 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7297 	sdbg_host->si_idx = idx;
7298 
7299 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7300 
7301 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7302 	for (k = 0; k < devs_per_host; k++) {
7303 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7304 		if (!sdbg_devinfo)
7305 			goto clean;
7306 	}
7307 
7308 	spin_lock(&sdebug_host_list_lock);
7309 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7310 	spin_unlock(&sdebug_host_list_lock);
7311 
7312 	sdbg_host->dev.bus = &pseudo_lld_bus;
7313 	sdbg_host->dev.parent = pseudo_primary;
7314 	sdbg_host->dev.release = &sdebug_release_adapter;
7315 	dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
7316 
7317 	error = device_register(&sdbg_host->dev);
7318 	if (error)
7319 		goto clean;
7320 
7321 	atomic_inc(&sdebug_num_hosts);
7322 	return 0;
7323 
7324 clean:
7325 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7326 				 dev_list) {
7327 		list_del(&sdbg_devinfo->dev_list);
7328 		kfree(sdbg_devinfo->zstate);
7329 		kfree(sdbg_devinfo);
7330 	}
7331 	kfree(sdbg_host);
7332 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7333 	return error;
7334 }
7335 
7336 static int sdebug_do_add_host(bool mk_new_store)
7337 {
7338 	int ph_idx = sdeb_most_recent_idx;
7339 
7340 	if (mk_new_store) {
7341 		ph_idx = sdebug_add_store();
7342 		if (ph_idx < 0)
7343 			return ph_idx;
7344 	}
7345 	return sdebug_add_host_helper(ph_idx);
7346 }
7347 
7348 static void sdebug_do_remove_host(bool the_end)
7349 {
7350 	int idx = -1;
7351 	struct sdebug_host_info *sdbg_host = NULL;
7352 	struct sdebug_host_info *sdbg_host2;
7353 
7354 	spin_lock(&sdebug_host_list_lock);
7355 	if (!list_empty(&sdebug_host_list)) {
7356 		sdbg_host = list_entry(sdebug_host_list.prev,
7357 				       struct sdebug_host_info, host_list);
7358 		idx = sdbg_host->si_idx;
7359 	}
7360 	if (!the_end && idx >= 0) {
7361 		bool unique = true;
7362 
7363 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7364 			if (sdbg_host2 == sdbg_host)
7365 				continue;
7366 			if (idx == sdbg_host2->si_idx) {
7367 				unique = false;
7368 				break;
7369 			}
7370 		}
7371 		if (unique) {
7372 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7373 			if (idx == sdeb_most_recent_idx)
7374 				--sdeb_most_recent_idx;
7375 		}
7376 	}
7377 	if (sdbg_host)
7378 		list_del(&sdbg_host->host_list);
7379 	spin_unlock(&sdebug_host_list_lock);
7380 
7381 	if (!sdbg_host)
7382 		return;
7383 
7384 	device_unregister(&sdbg_host->dev);
7385 	atomic_dec(&sdebug_num_hosts);
7386 }
7387 
7388 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7389 {
7390 	int num_in_q = 0;
7391 	struct sdebug_dev_info *devip;
7392 
7393 	sdeb_block_all_queues();
7394 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7395 	if (!devip) {
7396 		sdeb_unblock_all_queues();
7397 		return -ENODEV;
7398 	}
7399 	num_in_q = atomic_read(&devip->num_in_q);
7400 
7401 	if (qdepth > SDEBUG_CANQUEUE) {
7402 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7403 			qdepth, SDEBUG_CANQUEUE);
7404 		qdepth = SDEBUG_CANQUEUE;
7405 	}
7406 	if (qdepth < 1)
7407 		qdepth = 1;
7408 	if (qdepth != sdev->queue_depth)
7409 		scsi_change_queue_depth(sdev, qdepth);
7410 
7411 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7412 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7413 			    __func__, qdepth, num_in_q);
7414 	}
7415 	sdeb_unblock_all_queues();
7416 	return sdev->queue_depth;
7417 }
7418 
7419 static bool fake_timeout(struct scsi_cmnd *scp)
7420 {
7421 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0) {
7422 		if (sdebug_every_nth < -1)
7423 			sdebug_every_nth = -1;
7424 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7425 			return true; /* ignore command causing timeout */
7426 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7427 			 scsi_medium_access_command(scp))
7428 			return true; /* time out reads and writes */
7429 	}
7430 	return false;
7431 }
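
/*
 * Example of the every_nth interaction above (editor's sketch): with
 * every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every 100th command
 * is silently dropped so the mid-level's timeout and abort handling can
 * be exercised; negative every_nth values are clamped to -1 here (see the
 * scsi_debug documentation for their one-shot semantics).
 */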
7432 
7433 /* Response to TUR or media access command when device stopped */
7434 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7435 {
7436 	int stopped_state;
7437 	u64 diff_ns = 0;
7438 	ktime_t now_ts = ktime_get_boottime();
7439 	struct scsi_device *sdp = scp->device;
7440 
7441 	stopped_state = atomic_read(&devip->stopped);
7442 	if (stopped_state == 2) {
7443 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7444 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7445 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7446 				/* tur_ms_to_ready timer extinguished */
7447 				atomic_set(&devip->stopped, 0);
7448 				return 0;
7449 			}
7450 		}
7451 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7452 		if (sdebug_verbose)
7453 			sdev_printk(KERN_INFO, sdp,
7454 				    "%s: Not ready: in process of becoming ready\n", my_name);
7455 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7456 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7457 
7458 			if (diff_ns <= tur_nanosecs_to_ready)
7459 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7460 			else
7461 				diff_ns = tur_nanosecs_to_ready;
7462 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7463 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7464 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7465 						   diff_ns);
7466 			return check_condition_result;
7467 		}
7468 	}
7469 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7470 	if (sdebug_verbose)
7471 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7472 			    my_name);
7473 	return check_condition_result;
7474 }
7475 
7476 static int sdebug_map_queues(struct Scsi_Host *shost)
7477 {
7478 	int i, qoff;
7479 
7480 	if (shost->nr_hw_queues == 1)
7481 		return 0;
7482 
7483 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7484 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7485 
7486 		map->nr_queues  = 0;
7487 
7488 		if (i == HCTX_TYPE_DEFAULT)
7489 			map->nr_queues = submit_queues - poll_queues;
7490 		else if (i == HCTX_TYPE_POLL)
7491 			map->nr_queues = poll_queues;
7492 
7493 		if (!map->nr_queues) {
7494 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7495 			continue;
7496 		}
7497 
7498 		map->queue_offset = qoff;
7499 		blk_mq_map_queues(map);
7500 
7501 		qoff += map->nr_queues;
7502 	}
7503 
7504 	return 0;
7506 }
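
/*
 * Worked example for the mapping above (illustrative): with
 * submit_queues=4 and poll_queues=1, HCTX_TYPE_DEFAULT gets 3 hardware
 * queues at offset 0 and HCTX_TYPE_POLL gets 1 queue at offset 3, while
 * HCTX_TYPE_READ gets none and is skipped.
 */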
7507 
7508 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7509 {
7510 	bool first;
7511 	bool retiring = false;
7512 	int num_entries = 0;
7513 	unsigned int qc_idx = 0;
7514 	unsigned long iflags;
7515 	ktime_t kt_from_boot = ktime_get_boottime();
7516 	struct sdebug_queue *sqp;
7517 	struct sdebug_queued_cmd *sqcp;
7518 	struct scsi_cmnd *scp;
7519 	struct sdebug_dev_info *devip;
7520 	struct sdebug_defer *sd_dp;
7521 
7522 	sqp = sdebug_q_arr + queue_num;
7523 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7524 	if (qc_idx >= sdebug_max_queue)
7525 		return 0;
7526 
7527 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7528 
7529 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7530 		if (first) {
7531 			first = false;
7532 			if (!test_bit(qc_idx, sqp->in_use_bm))
7533 				continue;
7534 		} else {
7535 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7536 		}
7537 		if (qc_idx >= sdebug_max_queue)
7538 			break;
7539 
7540 		sqcp = &sqp->qc_arr[qc_idx];
7541 		sd_dp = sqcp->sd_dp;
7542 		if (unlikely(!sd_dp))
7543 			continue;
7544 		scp = sqcp->a_cmnd;
7545 		if (unlikely(scp == NULL)) {
7546 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7547 			       queue_num, qc_idx, __func__);
7548 			break;
7549 		}
7550 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7551 			if (kt_from_boot < sd_dp->cmpl_ts)
7552 				continue;
7553 
7554 		} else		/* ignoring non-REQ_POLLED requests */
7555 			continue;
7556 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7557 		if (likely(devip))
7558 			atomic_dec(&devip->num_in_q);
7559 		else
7560 			pr_err("devip=NULL from %s\n", __func__);
7561 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7562 			retiring = true;
7563 
7564 		sqcp->a_cmnd = NULL;
7565 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7566 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7567 				sqp, queue_num, qc_idx, __func__);
7568 			break;
7569 		}
7570 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7571 			int k, retval;
7572 
7573 			retval = atomic_read(&retired_max_queue);
7574 			if (qc_idx >= retval) {
7575 				pr_err("index %u too large (limit %d)\n", qc_idx, retval);
7576 				break;
7577 			}
7578 			k = find_last_bit(sqp->in_use_bm, retval);
7579 			if ((k < sdebug_max_queue) || (k == retval))
7580 				atomic_set(&retired_max_queue, 0);
7581 			else
7582 				atomic_set(&retired_max_queue, k + 1);
7583 		}
7584 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7585 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7586 		scsi_done(scp); /* callback to mid level */
7587 		num_entries++;
7588 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7589 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7590 			break;
7591 	}
7592 
7593 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7594 
7595 	if (num_entries > 0)
7596 		atomic_add(num_entries, &sdeb_mq_poll_count);
7597 	return num_entries;
7598 }
7599 
7600 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7601 				   struct scsi_cmnd *scp)
7602 {
7603 	u8 sdeb_i;
7604 	struct scsi_device *sdp = scp->device;
7605 	const struct opcode_info_t *oip;
7606 	const struct opcode_info_t *r_oip;
7607 	struct sdebug_dev_info *devip;
7608 	u8 *cmd = scp->cmnd;
7609 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7610 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7611 	int k, na;
7612 	int errsts = 0;
7613 	u64 lun_index = sdp->lun & 0x3FFF;
7614 	u32 flags;
7615 	u16 sa;
7616 	u8 opcode = cmd[0];
7617 	bool has_wlun_rl;
7618 	bool inject_now;
7619 
7620 	scsi_set_resid(scp, 0);
7621 	if (sdebug_statistics) {
7622 		atomic_inc(&sdebug_cmnd_count);
7623 		inject_now = inject_on_this_cmd();
7624 	} else {
7625 		inject_now = false;
7626 	}
7627 	if (unlikely(sdebug_verbose &&
7628 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7629 		char b[120];
7630 		int n, len, sb;
7631 
7632 		len = scp->cmd_len;
7633 		sb = (int)sizeof(b);
7634 		if (len > 32)
7635 			strcpy(b, "too long, over 32 bytes");
7636 		else {
7637 			for (k = 0, n = 0; k < len && n < sb; ++k)
7638 				n += scnprintf(b + n, sb - n, "%02x ",
7639 					       (u32)cmd[k]);
7640 		}
7641 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7642 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7643 	}
7644 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7645 		return SCSI_MLQUEUE_HOST_BUSY;
7646 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7647 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7648 		goto err_out;
7649 
7650 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7651 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7652 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7653 	if (unlikely(!devip)) {
7654 		devip = find_build_dev_info(sdp);
7655 		if (!devip)
7656 			goto err_out;
7657 	}
7658 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7659 		atomic_set(&sdeb_inject_pending, 1);
7660 
7661 	na = oip->num_attached;
7662 	r_pfp = oip->pfp;
7663 	if (na) {	/* multiple commands with this opcode */
7664 		r_oip = oip;
7665 		if (FF_SA & r_oip->flags) {
7666 			if (F_SA_LOW & oip->flags)
7667 				sa = 0x1f & cmd[1];
7668 			else
7669 				sa = get_unaligned_be16(cmd + 8);
7670 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7671 				if (opcode == oip->opcode && sa == oip->sa)
7672 					break;
7673 			}
7674 		} else {   /* since no service action only check opcode */
7675 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7676 				if (opcode == oip->opcode)
7677 					break;
7678 			}
7679 		}
7680 		if (k > na) {
7681 			if (F_SA_LOW & r_oip->flags)
7682 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7683 			else if (F_SA_HIGH & r_oip->flags)
7684 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7685 			else
7686 				mk_sense_invalid_opcode(scp);
7687 			goto check_cond;
7688 		}
7689 	}	/* else (when na==0) we assume the oip is a match */
7690 	flags = oip->flags;
7691 	if (unlikely(F_INV_OP & flags)) {
7692 		mk_sense_invalid_opcode(scp);
7693 		goto check_cond;
7694 	}
7695 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7696 		if (sdebug_verbose)
7697 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7698 				    my_name, opcode);
7699 		mk_sense_invalid_opcode(scp);
7700 		goto check_cond;
7701 	}
7702 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7703 		u8 rem;
7704 		int j;
7705 
7706 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7707 			rem = ~oip->len_mask[k] & cmd[k];
7708 			if (rem) {
7709 				for (j = 7; j >= 0; --j, rem <<= 1) {
7710 					if (0x80 & rem)
7711 						break;
7712 				}
7713 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7714 				goto check_cond;
7715 			}
7716 		}
7717 	}
7718 	if (unlikely(!(F_SKIP_UA & flags) &&
7719 		     find_first_bit(devip->uas_bm,
7720 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7721 		errsts = make_ua(scp, devip);
7722 		if (errsts)
7723 			goto check_cond;
7724 	}
7725 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7726 		     atomic_read(&devip->stopped))) {
7727 		errsts = resp_not_ready(scp, devip);
7728 		if (errsts)
7729 			goto fini;
7730 	}
7731 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7732 		goto fini;
7733 	if (unlikely(sdebug_every_nth)) {
7734 		if (fake_timeout(scp))
7735 			return 0;	/* ignore command: make trouble */
7736 	}
7737 	if (likely(oip->pfp))
7738 		pfp = oip->pfp;	/* calls a resp_* function */
7739 	else
7740 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7741 
7742 fini:
7743 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7744 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7745 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7746 					    sdebug_ndelay > 10000)) {
7747 		/*
7748 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7749 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7750 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7751 		 * For Synchronize Cache want 1/20 of SSU's delay.
7752 		 */
7753 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7754 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7755 
7756 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7757 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7758 	} else
7759 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7760 				     sdebug_ndelay);
7761 check_cond:
7762 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7763 err_out:
7764 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7765 }
7766 
7767 static struct scsi_host_template sdebug_driver_template = {
7768 	.show_info =		scsi_debug_show_info,
7769 	.write_info =		scsi_debug_write_info,
7770 	.proc_name =		sdebug_proc_name,
7771 	.name =			"SCSI DEBUG",
7772 	.info =			scsi_debug_info,
7773 	.slave_alloc =		scsi_debug_slave_alloc,
7774 	.slave_configure =	scsi_debug_slave_configure,
7775 	.slave_destroy =	scsi_debug_slave_destroy,
7776 	.ioctl =		scsi_debug_ioctl,
7777 	.queuecommand =		scsi_debug_queuecommand,
7778 	.change_queue_depth =	sdebug_change_qdepth,
7779 	.map_queues =		sdebug_map_queues,
7780 	.mq_poll =		sdebug_blk_mq_poll,
7781 	.eh_abort_handler =	scsi_debug_abort,
7782 	.eh_device_reset_handler = scsi_debug_device_reset,
7783 	.eh_target_reset_handler = scsi_debug_target_reset,
7784 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7785 	.eh_host_reset_handler = scsi_debug_host_reset,
7786 	.can_queue =		SDEBUG_CANQUEUE,
7787 	.this_id =		7,
7788 	.sg_tablesize =		SG_MAX_SEGMENTS,
7789 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7790 	.max_sectors =		-1U,
7791 	.max_segment_size =	-1U,
7792 	.module =		THIS_MODULE,
7793 	.track_queue_depth =	1,
7794 };
7795 
7796 static int sdebug_driver_probe(struct device *dev)
7797 {
7798 	int error = 0;
7799 	struct sdebug_host_info *sdbg_host;
7800 	struct Scsi_Host *hpnt;
7801 	int hprot;
7802 
7803 	sdbg_host = to_sdebug_host(dev);
7804 
7805 	sdebug_driver_template.can_queue = sdebug_max_queue;
7806 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7807 	if (!sdebug_clustering)
7808 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7809 
7810 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7811 	if (!hpnt) {
7812 		pr_err("scsi_host_alloc failed\n");
7813 		error = -ENODEV;
7814 		return error;
7815 	}
7816 	if (submit_queues > nr_cpu_ids) {
7817 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7818 			my_name, submit_queues, nr_cpu_ids);
7819 		submit_queues = nr_cpu_ids;
7820 	}
7821 	/*
7822 	 * Decide whether to tell scsi subsystem that we want mq. The
7823 	 * following should give the same answer for each host.
7824 	 */
7825 	hpnt->nr_hw_queues = submit_queues;
7826 	if (sdebug_host_max_queue)
7827 		hpnt->host_tagset = 1;
7828 
7829 	/* poll queues are possible for nr_hw_queues > 1 */
7830 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7831 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7832 			 my_name, poll_queues, hpnt->nr_hw_queues);
7833 		poll_queues = 0;
7834 	}
7835 
7836 	/*
7837 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7838 	 * left over for non-polled I/O.
7839 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7840 	 */
7841 	if (poll_queues >= submit_queues) {
7842 		if (submit_queues < 3)
7843 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7844 		else
7845 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7846 				my_name, submit_queues - 1);
7847 		poll_queues = 1;
7848 	}
7849 	if (poll_queues)
7850 		hpnt->nr_maps = 3;
7851 
7852 	sdbg_host->shost = hpnt;
7853 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7854 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7855 		hpnt->max_id = sdebug_num_tgts + 1;
7856 	else
7857 		hpnt->max_id = sdebug_num_tgts;
7858 	/* = sdebug_max_luns; */
7859 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7860 
7861 	hprot = 0;
7862 
7863 	switch (sdebug_dif) {
7864 
7865 	case T10_PI_TYPE1_PROTECTION:
7866 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7867 		if (sdebug_dix)
7868 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7869 		break;
7870 
7871 	case T10_PI_TYPE2_PROTECTION:
7872 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7873 		if (sdebug_dix)
7874 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7875 		break;
7876 
7877 	case T10_PI_TYPE3_PROTECTION:
7878 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7879 		if (sdebug_dix)
7880 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7881 		break;
7882 
7883 	default:
7884 		if (sdebug_dix)
7885 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7886 		break;
7887 	}
7888 
7889 	scsi_host_set_prot(hpnt, hprot);
7890 
7891 	if (have_dif_prot || sdebug_dix)
7892 		pr_info("host protection%s%s%s%s%s%s%s\n",
7893 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7894 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7895 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7896 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7897 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7898 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7899 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7900 
7901 	if (sdebug_guard == 1)
7902 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7903 	else
7904 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7905 
7906 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7907 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7908 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7909 		sdebug_statistics = true;
7910 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7911 	if (error) {
7912 		pr_err("scsi_add_host failed\n");
7913 		error = -ENODEV;
7914 		scsi_host_put(hpnt);
7915 	} else {
7916 		scsi_scan_host(hpnt);
7917 	}
7918 
7919 	return error;
7920 }
7921 
7922 static void sdebug_driver_remove(struct device *dev)
7923 {
7924 	struct sdebug_host_info *sdbg_host;
7925 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7926 
7927 	sdbg_host = to_sdebug_host(dev);
7928 
7929 	scsi_remove_host(sdbg_host->shost);
7930 
7931 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7932 				 dev_list) {
7933 		list_del(&sdbg_devinfo->dev_list);
7934 		kfree(sdbg_devinfo->zstate);
7935 		kfree(sdbg_devinfo);
7936 	}
7937 
7938 	scsi_host_put(sdbg_host->shost);
7939 }
7940 
7941 static int pseudo_lld_bus_match(struct device *dev,
7942 				struct device_driver *dev_driver)
7943 {
7944 	return 1;
7945 }
7946 
7947 static struct bus_type pseudo_lld_bus = {
7948 	.name = "pseudo",
7949 	.match = pseudo_lld_bus_match,
7950 	.probe = sdebug_driver_probe,
7951 	.remove = sdebug_driver_remove,
7952 	.drv_groups = sdebug_drv_groups,
7953 };
7954