xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision c4f7ac64)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
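/*
 * A hypothetical usage sketch (parameter names as defined by this module):
 * "modprobe scsi_debug add_host=1 num_tgts=1 max_luns=1" spells out the
 * defaults above and is equivalent to a plain "modprobe scsi_debug".
 */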
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
157 
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB	128
160 #define DEF_ZBC_MAX_OPEN_ZONES	8
161 #define DEF_ZBC_NR_CONV_ZONES	1
162 
163 #define SDEBUG_LUN_0_VAL 0
164 
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE		1
167 #define SDEBUG_OPT_MEDIUM_ERR		2
168 #define SDEBUG_OPT_TIMEOUT		4
169 #define SDEBUG_OPT_RECOVERED_ERR	8
170 #define SDEBUG_OPT_TRANSPORT_ERR	16
171 #define SDEBUG_OPT_DIF_ERR		32
172 #define SDEBUG_OPT_DIX_ERR		64
173 #define SDEBUG_OPT_MAC_TIMEOUT		128
174 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
175 #define SDEBUG_OPT_Q_NOISE		0x200
176 #define SDEBUG_OPT_ALL_TSF		0x400
177 #define SDEBUG_OPT_RARE_TSF		0x800
178 #define SDEBUG_OPT_N_WCE		0x1000
179 #define SDEBUG_OPT_RESET_NOISE		0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
181 #define SDEBUG_OPT_HOST_BUSY		0x8000
182 #define SDEBUG_OPT_CMD_ABORT		0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 			      SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 				  SDEBUG_OPT_TRANSPORT_ERR | \
187 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 				  SDEBUG_OPT_SHORT_TRANSFER | \
189 				  SDEBUG_OPT_HOST_BUSY | \
190 				  SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
193 
194 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
195  * priority order. In the subset implemented here lower numbers have higher
196  * priority. The UA numbers should be a sequence starting from 0 with
197  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
206 
207 /* when the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is simulated
208  * at this sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
211 
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213  * (for response) per submit queue at one time. Can be reduced by max_queue
214  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217  * but cannot exceed SDEBUG_CANQUEUE.
218  */
219 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is the number of bits in a long */
220 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
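/*
 * For example, on a 64-bit build (BITS_PER_LONG == 64) SDEBUG_CANQUEUE
 * evaluates to 3 * 64 = 192 queueable commands per submit queue.
 */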
222 
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN			1	/* Data-in command (e.g. READ) */
225 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
227 #define F_D_UNKN		8
228 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
231 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
234 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
236 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
238 
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
250 
251 /* Zone types (zbcr05 table 25) */
252 enum sdebug_z_type {
253 	ZBC_ZONE_TYPE_CNV	= 0x1,
254 	ZBC_ZONE_TYPE_SWR	= 0x2,
255 	ZBC_ZONE_TYPE_SWP	= 0x3,
256 };
257 
258 /* enumeration names taken from table 26, zbcr05 */
259 enum sdebug_z_cond {
260 	ZBC_NOT_WRITE_POINTER	= 0x0,
261 	ZC1_EMPTY		= 0x1,
262 	ZC2_IMPLICIT_OPEN	= 0x2,
263 	ZC3_EXPLICIT_OPEN	= 0x3,
264 	ZC4_CLOSED		= 0x4,
265 	ZC6_READ_ONLY		= 0xd,
266 	ZC5_FULL		= 0xe,
267 	ZC7_OFFLINE		= 0xf,
268 };
269 
270 struct sdeb_zone_state {	/* ZBC: per zone state */
271 	enum sdebug_z_type z_type;
272 	enum sdebug_z_cond z_cond;
273 	bool z_non_seq_resource;
274 	unsigned int z_size;
275 	sector_t z_start;
276 	sector_t z_wp;
277 };
278 
279 struct sdebug_dev_info {
280 	struct list_head dev_list;
281 	unsigned int channel;
282 	unsigned int target;
283 	u64 lun;
284 	uuid_t lu_name;
285 	struct sdebug_host_info *sdbg_host;
286 	unsigned long uas_bm[1];
287 	atomic_t num_in_q;
288 	atomic_t stopped;	/* 1: by SSU, 2: device start */
289 	bool used;
290 
291 	/* For ZBC devices */
292 	enum blk_zoned_model zmodel;
293 	unsigned int zsize;
294 	unsigned int zsize_shift;
295 	unsigned int nr_zones;
296 	unsigned int nr_conv_zones;
297 	unsigned int nr_imp_open;
298 	unsigned int nr_exp_open;
299 	unsigned int nr_closed;
300 	unsigned int max_open;
301 	ktime_t create_ts;	/* time since bootup that this device was created */
302 	struct sdeb_zone_state *zstate;
303 };
304 
305 struct sdebug_host_info {
306 	struct list_head host_list;
307 	int si_idx;	/* sdeb_store_info (per host) xarray index */
308 	struct Scsi_Host *shost;
309 	struct device dev;
310 	struct list_head dev_info_list;
311 };
312 
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 	rwlock_t macc_lck;	/* for atomic media access on this store */
316 	u8 *storep;		/* user data storage (ram) */
317 	struct t10_pi_tuple *dif_storep; /* protection info */
318 	void *map_storep;	/* provisioning map */
319 };
320 
321 #define to_sdebug_host(d)	\
322 	container_of(d, struct sdebug_host_info, dev)
323 
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
326 
327 struct sdebug_defer {
328 	struct hrtimer hrt;
329 	struct execute_work ew;
330 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
331 	int sqa_idx;	/* index of sdebug_queue array */
332 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
333 	int hc_idx;	/* hostwide tag index */
334 	int issuing_cpu;
335 	bool init_hrt;
336 	bool init_wq;
337 	bool init_poll;
338 	bool aborted;	/* true when blk_abort_request() already called */
339 	enum sdeb_defer_type defer_t;
340 };
341 
342 struct sdebug_queued_cmd {
343 	/* A set bit in in_use_bm[] of the owning struct sdebug_queue
344 	 * instance indicates that this slot is in use.
345 	 */
346 	struct sdebug_defer *sd_dp;
347 	struct scsi_cmnd *a_cmnd;
348 };
349 
350 struct sdebug_queue {
351 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
353 	spinlock_t qc_lock;
354 	atomic_t blocked;	/* to temporarily stop more being queued */
355 };
356 
357 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
358 static atomic_t sdebug_completions;  /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending;
362 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
363 
364 struct opcode_info_t {
365 	u8 num_attached;	/* 0 if this is a leaf (nothing attached); */
366 				/* 0xff marks the terminating element */
367 	u8 opcode;		/* if num_attached > 0, preferred */
368 	u16 sa;			/* service action */
369 	u32 flags;		/* OR-ed set of SDEB_F_* */
370 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
371 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
372 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
373 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
374 };
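/*
 * Reading example (assuming the strict-checking use of len_mask later in
 * this driver): the INQUIRY entry below starts {6, 0xe3, 0xff, ...}, so
 * len_mask[0] == 6 gives a 6 byte cdb and the 0xe3 mask for cdb[1] allows
 * only bits 7..5, 1 and 0; with the strict parameter set, any other set
 * bit yields INVALID FIELD IN CDB.
 */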
375 
376 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
377 enum sdeb_opcode_index {
378 	SDEB_I_INVALID_OPCODE =	0,
379 	SDEB_I_INQUIRY = 1,
380 	SDEB_I_REPORT_LUNS = 2,
381 	SDEB_I_REQUEST_SENSE = 3,
382 	SDEB_I_TEST_UNIT_READY = 4,
383 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
384 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
385 	SDEB_I_LOG_SENSE = 7,
386 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
387 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
388 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
389 	SDEB_I_START_STOP = 11,
390 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
391 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
392 	SDEB_I_MAINT_IN = 14,
393 	SDEB_I_MAINT_OUT = 15,
394 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
395 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
396 	SDEB_I_RESERVE = 18,		/* 6, 10 */
397 	SDEB_I_RELEASE = 19,		/* 6, 10 */
398 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
399 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
400 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
401 	SDEB_I_SEND_DIAG = 23,
402 	SDEB_I_UNMAP = 24,
403 	SDEB_I_WRITE_BUFFER = 25,
404 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
405 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
406 	SDEB_I_COMP_WRITE = 28,
407 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
408 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
409 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
410 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
411 };
412 
413 
414 static const unsigned char opcode_ind_arr[256] = {
415 /* 0x0; 0x0->0x1f: 6 byte cdbs */
416 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
417 	    0, 0, 0, 0,
418 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
419 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
420 	    SDEB_I_RELEASE,
421 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
422 	    SDEB_I_ALLOW_REMOVAL, 0,
423 /* 0x20; 0x20->0x3f: 10 byte cdbs */
424 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
425 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
426 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
427 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
428 /* 0x40; 0x40->0x5f: 10 byte cdbs */
429 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
430 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
431 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
432 	    SDEB_I_RELEASE,
433 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
434 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
435 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 	0, SDEB_I_VARIABLE_LEN,
438 /* 0x80; 0x80->0x9f: 16 byte cdbs */
439 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
440 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
441 	0, 0, 0, SDEB_I_VERIFY,
442 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
443 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
444 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
445 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
446 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
447 	     SDEB_I_MAINT_OUT, 0, 0, 0,
448 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
449 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
450 	0, 0, 0, 0, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0,
452 /* 0xc0; 0xc0->0xff: vendor specific */
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 };
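/*
 * Example lookup: a cdb starting with 0x12 (INQUIRY) yields
 * opcode_ind_arr[0x12] == SDEB_I_INQUIRY, which then selects the
 * corresponding entry in opcode_info_arr[] further below.
 */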
458 
459 /*
460  * The following "response" functions return the SCSI mid-level's 4 byte
461  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
462  * command completion, they can set SDEG_RES_IMMED_MASK in their
463  * return value.
464  */
465 #define SDEG_RES_IMMED_MASK 0x40000000
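/*
 * A sketch of the convention (as used by the handlers below): a resp_*()
 * function may "return res | SDEG_RES_IMMED_MASK;" so that the scheduling
 * code sees the IMMED hint, strips this bit and completes the command with
 * little or no delay.
 */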
466 
467 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 
497 static int sdebug_do_add_host(bool mk_new_store);
498 static int sdebug_add_host_helper(int per_host_idx);
499 static void sdebug_do_remove_host(bool the_end);
500 static int sdebug_add_store(void);
501 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
502 static void sdebug_erase_all_stores(bool apart_from_first);
503 
504 /*
505  * The following are overflow arrays for cdbs that "hit" the same index in
506  * the opcode_info_arr array. The most time-sensitive (or commonly used) cdb
507  * should be placed in opcode_info_arr[], the others should be placed here.
508  */
509 static const struct opcode_info_t msense_iarr[] = {
510 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
511 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
512 };
513 
514 static const struct opcode_info_t mselect_iarr[] = {
515 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
516 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
517 };
518 
519 static const struct opcode_info_t read_iarr[] = {
520 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
521 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
522 	     0, 0, 0, 0} },
523 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
524 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
526 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
527 	     0xc7, 0, 0, 0, 0} },
528 };
529 
530 static const struct opcode_info_t write_iarr[] = {
531 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
532 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
533 		   0, 0, 0, 0, 0, 0} },
534 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
535 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
536 		   0, 0, 0} },
537 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
538 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 		   0xbf, 0xc7, 0, 0, 0, 0} },
540 };
541 
542 static const struct opcode_info_t verify_iarr[] = {
543 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
544 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
545 		   0, 0, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t sa_in_16_iarr[] = {
549 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
550 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
551 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
552 };
553 
554 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
555 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
556 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
557 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
558 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
559 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
560 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
561 };
562 
563 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
564 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
565 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
566 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
567 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
568 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
569 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
570 };
571 
572 static const struct opcode_info_t write_same_iarr[] = {
573 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
574 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
575 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
576 };
577 
578 static const struct opcode_info_t reserve_iarr[] = {
579 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
580 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
581 };
582 
583 static const struct opcode_info_t release_iarr[] = {
584 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
585 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
586 };
587 
588 static const struct opcode_info_t sync_cache_iarr[] = {
589 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
590 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
591 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
592 };
593 
594 static const struct opcode_info_t pre_fetch_iarr[] = {
595 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
596 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
598 };
599 
600 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
601 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
602 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
604 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
605 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
607 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
608 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
610 };
611 
612 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
613 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
614 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
616 };
617 
618 
619 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
620  * plus the terminating elements for logic that scans this table such as
621  * REPORT SUPPORTED OPERATION CODES. */
622 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
623 /* 0 */
624 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
625 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
627 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
629 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
630 	     0, 0} },					/* REPORT LUNS */
631 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
632 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
634 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
635 /* 5 */
636 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
637 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
638 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
640 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
641 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
643 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
644 	     0, 0, 0} },
645 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
646 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
647 	     0, 0} },
648 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
649 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
650 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
651 /* 10 */
652 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
653 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
654 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
656 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
657 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
658 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
659 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
660 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
662 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
663 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
664 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
665 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
666 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
667 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
668 				0xff, 0, 0xc7, 0, 0, 0, 0} },
669 /* 15 */
670 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
671 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
673 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
674 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
676 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
677 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
678 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
679 	     0xff, 0xff} },
680 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
681 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
682 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
683 	     0} },
684 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
685 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
686 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
687 	     0} },
688 /* 20 */
689 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
690 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
692 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
694 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
696 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
698 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
699 /* 25 */
700 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
701 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
702 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
703 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
704 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
705 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
706 		 0, 0, 0, 0, 0} },
707 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
708 	    resp_sync_cache, sync_cache_iarr,
709 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
710 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
711 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
712 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
713 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
714 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
715 	    resp_pre_fetch, pre_fetch_iarr,
716 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
718 
719 /* 30 */
720 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
721 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
722 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
723 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
724 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
725 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
726 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
727 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
728 /* sentinel */
729 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
730 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
731 };
732 
733 static int sdebug_num_hosts;
734 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
735 static int sdebug_ato = DEF_ATO;
736 static int sdebug_cdb_len = DEF_CDB_LEN;
737 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
738 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
739 static int sdebug_dif = DEF_DIF;
740 static int sdebug_dix = DEF_DIX;
741 static int sdebug_dsense = DEF_D_SENSE;
742 static int sdebug_every_nth = DEF_EVERY_NTH;
743 static int sdebug_fake_rw = DEF_FAKE_RW;
744 static unsigned int sdebug_guard = DEF_GUARD;
745 static int sdebug_host_max_queue;	/* per host */
746 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
747 static int sdebug_max_luns = DEF_MAX_LUNS;
748 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
749 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
750 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
751 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
752 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
753 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
754 static int sdebug_no_uld;
755 static int sdebug_num_parts = DEF_NUM_PARTS;
756 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
757 static int sdebug_opt_blks = DEF_OPT_BLKS;
758 static int sdebug_opts = DEF_OPTS;
759 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
760 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
761 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
762 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
763 static int sdebug_sector_size = DEF_SECTOR_SIZE;
764 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
765 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
766 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
767 static unsigned int sdebug_lbpu = DEF_LBPU;
768 static unsigned int sdebug_lbpws = DEF_LBPWS;
769 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
770 static unsigned int sdebug_lbprz = DEF_LBPRZ;
771 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
772 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
773 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
774 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
775 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
776 static int sdebug_uuid_ctl = DEF_UUID_CTL;
777 static bool sdebug_random = DEF_RANDOM;
778 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
779 static bool sdebug_removable = DEF_REMOVABLE;
780 static bool sdebug_clustering;
781 static bool sdebug_host_lock = DEF_HOST_LOCK;
782 static bool sdebug_strict = DEF_STRICT;
783 static bool sdebug_any_injecting_opt;
784 static bool sdebug_verbose;
785 static bool have_dif_prot;
786 static bool write_since_sync;
787 static bool sdebug_statistics = DEF_STATISTICS;
788 static bool sdebug_wp;
789 /* Following enum: 0: no zbc (default); 1: host aware; 2: host managed */
790 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
791 static char *sdeb_zbc_model_s;
792 
793 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
794 			  SAM_LUN_AM_FLAT = 0x1,
795 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
796 			  SAM_LUN_AM_EXTENDED = 0x3};
797 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
798 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
799 
800 static unsigned int sdebug_store_sectors;
801 static sector_t sdebug_capacity;	/* in sectors */
802 
803 /* old BIOS stuff; the kernel may get rid of these, but some mode sense
804    pages may still need them */
805 static int sdebug_heads;		/* heads per disk */
806 static int sdebug_cylinders_per;	/* cylinders per surface */
807 static int sdebug_sectors_per;		/* sectors per cylinder */
808 
809 static LIST_HEAD(sdebug_host_list);
810 static DEFINE_SPINLOCK(sdebug_host_list_lock);
811 
812 static struct xarray per_store_arr;
813 static struct xarray *per_store_ap = &per_store_arr;
814 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
815 static int sdeb_most_recent_idx = -1;
816 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
817 
818 static unsigned long map_size;
819 static int num_aborts;
820 static int num_dev_resets;
821 static int num_target_resets;
822 static int num_bus_resets;
823 static int num_host_resets;
824 static int dix_writes;
825 static int dix_reads;
826 static int dif_errors;
827 
828 /* ZBC global data */
829 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
830 static int sdeb_zbc_zone_size_mb;
831 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
832 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
833 
834 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
835 static int poll_queues; /* io_uring iopoll interface */
836 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
837 
838 static DEFINE_RWLOCK(atomic_rw);
839 static DEFINE_RWLOCK(atomic_rw2);
840 
841 static rwlock_t *ramdisk_lck_a[2];
842 
843 static char sdebug_proc_name[] = MY_NAME;
844 static const char *my_name = MY_NAME;
845 
846 static struct bus_type pseudo_lld_bus;
847 
848 static struct device_driver sdebug_driverfs_driver = {
849 	.name 		= sdebug_proc_name,
850 	.bus		= &pseudo_lld_bus,
851 };
852 
853 static const int check_condition_result =
854 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
855 
856 static const int illegal_condition_result =
857 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
858 
859 static const int device_qfull_result =
860 	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
861 
862 static const int condition_met_result = SAM_STAT_CONDITION_MET;
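/*
 * These follow the SCSI mid-level's packed-int layout: driver byte in
 * bits 31..24, host byte in bits 23..16 and the SAM status in the low
 * byte (e.g. SAM_STAT_CHECK_CONDITION is 0x2).
 */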
863 
864 
865 /* Only do the extra work involved in logical block provisioning if one or
866  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
867  * real reads and writes (i.e. not skipping them for speed).
868  */
869 static inline bool scsi_debug_lbp(void)
870 {
871 	return 0 == sdebug_fake_rw &&
872 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
873 }
874 
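/*
 * Map an LBA to its location in the backing ram store. When the reported
 * capacity exceeds the store (e.g. virtual_gb > 0), LBAs wrap modulo
 * sdebug_store_sectors, so widely separated blocks alias the same memory.
 */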
875 static void *lba2fake_store(struct sdeb_store_info *sip,
876 			    unsigned long long lba)
877 {
878 	struct sdeb_store_info *lsip = sip;
879 
880 	lba = do_div(lba, sdebug_store_sectors);
881 	if (!sip || !sip->storep) {
882 		WARN_ON_ONCE(true);
883 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
884 	}
885 	return lsip->storep + lba * sdebug_sector_size;
886 }
887 
888 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
889 				      sector_t sector)
890 {
891 	sector = sector_div(sector, sdebug_store_sectors);
892 
893 	return sip->dif_storep + sector;
894 }
895 
896 static void sdebug_max_tgts_luns(void)
897 {
898 	struct sdebug_host_info *sdbg_host;
899 	struct Scsi_Host *hpnt;
900 
901 	spin_lock(&sdebug_host_list_lock);
902 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
903 		hpnt = sdbg_host->shost;
904 		if ((hpnt->this_id >= 0) &&
905 		    (sdebug_num_tgts > hpnt->this_id))
906 			hpnt->max_id = sdebug_num_tgts + 1;
907 		else
908 			hpnt->max_id = sdebug_num_tgts;
909 		/* sdebug_max_luns; */
910 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
911 	}
912 	spin_unlock(&sdebug_host_list_lock);
913 }
914 
915 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
916 
917 /* Set in_bit to -1 to indicate no bit position of invalid field */
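/*
 * The 3-byte SENSE KEY SPECIFIC field built below follows SPC: sks[0]
 * carries SKSV (bit 7), C/D (bit 6, set for cdb errors) and BPV (bit 3)
 * plus a 3-bit bit pointer; sks[1..2] hold the big-endian field (byte)
 * pointer.
 */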
918 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
919 				 enum sdeb_cmd_data c_d,
920 				 int in_byte, int in_bit)
921 {
922 	unsigned char *sbuff;
923 	u8 sks[4];
924 	int sl, asc;
925 
926 	sbuff = scp->sense_buffer;
927 	if (!sbuff) {
928 		sdev_printk(KERN_ERR, scp->device,
929 			    "%s: sense_buffer is NULL\n", __func__);
930 		return;
931 	}
932 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
933 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
934 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
935 	memset(sks, 0, sizeof(sks));
936 	sks[0] = 0x80;
937 	if (c_d)
938 		sks[0] |= 0x40;
939 	if (in_bit >= 0) {
940 		sks[0] |= 0x8;
941 		sks[0] |= 0x7 & in_bit;
942 	}
943 	put_unaligned_be16(in_byte, sks + 1);
944 	if (sdebug_dsense) {
945 		sl = sbuff[7] + 8;
946 		sbuff[7] = sl;
947 		sbuff[sl] = 0x2;
948 		sbuff[sl + 1] = 0x6;
949 		memcpy(sbuff + sl + 4, sks, 3);
950 	} else
951 		memcpy(sbuff + 15, sks, 3);
952 	if (sdebug_verbose)
953 		sdev_printk(KERN_INFO, scp->device,
954 			    "%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
955 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
956 }
957 
958 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
959 {
960 	unsigned char *sbuff;
961 
962 	sbuff = scp->sense_buffer;
963 	if (!sbuff) {
964 		sdev_printk(KERN_ERR, scp->device,
965 			    "%s: sense_buffer is NULL\n", __func__);
966 		return;
967 	}
968 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
969 
970 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
971 
972 	if (sdebug_verbose)
973 		sdev_printk(KERN_INFO, scp->device,
974 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
975 			    my_name, key, asc, asq);
976 }
977 
978 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
979 {
980 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
981 }
982 
983 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
984 			    void __user *arg)
985 {
986 	if (sdebug_verbose) {
987 		if (0x1261 == cmd)
988 			sdev_printk(KERN_INFO, dev,
989 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
990 		else if (0x5331 == cmd)
991 			sdev_printk(KERN_INFO, dev,
992 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
993 				    __func__);
994 		else
995 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
996 				    __func__, cmd);
997 	}
998 	return -EINVAL;
999 	/* return -ENOTTY; // correct return but upsets fdisk */
1000 }
1001 
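/*
 * Example: with sdebug_cdb_len=16 the mid-level is steered toward
 * READ(16)/WRITE(16) and 10 byte MODE SENSE/SELECT for attached devices.
 */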
1002 static void config_cdb_len(struct scsi_device *sdev)
1003 {
1004 	switch (sdebug_cdb_len) {
1005 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1006 		sdev->use_10_for_rw = false;
1007 		sdev->use_16_for_rw = false;
1008 		sdev->use_10_for_ms = false;
1009 		break;
1010 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1011 		sdev->use_10_for_rw = true;
1012 		sdev->use_16_for_rw = false;
1013 		sdev->use_10_for_ms = false;
1014 		break;
1015 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1016 		sdev->use_10_for_rw = true;
1017 		sdev->use_16_for_rw = false;
1018 		sdev->use_10_for_ms = true;
1019 		break;
1020 	case 16:
1021 		sdev->use_10_for_rw = false;
1022 		sdev->use_16_for_rw = true;
1023 		sdev->use_10_for_ms = true;
1024 		break;
1025 	case 32: /* No knobs to suggest this so same as 16 for now */
1026 		sdev->use_10_for_rw = false;
1027 		sdev->use_16_for_rw = true;
1028 		sdev->use_10_for_ms = true;
1029 		break;
1030 	default:
1031 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1032 			sdebug_cdb_len);
1033 		sdev->use_10_for_rw = true;
1034 		sdev->use_16_for_rw = false;
1035 		sdev->use_10_for_ms = false;
1036 		sdebug_cdb_len = 10;
1037 		break;
1038 	}
1039 }
1040 
1041 static void all_config_cdb_len(void)
1042 {
1043 	struct sdebug_host_info *sdbg_host;
1044 	struct Scsi_Host *shost;
1045 	struct scsi_device *sdev;
1046 
1047 	spin_lock(&sdebug_host_list_lock);
1048 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1049 		shost = sdbg_host->shost;
1050 		shost_for_each_device(sdev, shost) {
1051 			config_cdb_len(sdev);
1052 		}
1053 	}
1054 	spin_unlock(&sdebug_host_list_lock);
1055 }
1056 
1057 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1058 {
1059 	struct sdebug_host_info *sdhp;
1060 	struct sdebug_dev_info *dp;
1061 
1062 	spin_lock(&sdebug_host_list_lock);
1063 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1064 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1065 			if ((devip->sdbg_host == dp->sdbg_host) &&
1066 			    (devip->target == dp->target))
1067 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1068 		}
1069 	}
1070 	spin_unlock(&sdebug_host_list_lock);
1071 }
1072 
1073 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1074 {
1075 	int k;
1076 
1077 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1078 	if (k != SDEBUG_NUM_UAS) {
1079 		const char *cp = NULL;
1080 
1081 		switch (k) {
1082 		case SDEBUG_UA_POR:
1083 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1084 					POWER_ON_RESET_ASCQ);
1085 			if (sdebug_verbose)
1086 				cp = "power on reset";
1087 			break;
1088 		case SDEBUG_UA_BUS_RESET:
1089 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1090 					BUS_RESET_ASCQ);
1091 			if (sdebug_verbose)
1092 				cp = "bus reset";
1093 			break;
1094 		case SDEBUG_UA_MODE_CHANGED:
1095 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1096 					MODE_CHANGED_ASCQ);
1097 			if (sdebug_verbose)
1098 				cp = "mode parameters changed";
1099 			break;
1100 		case SDEBUG_UA_CAPACITY_CHANGED:
1101 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1102 					CAPACITY_CHANGED_ASCQ);
1103 			if (sdebug_verbose)
1104 				cp = "capacity data changed";
1105 			break;
1106 		case SDEBUG_UA_MICROCODE_CHANGED:
1107 			mk_sense_buffer(scp, UNIT_ATTENTION,
1108 					TARGET_CHANGED_ASC,
1109 					MICROCODE_CHANGED_ASCQ);
1110 			if (sdebug_verbose)
1111 				cp = "microcode has been changed";
1112 			break;
1113 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1114 			mk_sense_buffer(scp, UNIT_ATTENTION,
1115 					TARGET_CHANGED_ASC,
1116 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1117 			if (sdebug_verbose)
1118 				cp = "microcode has been changed without reset";
1119 			break;
1120 		case SDEBUG_UA_LUNS_CHANGED:
1121 			/*
1122 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1123 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1124 			 * on the target, until a REPORT LUNS command is
1125 			 * received.  SPC-4 behavior is to report it only once.
1126 			 * NOTE:  sdebug_scsi_level does not use the same
1127 			 * values as struct scsi_device->scsi_level.
1128 			 */
1129 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1130 				clear_luns_changed_on_target(devip);
1131 			mk_sense_buffer(scp, UNIT_ATTENTION,
1132 					TARGET_CHANGED_ASC,
1133 					LUNS_CHANGED_ASCQ);
1134 			if (sdebug_verbose)
1135 				cp = "reported luns data has changed";
1136 			break;
1137 		default:
1138 			pr_warn("unexpected unit attention code=%d\n", k);
1139 			if (sdebug_verbose)
1140 				cp = "unknown";
1141 			break;
1142 		}
1143 		clear_bit(k, devip->uas_bm);
1144 		if (sdebug_verbose)
1145 			sdev_printk(KERN_INFO, scp->device,
1146 				   "%s reports: Unit attention: %s\n",
1147 				   my_name, cp);
1148 		return check_condition_result;
1149 	}
1150 	return 0;
1151 }
1152 
1153 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1154 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1155 				int arr_len)
1156 {
1157 	int act_len;
1158 	struct scsi_data_buffer *sdb = &scp->sdb;
1159 
1160 	if (!sdb->length)
1161 		return 0;
1162 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1163 		return DID_ERROR << 16;
1164 
1165 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1166 				      arr, arr_len);
1167 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1168 
1169 	return 0;
1170 }
1171 
1172 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1173  * (DID_ERROR << 16). Can write to offset in data-in buffer. If called
1174  * multiple times, writes need not be in ascending offset order. Assumes
1175  * resid is set to scsi_bufflen() prior to any calls.
1176  */
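/*
 * Worked example: for a 512 byte data-in buffer, copying 8 bytes at
 * off_dst 4 gives n = 512 - (4 + 8) = 500, so resid is clamped to at
 * most 500 irrespective of the order of partial fills.
 */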
1177 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1178 				  int arr_len, unsigned int off_dst)
1179 {
1180 	unsigned int act_len, n;
1181 	struct scsi_data_buffer *sdb = &scp->sdb;
1182 	off_t skip = off_dst;
1183 
1184 	if (sdb->length <= off_dst)
1185 		return 0;
1186 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1187 		return DID_ERROR << 16;
1188 
1189 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1190 				       arr, arr_len, skip);
1191 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1192 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1193 		 scsi_get_resid(scp));
1194 	n = scsi_bufflen(scp) - (off_dst + act_len);
1195 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1196 	return 0;
1197 }
1198 
1199 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1200  * 'arr' or -1 if error.
1201  */
1202 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1203 			       int arr_len)
1204 {
1205 	if (!scsi_bufflen(scp))
1206 		return 0;
1207 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1208 		return -1;
1209 
1210 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1211 }
1212 
1213 
1214 static char sdebug_inq_vendor_id[9] = "Linux   ";
1215 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1216 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1217 /* Use some locally assigned NAAs for SAS addresses. */
1218 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1219 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1220 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1221 
1222 /* Device identification VPD page. Returns number of bytes placed in arr */
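/*
 * Each designation descriptor below begins with the standard 4 byte
 * header (protocol/code set, PIV/association/designator type, reserved,
 * length) followed by the designator itself.
 */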
1223 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1224 			  int target_dev_id, int dev_id_num,
1225 			  const char *dev_id_str, int dev_id_str_len,
1226 			  const uuid_t *lu_name)
1227 {
1228 	int num, port_a;
1229 	char b[32];
1230 
1231 	port_a = target_dev_id + 1;
1232 	/* T10 vendor identifier field format (faked) */
1233 	arr[0] = 0x2;	/* ASCII */
1234 	arr[1] = 0x1;
1235 	arr[2] = 0x0;
1236 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1237 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1238 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1239 	num = 8 + 16 + dev_id_str_len;
1240 	arr[3] = num;
1241 	num += 4;
1242 	if (dev_id_num >= 0) {
1243 		if (sdebug_uuid_ctl) {
1244 			/* Locally assigned UUID */
1245 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1246 			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
1247 			arr[num++] = 0x0;
1248 			arr[num++] = 0x12;
1249 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1250 			arr[num++] = 0x0;
1251 			memcpy(arr + num, lu_name, 16);
1252 			num += 16;
1253 		} else {
1254 			/* NAA-3, Logical unit identifier (binary) */
1255 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1256 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1257 			arr[num++] = 0x0;
1258 			arr[num++] = 0x8;
1259 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1260 			num += 8;
1261 		}
1262 		/* Target relative port number */
1263 		arr[num++] = 0x61;	/* proto=sas, binary */
1264 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1265 		arr[num++] = 0x0;	/* reserved */
1266 		arr[num++] = 0x4;	/* length */
1267 		arr[num++] = 0x0;	/* reserved */
1268 		arr[num++] = 0x0;	/* reserved */
1269 		arr[num++] = 0x0;
1270 		arr[num++] = 0x1;	/* relative port A */
1271 	}
1272 	/* NAA-3, Target port identifier */
1273 	arr[num++] = 0x61;	/* proto=sas, binary */
1274 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1275 	arr[num++] = 0x0;
1276 	arr[num++] = 0x8;
1277 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1278 	num += 8;
1279 	/* NAA-3, Target port group identifier */
1280 	arr[num++] = 0x61;	/* proto=sas, binary */
1281 	arr[num++] = 0x95;	/* piv=1, target port group id */
1282 	arr[num++] = 0x0;
1283 	arr[num++] = 0x4;
1284 	arr[num++] = 0;
1285 	arr[num++] = 0;
1286 	put_unaligned_be16(port_group_id, arr + num);
1287 	num += 2;
1288 	/* NAA-3, Target device identifier */
1289 	arr[num++] = 0x61;	/* proto=sas, binary */
1290 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1291 	arr[num++] = 0x0;
1292 	arr[num++] = 0x8;
1293 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1294 	num += 8;
1295 	/* SCSI name string: Target device identifier */
1296 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1297 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1298 	arr[num++] = 0x0;
1299 	arr[num++] = 24;
1300 	memcpy(arr + num, "naa.32222220", 12);
1301 	num += 12;
1302 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1303 	memcpy(arr + num, b, 8);
1304 	num += 8;
1305 	memset(arr + num, 0, 4);
1306 	num += 4;
1307 	return num;
1308 }
1309 
1310 static unsigned char vpd84_data[] = {
1311 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1312     0x22,0x22,0x22,0x0,0xbb,0x1,
1313     0x22,0x22,0x22,0x0,0xbb,0x2,
1314 };
1315 
1316 /*  Software interface identification VPD page */
1317 static int inquiry_vpd_84(unsigned char *arr)
1318 {
1319 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1320 	return sizeof(vpd84_data);
1321 }
1322 
1323 /* Management network addresses VPD page */
1324 static int inquiry_vpd_85(unsigned char *arr)
1325 {
1326 	int num = 0;
1327 	const char *na1 = "https://www.kernel.org/config";
1328 	const char *na2 = "http://www.kernel.org/log";
1329 	int plen, olen;
1330 
1331 	arr[num++] = 0x1;	/* lu, storage config */
1332 	arr[num++] = 0x0;	/* reserved */
1333 	arr[num++] = 0x0;
1334 	olen = strlen(na1);
1335 	plen = olen + 1;
1336 	if (plen % 4)
1337 		plen = ((plen / 4) + 1) * 4;
1338 	arr[num++] = plen;	/* length, null terminated, padded */
1339 	memcpy(arr + num, na1, olen);
1340 	memset(arr + num + olen, 0, plen - olen);
1341 	num += plen;
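	/*
	 * Worked example: na1 is 29 characters, so plen starts at 30 and is
	 * rounded up to 32 (the next multiple of four); the memset above
	 * supplies the trailing NUL padding.
	 */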
1342 
1343 	arr[num++] = 0x4;	/* lu, logging */
1344 	arr[num++] = 0x0;	/* reserved */
1345 	arr[num++] = 0x0;
1346 	olen = strlen(na2);
1347 	plen = olen + 1;
1348 	if (plen % 4)
1349 		plen = ((plen / 4) + 1) * 4;
1350 	arr[num++] = plen;	/* length, null terminated, padded */
1351 	memcpy(arr + num, na2, olen);
1352 	memset(arr + num + olen, 0, plen - olen);
1353 	num += plen;
1354 
1355 	return num;
1356 }
1357 
1358 /* SCSI ports VPD page */
1359 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1360 {
1361 	int num = 0;
1362 	int port_a, port_b;
1363 
1364 	port_a = target_dev_id + 1;
1365 	port_b = port_a + 1;
1366 	arr[num++] = 0x0;	/* reserved */
1367 	arr[num++] = 0x0;	/* reserved */
1368 	arr[num++] = 0x0;
1369 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1370 	memset(arr + num, 0, 6);
1371 	num += 6;
1372 	arr[num++] = 0x0;
1373 	arr[num++] = 12;	/* length tp descriptor */
1374 	/* naa-5 target port identifier (A) */
1375 	arr[num++] = 0x61;	/* proto=sas, binary */
1376 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x8;	/* length */
1379 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1380 	num += 8;
1381 	arr[num++] = 0x0;	/* reserved */
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1385 	memset(arr + num, 0, 6);
1386 	num += 6;
1387 	arr[num++] = 0x0;
1388 	arr[num++] = 12;	/* length tp descriptor */
1389 	/* naa-5 target port identifier (B) */
1390 	arr[num++] = 0x61;	/* proto=sas, binary */
1391 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x8;	/* length */
1394 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1395 	num += 8;
1396 
1397 	return num;
1398 }
1399 
1400 
1401 static unsigned char vpd89_data[] = {
1402 /* from 4th byte */ 0,0,0,0,
1403 'l','i','n','u','x',' ',' ',' ',
1404 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1405 '1','2','3','4',
1406 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1407 0xec,0,0,0,
1408 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1409 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1410 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1411 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1412 0x53,0x41,
1413 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1414 0x20,0x20,
1415 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1416 0x10,0x80,
1417 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1418 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1419 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1420 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1421 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1422 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1423 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1424 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1428 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1429 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1430 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1443 };
1444 
1445 /* ATA Information VPD page */
1446 static int inquiry_vpd_89(unsigned char *arr)
1447 {
1448 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1449 	return sizeof(vpd89_data);
1450 }
1451 
1452 
1453 static unsigned char vpdb0_data[] = {
1454 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1455 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1456 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1458 };
1459 
1460 /* Block limits VPD page (SBC-3) */
1461 static int inquiry_vpd_b0(unsigned char *arr)
1462 {
1463 	unsigned int gran;
1464 
1465 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1466 
1467 	/* Optimal transfer length granularity */
1468 	if (sdebug_opt_xferlen_exp != 0 &&
1469 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1470 		gran = 1 << sdebug_opt_xferlen_exp;
1471 	else
1472 		gran = 1 << sdebug_physblk_exp;
1473 	put_unaligned_be16(gran, arr + 2);
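	/*
	 * Example: with module parameters opt_xferlen_exp=0 and physblk_exp=3,
	 * the else branch above is taken and gran = 1 << 3, i.e. an optimal
	 * transfer granularity of 8 logical blocks.
	 */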
1474 
1475 	/* Maximum Transfer Length */
1476 	if (sdebug_store_sectors > 0x400)
1477 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1478 
1479 	/* Optimal Transfer Length */
1480 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1481 
1482 	if (sdebug_lbpu) {
1483 		/* Maximum Unmap LBA Count */
1484 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1485 
1486 		/* Maximum Unmap Block Descriptor Count */
1487 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1488 	}
1489 
1490 	/* Unmap Granularity Alignment */
1491 	if (sdebug_unmap_alignment) {
1492 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1493 		arr[28] |= 0x80; /* UGAVALID */
1494 	}
1495 
1496 	/* Optimal Unmap Granularity */
1497 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1498 
1499 	/* Maximum WRITE SAME Length */
1500 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1501 
1502 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1505 }
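/*
 * The sd driver reads this Block limits page when scanning the emulated
 * disk, so the sdebug_unmap_* and write-same module parameters set above
 * surface as the block layer's discard and WRITE SAME queue limits.
 */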
1506 
1507 /* Block device characteristics VPD page (SBC-3) */
1508 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1509 {
1510 	memset(arr, 0, 0x3c);
1511 	arr[0] = 0;
1512 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1513 	arr[2] = 0;
1514 	arr[3] = 5;	/* less than 1.8" */
1515 	if (devip->zmodel == BLK_ZONED_HA)
1516 		arr[4] = 1 << 4;	/* zoned field = 01b */
1517 
1518 	return 0x3c;
1519 }
1520 
1521 /* Logical block provisioning VPD page (SBC-4) */
1522 static int inquiry_vpd_b2(unsigned char *arr)
1523 {
1524 	memset(arr, 0, 0x4);
1525 	arr[0] = 0;			/* threshold exponent */
1526 	if (sdebug_lbpu)
1527 		arr[1] = 1 << 7;
1528 	if (sdebug_lbpws)
1529 		arr[1] |= 1 << 6;
1530 	if (sdebug_lbpws10)
1531 		arr[1] |= 1 << 5;
1532 	if (sdebug_lbprz && scsi_debug_lbp())
1533 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1534 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1535 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1536 	/* threshold_percentage=0 */
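	/*
	 * Example: lbpu=1 with lbprz=1 yields arr[1] = 0x80 | (1 << 2) = 0x84,
	 * i.e. LBPU set and LBPRZ reporting that unmapped blocks read as zero.
	 */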
1537 	return 0x4;
1538 }
1539 
1540 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1541 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1542 {
1543 	memset(arr, 0, 0x3c);
1544 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1545 	/*
1546 	 * Set Optimal number of open sequential write preferred zones and
1547 	 * Optimal number of non-sequentially written sequential write
1548 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1549 	 * fields set to zero, apart from Max. number of open swrz_s field.
1550 	 */
1551 	put_unaligned_be32(0xffffffff, &arr[4]);
1552 	put_unaligned_be32(0xffffffff, &arr[8]);
1553 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1554 		put_unaligned_be32(devip->max_open, &arr[12]);
1555 	else
1556 		put_unaligned_be32(0xffffffff, &arr[12]);
1557 	return 0x3c;
1558 }
1559 
1560 #define SDEBUG_LONG_INQ_SZ 96
1561 #define SDEBUG_MAX_INQ_ARR_SZ 584
1562 
1563 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1564 {
1565 	unsigned char pq_pdt;
1566 	unsigned char *arr;
1567 	unsigned char *cmd = scp->cmnd;
1568 	int alloc_len, n, ret;
1569 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1570 
1571 	alloc_len = get_unaligned_be16(cmd + 3);
1572 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1573 	if (!arr)
1574 		return DID_REQUEUE << 16;
1575 	is_disk = (sdebug_ptype == TYPE_DISK);
1576 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1577 	is_disk_zbc = (is_disk || is_zbc);
1578 	have_wlun = scsi_is_wlun(scp->device->lun);
1579 	if (have_wlun)
1580 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1581 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1582 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1583 	else
1584 		pq_pdt = (sdebug_ptype & 0x1f);
1585 	arr[0] = pq_pdt;
1586 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1587 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1588 		kfree(arr);
1589 		return check_condition_result;
1590 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1591 		int lu_id_num, port_group_id, target_dev_id, len;
1592 		char lu_id_str[6];
1593 		int host_no = devip->sdbg_host->shost->host_no;
1594 
1595 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1596 		    (devip->channel & 0x7f);
1597 		if (sdebug_vpd_use_hostno == 0)
1598 			host_no = 0;
1599 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1600 			    (devip->target * 1000) + devip->lun);
1601 		target_dev_id = ((host_no + 1) * 2000) +
1602 				 (devip->target * 1000) - 3;
1603 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
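		/*
		 * Example: host_no=0, channel=0, target=0, lun=0 gives
		 * port_group_id = 0x100, lu_id_num = 2000 (lu_id_str "2000")
		 * and target_dev_id = 1997.
		 */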
1604 		if (0 == cmd[2]) { /* supported vital product data pages */
1605 			arr[1] = cmd[2];	/*sanity */
1606 			n = 4;
1607 			arr[n++] = 0x0;   /* this page */
1608 			arr[n++] = 0x80;  /* unit serial number */
1609 			arr[n++] = 0x83;  /* device identification */
1610 			arr[n++] = 0x84;  /* software interface ident. */
1611 			arr[n++] = 0x85;  /* management network addresses */
1612 			arr[n++] = 0x86;  /* extended inquiry */
1613 			arr[n++] = 0x87;  /* mode page policy */
1614 			arr[n++] = 0x88;  /* SCSI ports */
1615 			if (is_disk_zbc) {	  /* SBC or ZBC */
1616 				arr[n++] = 0x89;  /* ATA information */
1617 				arr[n++] = 0xb0;  /* Block limits */
1618 				arr[n++] = 0xb1;  /* Block characteristics */
1619 				if (is_disk)
1620 					arr[n++] = 0xb2;  /* LB Provisioning */
1621 				if (is_zbc)
1622 					arr[n++] = 0xb6;  /* ZB dev. char. */
1623 			}
1624 			arr[3] = n - 4;	  /* number of supported VPD pages */
1625 		} else if (0x80 == cmd[2]) { /* unit serial number */
1626 			arr[1] = cmd[2];	/*sanity */
1627 			arr[3] = len;
1628 			memcpy(&arr[4], lu_id_str, len);
1629 		} else if (0x83 == cmd[2]) { /* device identification */
1630 			arr[1] = cmd[2];	/*sanity */
1631 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1632 						target_dev_id, lu_id_num,
1633 						lu_id_str, len,
1634 						&devip->lu_name);
1635 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1636 			arr[1] = cmd[2];	/*sanity */
1637 			arr[3] = inquiry_vpd_84(&arr[4]);
1638 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1639 			arr[1] = cmd[2];	/*sanity */
1640 			arr[3] = inquiry_vpd_85(&arr[4]);
1641 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = 0x3c;	/* number of following entries */
1644 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1645 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1646 			else if (have_dif_prot)
1647 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1648 			else
1649 				arr[4] = 0x0;   /* no protection stuff */
1650 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1651 		} else if (0x87 == cmd[2]) { /* mode page policy */
1652 			arr[1] = cmd[2];	/*sanity */
1653 			arr[3] = 0x8;	/* number of following entries */
1654 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1655 			arr[6] = 0x80;	/* mlus, shared */
1656 			arr[8] = 0x18;	 /* protocol specific lu */
1657 			arr[10] = 0x82;	 /* mlus, per initiator port */
1658 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1661 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			n = inquiry_vpd_89(&arr[4]);
1664 			put_unaligned_be16(n, arr + 2);
1665 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1666 			arr[1] = cmd[2];        /*sanity */
1667 			arr[3] = inquiry_vpd_b0(&arr[4]);
1668 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1669 			arr[1] = cmd[2];        /*sanity */
1670 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1671 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1672 			arr[1] = cmd[2];        /*sanity */
1673 			arr[3] = inquiry_vpd_b2(&arr[4]);
1674 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1675 			arr[1] = cmd[2];        /*sanity */
1676 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1677 		} else {
1678 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1679 			kfree(arr);
1680 			return check_condition_result;
1681 		}
1682 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1683 		ret = fill_from_dev_buffer(scp, arr,
1684 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1685 		kfree(arr);
1686 		return ret;
1687 	}
1688 	/* drops through here for a standard inquiry */
1689 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1690 	arr[2] = sdebug_scsi_level;
1691 	arr[3] = 2;    /* response_data_format==2 */
1692 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1693 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1694 	if (sdebug_vpd_use_hostno == 0)
1695 		arr[5] |= 0x10; /* claim: implicit TPGS */
1696 	arr[6] = 0x10; /* claim: MultiP */
1697 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1698 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1699 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1700 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1701 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1702 	/* Use Vendor Specific area to place driver date in ASCII */
1703 	memcpy(&arr[36], sdebug_version_date, 8);
1704 	/* version descriptors (2 bytes each) follow */
1705 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1706 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1707 	n = 62;
1708 	if (is_disk) {		/* SBC-4 no version claimed */
1709 		put_unaligned_be16(0x600, arr + n);
1710 		n += 2;
1711 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1712 		put_unaligned_be16(0x525, arr + n);
1713 		n += 2;
1714 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1715 		put_unaligned_be16(0x624, arr + n);
1716 		n += 2;
1717 	}
1718 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1719 	ret = fill_from_dev_buffer(scp, arr,
1720 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1721 	kfree(arr);
1722 	return ret;
1723 }
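/*
 * The standard INQUIRY and the EVPD pages built above can be exercised from
 * user space; with sg3_utils installed, commands along the lines of
 * "sg_inq /dev/sgN" or "sg_vpd -p di /dev/sgN" (device identification,
 * page 0x83) display the data these functions generate.
 */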
1724 
1725 /* See resp_iec_m_pg() for how this data is manipulated */
1726 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1727 				   0, 0, 0x0, 0x0};
1728 
1729 static int resp_requests(struct scsi_cmnd *scp,
1730 			 struct sdebug_dev_info *devip)
1731 {
1732 	unsigned char *cmd = scp->cmnd;
1733 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1734 	bool dsense = !!(cmd[1] & 1);
1735 	int alloc_len = cmd[4];
1736 	int len = 18;
1737 	int stopped_state = atomic_read(&devip->stopped);
1738 
1739 	memset(arr, 0, sizeof(arr));
1740 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1741 		if (dsense) {
1742 			arr[0] = 0x72;
1743 			arr[1] = NOT_READY;
1744 			arr[2] = LOGICAL_UNIT_NOT_READY;
1745 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1746 			len = 8;
1747 		} else {
1748 			arr[0] = 0x70;
1749 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1750 			arr[7] = 0xa;			/* 18 byte sense buffer */
1751 			arr[12] = LOGICAL_UNIT_NOT_READY;
1752 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1753 		}
1754 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1755 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1756 		if (dsense) {
1757 			arr[0] = 0x72;
1758 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1759 			arr[2] = THRESHOLD_EXCEEDED;
1760 			arr[3] = 0xff;		/* Failure prediction(false) */
1761 			len = 8;
1762 		} else {
1763 			arr[0] = 0x70;
1764 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1765 			arr[7] = 0xa;		/* 18 byte sense buffer */
1766 			arr[12] = THRESHOLD_EXCEEDED;
1767 			arr[13] = 0xff;		/* Failure prediction(false) */
1768 		}
1769 	} else {	/* nothing to report */
1770 		if (dsense) {
1771 			len = 8;
1772 			memset(arr, 0, len);
1773 			arr[0] = 0x72;
1774 		} else {
1775 			memset(arr, 0, len);
1776 			arr[0] = 0x70;
1777 			arr[7] = 0xa;
1778 		}
1779 	}
1780 	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1781 }
1782 
1783 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1784 {
1785 	unsigned char *cmd = scp->cmnd;
1786 	int power_cond, want_stop, stopped_state;
1787 	bool changing;
1788 
1789 	power_cond = (cmd[4] & 0xf0) >> 4;
1790 	if (power_cond) {
1791 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1792 		return check_condition_result;
1793 	}
1794 	want_stop = !(cmd[4] & 1);
1795 	stopped_state = atomic_read(&devip->stopped);
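	/*
	 * devip->stopped encodes: 0 -> unit started; 1 -> stopped by an earlier
	 * START STOP UNIT; 2 -> still inside the initial tur_ms_to_ready
	 * window, checked below against the device creation timestamp.
	 */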
1796 	if (stopped_state == 2) {
1797 		ktime_t now_ts = ktime_get_boottime();
1798 
1799 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1800 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1801 
1802 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1803 				/* tur_ms_to_ready timer extinguished */
1804 				atomic_set(&devip->stopped, 0);
1805 				stopped_state = 0;
1806 			}
1807 		}
1808 		if (stopped_state == 2) {
1809 			if (want_stop) {
1810 				stopped_state = 1;	/* dummy up success */
1811 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1812 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1813 				return check_condition_result;
1814 			}
1815 		}
1816 	}
1817 	changing = (stopped_state != want_stop);
1818 	if (changing)
1819 		atomic_xchg(&devip->stopped, want_stop);
1820 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1821 		return SDEG_RES_IMMED_MASK;
1822 	else
1823 		return 0;
1824 }
1825 
1826 static sector_t get_sdebug_capacity(void)
1827 {
1828 	static const unsigned int gibibyte = 1073741824;
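	/*
	 * Example: virtual_gb=4 with a 512-byte sector size reports
	 * 4 * (1073741824 / 512) = 8388608 sectors, independent of the
	 * size of the backing store.
	 */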
1829 
1830 	if (sdebug_virtual_gb > 0)
1831 		return (sector_t)sdebug_virtual_gb *
1832 			(gibibyte / sdebug_sector_size);
1833 	else
1834 		return sdebug_store_sectors;
1835 }
1836 
1837 #define SDEBUG_READCAP_ARR_SZ 8
1838 static int resp_readcap(struct scsi_cmnd *scp,
1839 			struct sdebug_dev_info *devip)
1840 {
1841 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1842 	unsigned int capac;
1843 
1844 	/* following just in case virtual_gb changed */
1845 	sdebug_capacity = get_sdebug_capacity();
1846 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1847 	if (sdebug_capacity < 0xffffffff) {
1848 		capac = (unsigned int)sdebug_capacity - 1;
1849 		put_unaligned_be32(capac, arr + 0);
1850 	} else
1851 		put_unaligned_be32(0xffffffff, arr + 0);
1852 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1853 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1854 }
1855 
1856 #define SDEBUG_READCAP16_ARR_SZ 32
1857 static int resp_readcap16(struct scsi_cmnd *scp,
1858 			  struct sdebug_dev_info *devip)
1859 {
1860 	unsigned char *cmd = scp->cmnd;
1861 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1862 	int alloc_len;
1863 
1864 	alloc_len = get_unaligned_be32(cmd + 10);
1865 	/* following just in case virtual_gb changed */
1866 	sdebug_capacity = get_sdebug_capacity();
1867 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1868 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1869 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1870 	arr[13] = sdebug_physblk_exp & 0xf;
1871 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1872 
1873 	if (scsi_debug_lbp()) {
1874 		arr[14] |= 0x80; /* LBPME */
1875 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1876 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1877 		 * in the wider field maps to 0 in this field.
1878 		 */
1879 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1880 			arr[14] |= 0x40;
1881 	}
1882 
1883 	arr[15] = sdebug_lowest_aligned & 0xff;
1884 
1885 	if (have_dif_prot) {
1886 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1887 		arr[12] |= 1; /* PROT_EN */
1888 	}
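	/*
	 * Example: sdebug_dif=1 (Type 1 protection) yields arr[12] = 0x01
	 * (P_TYPE=0, PROT_EN=1); sdebug_dif=3 yields 0x05.
	 */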
1889 
1890 	return fill_from_dev_buffer(scp, arr,
1891 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1892 }
1893 
1894 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1895 
1896 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1897 			      struct sdebug_dev_info *devip)
1898 {
1899 	unsigned char *cmd = scp->cmnd;
1900 	unsigned char *arr;
1901 	int host_no = devip->sdbg_host->shost->host_no;
1902 	int n, ret, alen, rlen;
1903 	int port_group_a, port_group_b, port_a, port_b;
1904 
1905 	alen = get_unaligned_be32(cmd + 6);
1906 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1907 	if (!arr)
1908 		return DID_REQUEUE << 16;
1909 	/*
1910 	 * EVPD page 0x88 states we have two ports, one
1911 	 * real and a fake port with no device connected.
1912 	 * So we create two port groups with one port each
1913 	 * and set the group with port B to unavailable.
1914 	 */
1915 	port_a = 0x1; /* relative port A */
1916 	port_b = 0x2; /* relative port B */
1917 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 			(devip->channel & 0x7f);
1919 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 			(devip->channel & 0x7f) + 0x80;
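	/*
	 * Example: host_no=0, channel=0 gives port_group_a = 0x100 and
	 * port_group_b = 0x180; the 0x80 offset keeps the two groups distinct
	 * within one host/channel pair.
	 */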
1921 
1922 	/*
1923 	 * The asymmetric access state is cycled according to the host_id.
1924 	 */
1925 	n = 4;
1926 	if (sdebug_vpd_use_hostno == 0) {
1927 		arr[n++] = host_no % 3; /* Asymm access state */
1928 		arr[n++] = 0x0F; /* claim: all states are supported */
1929 	} else {
1930 		arr[n++] = 0x0; /* Active/Optimized path */
1931 		arr[n++] = 0x01; /* only support active/optimized paths */
1932 	}
1933 	put_unaligned_be16(port_group_a, arr + n);
1934 	n += 2;
1935 	arr[n++] = 0;    /* Reserved */
1936 	arr[n++] = 0;    /* Status code */
1937 	arr[n++] = 0;    /* Vendor unique */
1938 	arr[n++] = 0x1;  /* One port per group */
1939 	arr[n++] = 0;    /* Reserved */
1940 	arr[n++] = 0;    /* Reserved */
1941 	put_unaligned_be16(port_a, arr + n);
1942 	n += 2;
1943 	arr[n++] = 3;    /* Port unavailable */
1944 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1945 	put_unaligned_be16(port_group_b, arr + n);
1946 	n += 2;
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Status code */
1949 	arr[n++] = 0;    /* Vendor unique */
1950 	arr[n++] = 0x1;  /* One port per group */
1951 	arr[n++] = 0;    /* Reserved */
1952 	arr[n++] = 0;    /* Reserved */
1953 	put_unaligned_be16(port_b, arr + n);
1954 	n += 2;
1955 
1956 	rlen = n - 4;
1957 	put_unaligned_be32(rlen, arr + 0);
1958 
1959 	/*
1960 	 * Return the smallest value of either
1961 	 * - The allocated length
1962 	 * - The constructed command length
1963 	 * - The maximum array size
1964 	 */
1965 	rlen = min_t(int, alen, n);
1966 	ret = fill_from_dev_buffer(scp, arr,
1967 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1968 	kfree(arr);
1969 	return ret;
1970 }
1971 
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 			     struct sdebug_dev_info *devip)
1974 {
1975 	bool rctd;
1976 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1977 	u16 req_sa, u;
1978 	u32 alloc_len, a_len;
1979 	int k, offset, len, errsts, count, bump, na;
1980 	const struct opcode_info_t *oip;
1981 	const struct opcode_info_t *r_oip;
1982 	u8 *arr;
1983 	u8 *cmd = scp->cmnd;
1984 
1985 	rctd = !!(cmd[2] & 0x80);
1986 	reporting_opts = cmd[2] & 0x7;
1987 	req_opcode = cmd[3];
1988 	req_sa = get_unaligned_be16(cmd + 4);
1989 	alloc_len = get_unaligned_be32(cmd + 6);
1990 	if (alloc_len < 4 || alloc_len > 0xffff) {
1991 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 		return check_condition_result;
1993 	}
1994 	if (alloc_len > 8192)
1995 		a_len = 8192;
1996 	else
1997 		a_len = alloc_len;
1998 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1999 	if (NULL == arr) {
2000 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2001 				INSUFF_RES_ASCQ);
2002 		return check_condition_result;
2003 	}
2004 	switch (reporting_opts) {
2005 	case 0:	/* all commands */
2006 		/* count number of commands */
2007 		for (count = 0, oip = opcode_info_arr;
2008 		     oip->num_attached != 0xff; ++oip) {
2009 			if (F_INV_OP & oip->flags)
2010 				continue;
2011 			count += (oip->num_attached + 1);
2012 		}
2013 		bump = rctd ? 20 : 8;
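		/*
		 * Each command descriptor is 8 bytes; when RCTD is set a
		 * 12-byte command timeouts descriptor follows each one, hence
		 * the 20-byte bump.
		 */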
2014 		put_unaligned_be32(count * bump, arr);
2015 		for (offset = 4, oip = opcode_info_arr;
2016 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 			if (F_INV_OP & oip->flags)
2018 				continue;
2019 			na = oip->num_attached;
2020 			arr[offset] = oip->opcode;
2021 			put_unaligned_be16(oip->sa, arr + offset + 2);
2022 			if (rctd)
2023 				arr[offset + 5] |= 0x2;
2024 			if (FF_SA & oip->flags)
2025 				arr[offset + 5] |= 0x1;
2026 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2027 			if (rctd)
2028 				put_unaligned_be16(0xa, arr + offset + 8);
2029 			r_oip = oip;
2030 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 				if (F_INV_OP & oip->flags)
2032 					continue;
2033 				offset += bump;
2034 				arr[offset] = oip->opcode;
2035 				put_unaligned_be16(oip->sa, arr + offset + 2);
2036 				if (rctd)
2037 					arr[offset + 5] |= 0x2;
2038 				if (FF_SA & oip->flags)
2039 					arr[offset + 5] |= 0x1;
2040 				put_unaligned_be16(oip->len_mask[0],
2041 						   arr + offset + 6);
2042 				if (rctd)
2043 					put_unaligned_be16(0xa,
2044 							   arr + offset + 8);
2045 			}
2046 			oip = r_oip;
2047 			offset += bump;
2048 		}
2049 		break;
2050 	case 1:	/* one command: opcode only */
2051 	case 2:	/* one command: opcode plus service action */
2052 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2053 		sdeb_i = opcode_ind_arr[req_opcode];
2054 		oip = &opcode_info_arr[sdeb_i];
2055 		if (F_INV_OP & oip->flags) {
2056 			supp = 1;
2057 			offset = 4;
2058 		} else {
2059 			if (1 == reporting_opts) {
2060 				if (FF_SA & oip->flags) {
2061 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 							     2, 2);
2063 					kfree(arr);
2064 					return check_condition_result;
2065 				}
2066 				req_sa = 0;
2067 			} else if (2 == reporting_opts &&
2068 				   0 == (FF_SA & oip->flags)) {
2069 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 				kfree(arr);	/* point at requested sa */
2071 				return check_condition_result;
2072 			}
2073 			if (0 == (FF_SA & oip->flags) &&
2074 			    req_opcode == oip->opcode)
2075 				supp = 3;
2076 			else if (0 == (FF_SA & oip->flags)) {
2077 				na = oip->num_attached;
2078 				for (k = 0, oip = oip->arrp; k < na;
2079 				     ++k, ++oip) {
2080 					if (req_opcode == oip->opcode)
2081 						break;
2082 				}
2083 				supp = (k >= na) ? 1 : 3;
2084 			} else if (req_sa != oip->sa) {
2085 				na = oip->num_attached;
2086 				for (k = 0, oip = oip->arrp; k < na;
2087 				     ++k, ++oip) {
2088 					if (req_sa == oip->sa)
2089 						break;
2090 				}
2091 				supp = (k >= na) ? 1 : 3;
2092 			} else
2093 				supp = 3;
2094 			if (3 == supp) {
2095 				u = oip->len_mask[0];
2096 				put_unaligned_be16(u, arr + 2);
2097 				arr[4] = oip->opcode;
2098 				for (k = 1; k < u; ++k)
2099 					arr[4 + k] = (k < 16) ?
2100 						 oip->len_mask[k] : 0xff;
2101 				offset = 4 + u;
2102 			} else
2103 				offset = 4;
2104 		}
2105 		arr[1] = (rctd ? 0x80 : 0) | supp;
2106 		if (rctd) {
2107 			put_unaligned_be16(0xa, arr + offset);
2108 			offset += 12;
2109 		}
2110 		break;
2111 	default:
2112 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2113 		kfree(arr);
2114 		return check_condition_result;
2115 	}
2116 	offset = (offset < a_len) ? offset : a_len;
2117 	len = (offset < alloc_len) ? offset : alloc_len;
2118 	errsts = fill_from_dev_buffer(scp, arr, len);
2119 	kfree(arr);
2120 	return errsts;
2121 }
2122 
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 			  struct sdebug_dev_info *devip)
2125 {
2126 	bool repd;
2127 	u32 alloc_len, len;
2128 	u8 arr[16];
2129 	u8 *cmd = scp->cmnd;
2130 
2131 	memset(arr, 0, sizeof(arr));
2132 	repd = !!(cmd[2] & 0x80);
2133 	alloc_len = get_unaligned_be32(cmd + 6);
2134 	if (alloc_len < 4) {
2135 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 		return check_condition_result;
2137 	}
2138 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2139 	arr[1] = 0x1;		/* ITNRS */
2140 	if (repd) {
2141 		arr[3] = 0xc;
2142 		len = 16;
2143 	} else
2144 		len = 4;
2145 
2146 	len = (len < alloc_len) ? len : alloc_len;
2147 	return fill_from_dev_buffer(scp, arr, len);
2148 }
2149 
2150 /* <<Following mode page info copied from ST318451LW>> */
2151 
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 {	/* Read-Write Error Recovery page for mode_sense */
2154 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 					5, 0, 0xff, 0xff};
2156 
2157 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2158 	if (1 == pcontrol)
2159 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 	return sizeof(err_recov_pg);
2161 }
2162 
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { 	/* Disconnect-Reconnect page for mode_sense */
2165 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 					 0, 0, 0, 0, 0, 0, 0, 0};
2167 
2168 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2169 	if (1 == pcontrol)
2170 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 	return sizeof(disconnect_pg);
2172 }
2173 
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 {       /* Format device page for mode_sense */
2176 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 				     0, 0, 0, 0, 0, 0, 0, 0,
2178 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2179 
2180 	memcpy(p, format_pg, sizeof(format_pg));
2181 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 	put_unaligned_be16(sdebug_sector_size, p + 12);
2183 	if (sdebug_removable)
2184 		p[20] |= 0x20; /* should agree with INQUIRY */
2185 	if (1 == pcontrol)
2186 		memset(p + 2, 0, sizeof(format_pg) - 2);
2187 	return sizeof(format_pg);
2188 }
2189 
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 				     0, 0, 0, 0};
2193 
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { 	/* Caching page for mode_sense */
2196 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2200 
2201 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2203 	memcpy(p, caching_pg, sizeof(caching_pg));
2204 	if (1 == pcontrol)
2205 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 	else if (2 == pcontrol)
2207 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 	return sizeof(caching_pg);
2209 }
2210 
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 				    0, 0, 0x2, 0x4b};
2213 
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { 	/* Control mode page for mode_sense */
2216 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2217 					0, 0, 0, 0};
2218 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2219 				     0, 0, 0x2, 0x4b};
2220 
2221 	if (sdebug_dsense)
2222 		ctrl_m_pg[2] |= 0x4;
2223 	else
2224 		ctrl_m_pg[2] &= ~0x4;
2225 
2226 	if (sdebug_ato)
2227 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2228 
2229 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2230 	if (1 == pcontrol)
2231 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 	else if (2 == pcontrol)
2233 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 	return sizeof(ctrl_m_pg);
2235 }
2236 
2237 
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 {	/* Informational Exceptions control mode page for mode_sense */
2240 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2241 				       0, 0, 0x0, 0x0};
2242 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 				      0, 0, 0x0, 0x0};
2244 
2245 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2246 	if (1 == pcontrol)
2247 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 	else if (2 == pcontrol)
2249 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 	return sizeof(iec_m_pg);
2251 }
2252 
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 {	/* SAS SSP mode page - short format for mode_sense */
2255 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2257 
2258 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2259 	if (1 == pcontrol)
2260 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 	return sizeof(sas_sf_m_pg);
2262 }
2263 
2264 
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2266 			      int target_dev_id)
2267 {	/* SAS phy control and discover mode page for mode_sense */
2268 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2271 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2272 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2273 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 		    0, 0, 0, 0, 0, 0, 0, 0,
2275 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2277 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2278 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2279 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 		    0, 0, 0, 0, 0, 0, 0, 0,
2281 		};
2282 	int port_a, port_b;
2283 
2284 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 	port_a = target_dev_id + 1;
2289 	port_b = port_a + 1;
2290 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 	put_unaligned_be32(port_a, p + 20);
2292 	put_unaligned_be32(port_b, p + 48 + 20);
2293 	if (1 == pcontrol)
2294 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 	return sizeof(sas_pcd_m_pg);
2296 }
2297 
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 {	/* SAS SSP shared protocol specific port mode subpage */
2300 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 		    0, 0, 0, 0, 0, 0, 0, 0,
2302 		};
2303 
2304 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2305 	if (1 == pcontrol)
2306 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 	return sizeof(sas_sha_m_pg);
2308 }
2309 
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2311 
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 			   struct sdebug_dev_info *devip)
2314 {
2315 	int pcontrol, pcode, subpcode, bd_len;
2316 	unsigned char dev_spec;
2317 	int alloc_len, offset, len, target_dev_id;
2318 	int target = scp->device->id;
2319 	unsigned char *ap;
2320 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2321 	unsigned char *cmd = scp->cmnd;
2322 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2323 
2324 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2325 	pcontrol = (cmd[2] & 0xc0) >> 6;
2326 	pcode = cmd[2] & 0x3f;
2327 	subpcode = cmd[3];
2328 	msense_6 = (MODE_SENSE == cmd[0]);
2329 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2330 	is_disk = (sdebug_ptype == TYPE_DISK);
2331 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2332 	if ((is_disk || is_zbc) && !dbd)
2333 		bd_len = llbaa ? 16 : 8;
2334 	else
2335 		bd_len = 0;
2336 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2337 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2338 	if (0x3 == pcontrol) {  /* Saving values not supported */
2339 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2340 		return check_condition_result;
2341 	}
2342 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2343 			(devip->target * 1000) - 3;
2344 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2345 	if (is_disk || is_zbc) {
2346 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2347 		if (sdebug_wp)
2348 			dev_spec |= 0x80;
2349 	} else
2350 		dev_spec = 0x0;
2351 	if (msense_6) {
2352 		arr[2] = dev_spec;
2353 		arr[3] = bd_len;
2354 		offset = 4;
2355 	} else {
2356 		arr[3] = dev_spec;
2357 		if (16 == bd_len)
2358 			arr[4] = 0x1;	/* set LONGLBA bit */
2359 		arr[7] = bd_len;	/* assume 255 or less */
2360 		offset = 8;
2361 	}
2362 	ap = arr + offset;
2363 	if ((bd_len > 0) && (!sdebug_capacity))
2364 		sdebug_capacity = get_sdebug_capacity();
2365 
2366 	if (8 == bd_len) {
2367 		if (sdebug_capacity > 0xfffffffe)
2368 			put_unaligned_be32(0xffffffff, ap + 0);
2369 		else
2370 			put_unaligned_be32(sdebug_capacity, ap + 0);
2371 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2372 		offset += bd_len;
2373 		ap = arr + offset;
2374 	} else if (16 == bd_len) {
2375 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2376 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2377 		offset += bd_len;
2378 		ap = arr + offset;
2379 	}
2380 
2381 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2382 		/* TODO: Control Extension page */
2383 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2384 		return check_condition_result;
2385 	}
2386 	bad_pcode = false;
2387 
2388 	switch (pcode) {
2389 	case 0x1:	/* Read-Write error recovery page, direct access */
2390 		len = resp_err_recov_pg(ap, pcontrol, target);
2391 		offset += len;
2392 		break;
2393 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2394 		len = resp_disconnect_pg(ap, pcontrol, target);
2395 		offset += len;
2396 		break;
2397 	case 0x3:       /* Format device page, direct access */
2398 		if (is_disk) {
2399 			len = resp_format_pg(ap, pcontrol, target);
2400 			offset += len;
2401 		} else
2402 			bad_pcode = true;
2403 		break;
2404 	case 0x8:	/* Caching page, direct access */
2405 		if (is_disk || is_zbc) {
2406 			len = resp_caching_pg(ap, pcontrol, target);
2407 			offset += len;
2408 		} else
2409 			bad_pcode = true;
2410 		break;
2411 	case 0xa:	/* Control Mode page, all devices */
2412 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2413 		offset += len;
2414 		break;
2415 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2416 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2417 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2418 			return check_condition_result;
2419 		}
2420 		len = 0;
2421 		if ((0x0 == subpcode) || (0xff == subpcode))
2422 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2423 		if ((0x1 == subpcode) || (0xff == subpcode))
2424 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2425 						  target_dev_id);
2426 		if ((0x2 == subpcode) || (0xff == subpcode))
2427 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 		offset += len;
2429 		break;
2430 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2431 		len = resp_iec_m_pg(ap, pcontrol, target);
2432 		offset += len;
2433 		break;
2434 	case 0x3f:	/* Read all Mode pages */
2435 		if ((0 == subpcode) || (0xff == subpcode)) {
2436 			len = resp_err_recov_pg(ap, pcontrol, target);
2437 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2438 			if (is_disk) {
2439 				len += resp_format_pg(ap + len, pcontrol,
2440 						      target);
2441 				len += resp_caching_pg(ap + len, pcontrol,
2442 						       target);
2443 			} else if (is_zbc) {
2444 				len += resp_caching_pg(ap + len, pcontrol,
2445 						       target);
2446 			}
2447 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2448 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2449 			if (0xff == subpcode) {
2450 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2451 						  target, target_dev_id);
2452 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2453 			}
2454 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2455 			offset += len;
2456 		} else {
2457 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2458 			return check_condition_result;
2459 		}
2460 		break;
2461 	default:
2462 		bad_pcode = true;
2463 		break;
2464 	}
2465 	if (bad_pcode) {
2466 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2467 		return check_condition_result;
2468 	}
2469 	if (msense_6)
2470 		arr[0] = offset - 1;
2471 	else
2472 		put_unaligned_be16((offset - 2), arr + 0);
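	/*
	 * The MODE DATA LENGTH field excludes itself: one byte in the 6-byte
	 * header, two bytes in the 10-byte header, hence offset - 1 versus
	 * offset - 2 above.
	 */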
2473 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2474 }
2475 
2476 #define SDEBUG_MAX_MSELECT_SZ 512
2477 
2478 static int resp_mode_select(struct scsi_cmnd *scp,
2479 			    struct sdebug_dev_info *devip)
2480 {
2481 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2482 	int param_len, res, mpage;
2483 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2484 	unsigned char *cmd = scp->cmnd;
2485 	int mselect6 = (MODE_SELECT == cmd[0]);
2486 
2487 	memset(arr, 0, sizeof(arr));
2488 	pf = cmd[1] & 0x10;
2489 	sp = cmd[1] & 0x1;
2490 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2491 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2492 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2493 		return check_condition_result;
2494 	}
2495 	res = fetch_to_dev_buffer(scp, arr, param_len);
2496 	if (-1 == res)
2497 		return DID_ERROR << 16;
2498 	else if (sdebug_verbose && (res < param_len))
2499 		sdev_printk(KERN_INFO, scp->device,
2500 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2501 			    __func__, param_len, res);
2502 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2503 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2504 	if (md_len > 2) {
2505 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2506 		return check_condition_result;
2507 	}
2508 	off = bd_len + (mselect6 ? 4 : 8);
2509 	mpage = arr[off] & 0x3f;
2510 	ps = !!(arr[off] & 0x80);
2511 	if (ps) {
2512 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2513 		return check_condition_result;
2514 	}
2515 	spf = !!(arr[off] & 0x40);
2516 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2517 		       (arr[off + 1] + 2);
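	/*
	 * Page_0 format: 2-byte header with a 1-byte page length; subpage
	 * (SPF=1) format: 4-byte header with a 2-byte page length, hence the
	 * two pg_len calculations above.
	 */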
2518 	if ((pg_len + off) > param_len) {
2519 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2520 				PARAMETER_LIST_LENGTH_ERR, 0);
2521 		return check_condition_result;
2522 	}
2523 	switch (mpage) {
2524 	case 0x8:      /* Caching Mode page */
2525 		if (caching_pg[1] == arr[off + 1]) {
2526 			memcpy(caching_pg + 2, arr + off + 2,
2527 			       sizeof(caching_pg) - 2);
2528 			goto set_mode_changed_ua;
2529 		}
2530 		break;
2531 	case 0xa:      /* Control Mode page */
2532 		if (ctrl_m_pg[1] == arr[off + 1]) {
2533 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2534 			       sizeof(ctrl_m_pg) - 2);
2535 			if (ctrl_m_pg[4] & 0x8)
2536 				sdebug_wp = true;
2537 			else
2538 				sdebug_wp = false;
2539 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2540 			goto set_mode_changed_ua;
2541 		}
2542 		break;
2543 	case 0x1c:      /* Informational Exceptions Mode page */
2544 		if (iec_m_pg[1] == arr[off + 1]) {
2545 			memcpy(iec_m_pg + 2, arr + off + 2,
2546 			       sizeof(iec_m_pg) - 2);
2547 			goto set_mode_changed_ua;
2548 		}
2549 		break;
2550 	default:
2551 		break;
2552 	}
2553 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2554 	return check_condition_result;
2555 set_mode_changed_ua:
2556 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2557 	return 0;
2558 }
2559 
2560 static int resp_temp_l_pg(unsigned char *arr)
2561 {
2562 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2563 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2564 		};
2565 
2566 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2567 	return sizeof(temp_l_pg);
2568 }
2569 
2570 static int resp_ie_l_pg(unsigned char *arr)
2571 {
2572 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573 		};
2574 
2575 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2576 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2577 		arr[4] = THRESHOLD_EXCEEDED;
2578 		arr[5] = 0xff;
2579 	}
2580 	return sizeof(ie_l_pg);
2581 }
2582 
2583 #define SDEBUG_MAX_LSENSE_SZ 512
2584 
2585 static int resp_log_sense(struct scsi_cmnd *scp,
2586 			  struct sdebug_dev_info *devip)
2587 {
2588 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2589 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2590 	unsigned char *cmd = scp->cmnd;
2591 
2592 	memset(arr, 0, sizeof(arr));
2593 	ppc = cmd[1] & 0x2;
2594 	sp = cmd[1] & 0x1;
2595 	if (ppc || sp) {
2596 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2597 		return check_condition_result;
2598 	}
2599 	pcode = cmd[2] & 0x3f;
2600 	subpcode = cmd[3] & 0xff;
2601 	alloc_len = get_unaligned_be16(cmd + 7);
2602 	arr[0] = pcode;
2603 	if (0 == subpcode) {
2604 		switch (pcode) {
2605 		case 0x0:	/* Supported log pages log page */
2606 			n = 4;
2607 			arr[n++] = 0x0;		/* this page */
2608 			arr[n++] = 0xd;		/* Temperature */
2609 			arr[n++] = 0x2f;	/* Informational exceptions */
2610 			arr[3] = n - 4;
2611 			break;
2612 		case 0xd:	/* Temperature log page */
2613 			arr[3] = resp_temp_l_pg(arr + 4);
2614 			break;
2615 		case 0x2f:	/* Informational exceptions log page */
2616 			arr[3] = resp_ie_l_pg(arr + 4);
2617 			break;
2618 		default:
2619 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2620 			return check_condition_result;
2621 		}
2622 	} else if (0xff == subpcode) {
2623 		arr[0] |= 0x40;
2624 		arr[1] = subpcode;
2625 		switch (pcode) {
2626 		case 0x0:	/* Supported log pages and subpages log page */
2627 			n = 4;
2628 			arr[n++] = 0x0;
2629 			arr[n++] = 0x0;		/* 0,0 page */
2630 			arr[n++] = 0x0;
2631 			arr[n++] = 0xff;	/* this page */
2632 			arr[n++] = 0xd;
2633 			arr[n++] = 0x0;		/* Temperature */
2634 			arr[n++] = 0x2f;
2635 			arr[n++] = 0x0;	/* Informational exceptions */
2636 			arr[3] = n - 4;
2637 			break;
2638 		case 0xd:	/* Temperature subpages */
2639 			n = 4;
2640 			arr[n++] = 0xd;
2641 			arr[n++] = 0x0;		/* Temperature */
2642 			arr[3] = n - 4;
2643 			break;
2644 		case 0x2f:	/* Informational exceptions subpages */
2645 			n = 4;
2646 			arr[n++] = 0x2f;
2647 			arr[n++] = 0x0;		/* Informational exceptions */
2648 			arr[3] = n - 4;
2649 			break;
2650 		default:
2651 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2652 			return check_condition_result;
2653 		}
2654 	} else {
2655 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2656 		return check_condition_result;
2657 	}
2658 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2659 	return fill_from_dev_buffer(scp, arr,
2660 		    min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2661 }
2662 
2663 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2664 {
2665 	return devip->nr_zones != 0;
2666 }
2667 
2668 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2669 					unsigned long long lba)
2670 {
2671 	return &devip->zstate[lba >> devip->zsize_shift];
2672 }
2673 
2674 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2675 {
2676 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2677 }
2678 
2679 static void zbc_close_zone(struct sdebug_dev_info *devip,
2680 			   struct sdeb_zone_state *zsp)
2681 {
2682 	enum sdebug_z_cond zc;
2683 
2684 	if (zbc_zone_is_conv(zsp))
2685 		return;
2686 
2687 	zc = zsp->z_cond;
2688 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2689 		return;
2690 
2691 	if (zc == ZC2_IMPLICIT_OPEN)
2692 		devip->nr_imp_open--;
2693 	else
2694 		devip->nr_exp_open--;
2695 
2696 	if (zsp->z_wp == zsp->z_start) {
2697 		zsp->z_cond = ZC1_EMPTY;
2698 	} else {
2699 		zsp->z_cond = ZC4_CLOSED;
2700 		devip->nr_closed++;
2701 	}
2702 }
2703 
2704 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2705 {
2706 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2707 	unsigned int i;
2708 
2709 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2710 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2711 			zbc_close_zone(devip, zsp);
2712 			return;
2713 		}
2714 	}
2715 }
2716 
2717 static void zbc_open_zone(struct sdebug_dev_info *devip,
2718 			  struct sdeb_zone_state *zsp, bool explicit)
2719 {
2720 	enum sdebug_z_cond zc;
2721 
2722 	if (zbc_zone_is_conv(zsp))
2723 		return;
2724 
2725 	zc = zsp->z_cond;
2726 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2727 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2728 		return;
2729 
2730 	/* Close an implicit open zone if necessary */
2731 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2732 		zbc_close_zone(devip, zsp);
2733 	else if (devip->max_open &&
2734 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2735 		zbc_close_imp_open_zone(devip);
2736 
2737 	if (zsp->z_cond == ZC4_CLOSED)
2738 		devip->nr_closed--;
2739 	if (explicit) {
2740 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2741 		devip->nr_exp_open++;
2742 	} else {
2743 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2744 		devip->nr_imp_open++;
2745 	}
2746 }
2747 
2748 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2749 		       unsigned long long lba, unsigned int num)
2750 {
2751 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2752 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2753 
2754 	if (zbc_zone_is_conv(zsp))
2755 		return;
2756 
2757 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2758 		zsp->z_wp += num;
2759 		if (zsp->z_wp >= zend)
2760 			zsp->z_cond = ZC5_FULL;
2761 		return;
2762 	}
2763 
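	/*
	 * Remaining zone types are sequential write preferred (host-aware
	 * model): a write may land anywhere, so walk zone by zone, advance
	 * each write pointer the request overlaps, and mark writes that do
	 * not start at the write pointer as using a non-sequential resource.
	 */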
2764 	while (num) {
2765 		if (lba != zsp->z_wp)
2766 			zsp->z_non_seq_resource = true;
2767 
2768 		end = lba + num;
2769 		if (end >= zend) {
2770 			n = zend - lba;
2771 			zsp->z_wp = zend;
2772 		} else if (end > zsp->z_wp) {
2773 			n = num;
2774 			zsp->z_wp = end;
2775 		} else {
2776 			n = num;
2777 		}
2778 		if (zsp->z_wp >= zend)
2779 			zsp->z_cond = ZC5_FULL;
2780 
2781 		num -= n;
2782 		lba += n;
2783 		if (num) {
2784 			zsp++;
2785 			zend = zsp->z_start + zsp->z_size;
2786 		}
2787 	}
2788 }
2789 
2790 static int check_zbc_access_params(struct scsi_cmnd *scp,
2791 			unsigned long long lba, unsigned int num, bool write)
2792 {
2793 	struct scsi_device *sdp = scp->device;
2794 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2795 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2796 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2797 
2798 	if (!write) {
2799 		if (devip->zmodel == BLK_ZONED_HA)
2800 			return 0;
2801 		/* For host-managed, reads cannot cross zone type boundaries */
2802 		if (zsp_end != zsp &&
2803 		    zbc_zone_is_conv(zsp) &&
2804 		    !zbc_zone_is_conv(zsp_end)) {
2805 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2806 					LBA_OUT_OF_RANGE,
2807 					READ_INVDATA_ASCQ);
2808 			return check_condition_result;
2809 		}
2810 		return 0;
2811 	}
2812 
2813 	/* No restrictions for writes within conventional zones */
2814 	if (zbc_zone_is_conv(zsp)) {
2815 		if (!zbc_zone_is_conv(zsp_end)) {
2816 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2817 					LBA_OUT_OF_RANGE,
2818 					WRITE_BOUNDARY_ASCQ);
2819 			return check_condition_result;
2820 		}
2821 		return 0;
2822 	}
2823 
2824 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2825 		/* Writes cannot cross sequential zone boundaries */
2826 		if (zsp_end != zsp) {
2827 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2828 					LBA_OUT_OF_RANGE,
2829 					WRITE_BOUNDARY_ASCQ);
2830 			return check_condition_result;
2831 		}
2832 		/* Cannot write full zones */
2833 		if (zsp->z_cond == ZC5_FULL) {
2834 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2835 					INVALID_FIELD_IN_CDB, 0);
2836 			return check_condition_result;
2837 		}
2838 		/* Writes must be aligned to the zone WP */
2839 		if (lba != zsp->z_wp) {
2840 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2841 					LBA_OUT_OF_RANGE,
2842 					UNALIGNED_WRITE_ASCQ);
2843 			return check_condition_result;
2844 		}
2845 	}
2846 
2847 	/* Handle implicit open of closed and empty zones */
2848 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2849 		if (devip->max_open &&
2850 		    devip->nr_exp_open >= devip->max_open) {
2851 			mk_sense_buffer(scp, DATA_PROTECT,
2852 					INSUFF_RES_ASC,
2853 					INSUFF_ZONE_ASCQ);
2854 			return check_condition_result;
2855 		}
2856 		zbc_open_zone(devip, zsp, false);
2857 	}
2858 
2859 	return 0;
2860 }
2861 
2862 static inline int check_device_access_params
2863 			(struct scsi_cmnd *scp, unsigned long long lba,
2864 			 unsigned int num, bool write)
2865 {
2866 	struct scsi_device *sdp = scp->device;
2867 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2868 
2869 	if (lba + num > sdebug_capacity) {
2870 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2871 		return check_condition_result;
2872 	}
2873 	/* transfer length excessive (tie in to block limits VPD page) */
2874 	if (num > sdebug_store_sectors) {
2875 		/* needs work to find which cdb byte 'num' comes from */
2876 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2877 		return check_condition_result;
2878 	}
2879 	if (write && unlikely(sdebug_wp)) {
2880 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2881 		return check_condition_result;
2882 	}
2883 	if (sdebug_dev_is_zoned(devip))
2884 		return check_zbc_access_params(scp, lba, num, write);
2885 
2886 	return 0;
2887 }
2888 
2889 /*
2890  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2891  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2892  * that access any of the "stores" in struct sdeb_store_info should call this
2893  * function with bug_if_fake_rw set to true.
2894  */
2895 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2896 						bool bug_if_fake_rw)
2897 {
2898 	if (sdebug_fake_rw) {
2899 		BUG_ON(bug_if_fake_rw);	/* See note above */
2900 		return NULL;
2901 	}
2902 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2903 }
2904 
2905 /* Returns number of bytes copied or -1 if error. */
2906 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2907 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2908 {
2909 	int ret;
2910 	u64 block, rest = 0;
2911 	enum dma_data_direction dir;
2912 	struct scsi_data_buffer *sdb = &scp->sdb;
2913 	u8 *fsp;
2914 
2915 	if (do_write) {
2916 		dir = DMA_TO_DEVICE;
2917 		write_since_sync = true;
2918 	} else {
2919 		dir = DMA_FROM_DEVICE;
2920 	}
2921 
2922 	if (!sdb->length || !sip)
2923 		return 0;
2924 	if (scp->sc_data_direction != dir)
2925 		return -1;
2926 	fsp = sip->storep;
2927 
2928 	block = do_div(lba, sdebug_store_sectors);
2929 	if (block + num > sdebug_store_sectors)
2930 		rest = block + num - sdebug_store_sectors;
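	/*
	 * Example: with sdebug_store_sectors = 0x400, a 16-sector access at
	 * block 0x3f8 leaves rest = 8, so the final 8 sectors wrap to the
	 * start of the store and are handled by the second sg_copy_buffer()
	 * call below.
	 */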
2931 
2932 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2933 		   fsp + (block * sdebug_sector_size),
2934 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2935 	if (ret != (num - rest) * sdebug_sector_size)
2936 		return ret;
2937 
2938 	if (rest) {
2939 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2940 			    fsp, rest * sdebug_sector_size,
2941 			    sg_skip + ((num - rest) * sdebug_sector_size),
2942 			    do_write);
2943 	}
2944 
2945 	return ret;
2946 }
2947 
2948 /* Returns number of bytes copied or -1 if error. */
2949 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2950 {
2951 	struct scsi_data_buffer *sdb = &scp->sdb;
2952 
2953 	if (!sdb->length)
2954 		return 0;
2955 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2956 		return -1;
2957 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2958 			      num * sdebug_sector_size, 0, true);
2959 }
2960 
2961 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2962  * arr into sip->storep+lba and return true. If comparison fails then
2963  * return false. */
2964 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2965 			      const u8 *arr, bool compare_only)
2966 {
2967 	bool res;
2968 	u64 block, rest = 0;
2969 	u32 store_blks = sdebug_store_sectors;
2970 	u32 lb_size = sdebug_sector_size;
2971 	u8 *fsp = sip->storep;
2972 
2973 	block = do_div(lba, store_blks);
2974 	if (block + num > store_blks)
2975 		rest = block + num - store_blks;
2976 
2977 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2978 	if (!res)
2979 		return res;
2980 	if (rest)
2981 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
2982 			      rest * lb_size);
2983 	if (!res)
2984 		return res;
2985 	if (compare_only)
2986 		return true;
2987 	arr += num * lb_size;
2988 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2989 	if (rest)
2990 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2991 	return res;
2992 }
2993 
2994 static __be16 dif_compute_csum(const void *buf, int len)
2995 {
2996 	__be16 csum;
2997 
2998 	if (sdebug_guard)
2999 		csum = (__force __be16)ip_compute_csum(buf, len);
3000 	else
3001 		csum = cpu_to_be16(crc_t10dif(buf, len));
3002 
3003 	return csum;
3004 }
3005 
3006 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3007 		      sector_t sector, u32 ei_lba)
3008 {
3009 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3010 
3011 	if (sdt->guard_tag != csum) {
3012 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3013 			(unsigned long)sector,
3014 			be16_to_cpu(sdt->guard_tag),
3015 			be16_to_cpu(csum));
3016 		return 0x01;
3017 	}
3018 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3019 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3020 		pr_err("REF check failed on sector %lu\n",
3021 			(unsigned long)sector);
3022 		return 0x03;
3023 	}
3024 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3025 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3026 		pr_err("REF check failed on sector %lu\n",
3027 			(unsigned long)sector);
3028 		return 0x03;
3029 	}
3030 	return 0;
3031 }
3032 
3033 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3034 			  unsigned int sectors, bool read)
3035 {
3036 	size_t resid;
3037 	void *paddr;
3038 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3039 						scp->device->hostdata, true);
3040 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3041 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3042 	struct sg_mapping_iter miter;
3043 
3044 	/* Bytes of protection data to copy into sgl */
3045 	resid = sectors * sizeof(*dif_storep);
3046 
3047 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3048 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3049 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3050 
3051 	while (sg_miter_next(&miter) && resid > 0) {
3052 		size_t len = min_t(size_t, miter.length, resid);
3053 		void *start = dif_store(sip, sector);
3054 		size_t rest = 0;
3055 
3056 		if (dif_store_end < start + len)
3057 			rest = start + len - dif_store_end;
3058 
3059 		paddr = miter.addr;
3060 
3061 		if (read)
3062 			memcpy(paddr, start, len - rest);
3063 		else
3064 			memcpy(start, paddr, len - rest);
3065 
3066 		if (rest) {
3067 			if (read)
3068 				memcpy(paddr + len - rest, dif_storep, rest);
3069 			else
3070 				memcpy(dif_storep, paddr + len - rest, rest);
3071 		}
3072 
3073 		sector += len / sizeof(*dif_storep);
3074 		resid -= len;
3075 	}
3076 	sg_miter_stop(&miter);
3077 }
3078 
3079 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3080 			    unsigned int sectors, u32 ei_lba)
3081 {
3082 	unsigned int i;
3083 	sector_t sector;
3084 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3085 						scp->device->hostdata, true);
3086 	struct t10_pi_tuple *sdt;
3087 
3088 	for (i = 0; i < sectors; i++, ei_lba++) {
3089 		int ret;
3090 
3091 		sector = start_sec + i;
3092 		sdt = dif_store(sip, sector);
3093 
3094 		if (sdt->app_tag == cpu_to_be16(0xffff))
3095 			continue;
3096 
3097 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3098 				 ei_lba);
3099 		if (ret) {
3100 			dif_errors++;
3101 			return ret;
3102 		}
3103 	}
3104 
3105 	dif_copy_prot(scp, start_sec, sectors, true);
3106 	dix_reads++;
3107 
3108 	return 0;
3109 }
3110 
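/*
 * Service the READ family: READ(6), (10), (12), (16) and (32), plus
 * the read half of XDWRITEREAD(10). The cdb is decoded into lba/num,
 * protection and error-injection options are applied, then the store
 * is read under the per-store read lock.
 */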
3111 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3112 {
3113 	bool check_prot;
3114 	u32 num;
3115 	u32 ei_lba;
3116 	int ret;
3117 	u64 lba;
3118 	struct sdeb_store_info *sip = devip2sip(devip, true);
3119 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3120 	u8 *cmd = scp->cmnd;
3121 
3122 	switch (cmd[0]) {
3123 	case READ_16:
3124 		ei_lba = 0;
3125 		lba = get_unaligned_be64(cmd + 2);
3126 		num = get_unaligned_be32(cmd + 10);
3127 		check_prot = true;
3128 		break;
3129 	case READ_10:
3130 		ei_lba = 0;
3131 		lba = get_unaligned_be32(cmd + 2);
3132 		num = get_unaligned_be16(cmd + 7);
3133 		check_prot = true;
3134 		break;
3135 	case READ_6:
3136 		ei_lba = 0;
3137 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3138 		      (u32)(cmd[1] & 0x1f) << 16;
3139 		num = (0 == cmd[4]) ? 256 : cmd[4];
3140 		check_prot = true;
3141 		break;
3142 	case READ_12:
3143 		ei_lba = 0;
3144 		lba = get_unaligned_be32(cmd + 2);
3145 		num = get_unaligned_be32(cmd + 6);
3146 		check_prot = true;
3147 		break;
3148 	case XDWRITEREAD_10:
3149 		ei_lba = 0;
3150 		lba = get_unaligned_be32(cmd + 2);
3151 		num = get_unaligned_be16(cmd + 7);
3152 		check_prot = false;
3153 		break;
3154 	default:	/* assume READ(32) */
3155 		lba = get_unaligned_be64(cmd + 12);
3156 		ei_lba = get_unaligned_be32(cmd + 20);
3157 		num = get_unaligned_be32(cmd + 28);
3158 		check_prot = false;
3159 		break;
3160 	}
3161 	if (unlikely(have_dif_prot && check_prot)) {
3162 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3163 		    (cmd[1] & 0xe0)) {
3164 			mk_sense_invalid_opcode(scp);
3165 			return check_condition_result;
3166 		}
3167 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3168 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3169 		    (cmd[1] & 0xe0) == 0)
3170 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3171 				    "to DIF device\n");
3172 	}
3173 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3174 		     atomic_read(&sdeb_inject_pending))) {
3175 		num /= 2;
3176 		atomic_set(&sdeb_inject_pending, 0);
3177 	}
3178 
3179 	ret = check_device_access_params(scp, lba, num, false);
3180 	if (ret)
3181 		return ret;
3182 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3183 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3184 		     ((lba + num) > sdebug_medium_error_start))) {
3185 		/* claim unrecoverable read error */
3186 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3187 		/* set info field and valid bit for fixed descriptor */
3188 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3189 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3190 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3191 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3192 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3193 		}
3194 		scsi_set_resid(scp, scsi_bufflen(scp));
3195 		return check_condition_result;
3196 	}
3197 
3198 	read_lock(macc_lckp);
3199 
3200 	/* DIX + T10 DIF */
3201 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3202 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3203 
3204 		if (prot_ret) {
3205 			read_unlock(macc_lckp);
3206 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3207 			return illegal_condition_result;
3208 		}
3209 	}
3210 
3211 	ret = do_device_access(sip, scp, 0, lba, num, false);
3212 	read_unlock(macc_lckp);
3213 	if (unlikely(ret == -1))
3214 		return DID_ERROR << 16;
3215 
3216 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3217 
3218 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3219 		     atomic_read(&sdeb_inject_pending))) {
3220 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3221 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3222 			atomic_set(&sdeb_inject_pending, 0);
3223 			return check_condition_result;
3224 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3225 			/* Logical block guard check failed */
3226 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3227 			atomic_set(&sdeb_inject_pending, 0);
3228 			return illegal_condition_result;
3229 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3230 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3231 			atomic_set(&sdeb_inject_pending, 0);
3232 			return illegal_condition_result;
3233 		}
3234 	}
3235 	return 0;
3236 }
3237 
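/*
 * Hex/ASCII dump of one sector, 16 bytes per output line; used when a
 * PI check fails on a write.
 */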
3238 static void dump_sector(unsigned char *buf, int len)
3239 {
3240 	int i, j, n;
3241 
3242 	pr_err(">>> Sector Dump <<<\n");
3243 	for (i = 0 ; i < len ; i += 16) {
3244 		char b[128];
3245 
3246 		for (j = 0, n = 0; j < 16; j++) {
3247 			unsigned char c = buf[i+j];
3248 
3249 			if (c >= 0x20 && c < 0x7f)
3250 				n += scnprintf(b + n, sizeof(b) - n,
3251 					       " %c ", buf[i+j]);
3252 			else
3253 				n += scnprintf(b + n, sizeof(b) - n,
3254 					       "%02x ", buf[i+j]);
3255 		}
3256 		pr_err("%04d: %s\n", i, b);
3257 	}
3258 }
3259 
3260 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3261 			     unsigned int sectors, u32 ei_lba)
3262 {
3263 	int ret;
3264 	struct t10_pi_tuple *sdt;
3265 	void *daddr;
3266 	sector_t sector = start_sec;
3267 	int ppage_offset;
3268 	int dpage_offset;
3269 	struct sg_mapping_iter diter;
3270 	struct sg_mapping_iter piter;
3271 
3272 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3273 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3274 
3275 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3276 			scsi_prot_sg_count(SCpnt),
3277 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3278 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3279 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3280 
3281 	/* For each protection page */
3282 	while (sg_miter_next(&piter)) {
3283 		dpage_offset = 0;
3284 		if (WARN_ON(!sg_miter_next(&diter))) {
3285 			ret = 0x01;
3286 			goto out;
3287 		}
3288 
3289 		for (ppage_offset = 0; ppage_offset < piter.length;
3290 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3291 			/* If we're at the end of the current
3292 			 * data page, advance to the next one
3293 			 */
3294 			if (dpage_offset >= diter.length) {
3295 				if (WARN_ON(!sg_miter_next(&diter))) {
3296 					ret = 0x01;
3297 					goto out;
3298 				}
3299 				dpage_offset = 0;
3300 			}
3301 
3302 			sdt = piter.addr + ppage_offset;
3303 			daddr = diter.addr + dpage_offset;
3304 
3305 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3306 			if (ret) {
3307 				dump_sector(daddr, sdebug_sector_size);
3308 				goto out;
3309 			}
3310 
3311 			sector++;
3312 			ei_lba++;
3313 			dpage_offset += sdebug_sector_size;
3314 		}
3315 		diter.consumed = dpage_offset;
3316 		sg_miter_stop(&diter);
3317 	}
3318 	sg_miter_stop(&piter);
3319 
3320 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3321 	dix_writes++;
3322 
3323 	return 0;
3324 
3325 out:
3326 	dif_errors++;
3327 	sg_miter_stop(&diter);
3328 	sg_miter_stop(&piter);
3329 	return ret;
3330 }
3331 
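/*
 * Logical block provisioning (UNMAP) state is tracked in map_storep,
 * one bit per unmap granularity unit, offset by the unmap alignment.
 * For example, with granularity 4 and alignment 1, map index 0 covers
 * LBA 0 only and index i (i > 0) covers LBAs 4*i-3 through 4*i.
 */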
3332 static unsigned long lba_to_map_index(sector_t lba)
3333 {
3334 	if (sdebug_unmap_alignment)
3335 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3336 	sector_div(lba, sdebug_unmap_granularity);
3337 	return lba;
3338 }
3339 
3340 static sector_t map_index_to_lba(unsigned long index)
3341 {
3342 	sector_t lba = index * sdebug_unmap_granularity;
3343 
3344 	if (sdebug_unmap_alignment)
3345 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3346 	return lba;
3347 }
3348 
3349 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3350 			      unsigned int *num)
3351 {
3352 	sector_t end;
3353 	unsigned int mapped;
3354 	unsigned long index;
3355 	unsigned long next;
3356 
3357 	index = lba_to_map_index(lba);
3358 	mapped = test_bit(index, sip->map_storep);
3359 
3360 	if (mapped)
3361 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3362 	else
3363 		next = find_next_bit(sip->map_storep, map_size, index);
3364 
3365 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3366 	*num = end - lba;
3367 	return mapped;
3368 }
3369 
3370 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3371 		       unsigned int len)
3372 {
3373 	sector_t end = lba + len;
3374 
3375 	while (lba < end) {
3376 		unsigned long index = lba_to_map_index(lba);
3377 
3378 		if (index < map_size)
3379 			set_bit(index, sip->map_storep);
3380 
3381 		lba = map_index_to_lba(index + 1);
3382 	}
3383 }
3384 
3385 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3386 			 unsigned int len)
3387 {
3388 	sector_t end = lba + len;
3389 	u8 *fsp = sip->storep;
3390 
3391 	while (lba < end) {
3392 		unsigned long index = lba_to_map_index(lba);
3393 
3394 		if (lba == map_index_to_lba(index) &&
3395 		    lba + sdebug_unmap_granularity <= end &&
3396 		    index < map_size) {
3397 			clear_bit(index, sip->map_storep);
3398 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros; LBPRZ=2: 0xffs */
3399 				memset(fsp + lba * sdebug_sector_size,
3400 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3401 				       sdebug_sector_size *
3402 				       sdebug_unmap_granularity);
3403 			}
3404 			if (sip->dif_storep) {
3405 				memset(sip->dif_storep + lba, 0xff,
3406 				       sizeof(*sip->dif_storep) *
3407 				       sdebug_unmap_granularity);
3408 			}
3409 		}
3410 		lba = map_index_to_lba(index + 1);
3411 	}
3412 }
3413 
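/*
 * Service the WRITE family: WRITE(6), (10), (12), (16) and (32), plus
 * the write half of XDWRITEREAD(10). Decode the cdb, apply protection
 * checks, then write the store under the per-store write lock,
 * updating the provisioning map and any zone write pointer.
 */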
3414 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3415 {
3416 	bool check_prot;
3417 	u32 num;
3418 	u32 ei_lba;
3419 	int ret;
3420 	u64 lba;
3421 	struct sdeb_store_info *sip = devip2sip(devip, true);
3422 	rwlock_t *macc_lckp = &sip->macc_lck;
3423 	u8 *cmd = scp->cmnd;
3424 
3425 	switch (cmd[0]) {
3426 	case WRITE_16:
3427 		ei_lba = 0;
3428 		lba = get_unaligned_be64(cmd + 2);
3429 		num = get_unaligned_be32(cmd + 10);
3430 		check_prot = true;
3431 		break;
3432 	case WRITE_10:
3433 		ei_lba = 0;
3434 		lba = get_unaligned_be32(cmd + 2);
3435 		num = get_unaligned_be16(cmd + 7);
3436 		check_prot = true;
3437 		break;
3438 	case WRITE_6:
3439 		ei_lba = 0;
3440 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3441 		      (u32)(cmd[1] & 0x1f) << 16;
3442 		num = (0 == cmd[4]) ? 256 : cmd[4];
3443 		check_prot = true;
3444 		break;
3445 	case WRITE_12:
3446 		ei_lba = 0;
3447 		lba = get_unaligned_be32(cmd + 2);
3448 		num = get_unaligned_be32(cmd + 6);
3449 		check_prot = true;
3450 		break;
3451 	case XDWRITEREAD_10:
3452 		ei_lba = 0;
3453 		lba = get_unaligned_be32(cmd + 2);
3454 		num = get_unaligned_be16(cmd + 7);
3455 		check_prot = false;
3456 		break;
3457 	default:	/* assume WRITE(32) */
3458 		lba = get_unaligned_be64(cmd + 12);
3459 		ei_lba = get_unaligned_be32(cmd + 20);
3460 		num = get_unaligned_be32(cmd + 28);
3461 		check_prot = false;
3462 		break;
3463 	}
3464 	if (unlikely(have_dif_prot && check_prot)) {
3465 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3466 		    (cmd[1] & 0xe0)) {
3467 			mk_sense_invalid_opcode(scp);
3468 			return check_condition_result;
3469 		}
3470 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3471 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3472 		    (cmd[1] & 0xe0) == 0)
3473 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3474 				    "to DIF device\n");
3475 	}
3476 
3477 	write_lock(macc_lckp);
3478 	ret = check_device_access_params(scp, lba, num, true);
3479 	if (ret) {
3480 		write_unlock(macc_lckp);
3481 		return ret;
3482 	}
3483 
3484 	/* DIX + T10 DIF */
3485 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3486 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3487 
3488 		if (prot_ret) {
3489 			write_unlock(macc_lckp);
3490 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3491 			return illegal_condition_result;
3492 		}
3493 	}
3494 
3495 	ret = do_device_access(sip, scp, 0, lba, num, true);
3496 	if (unlikely(scsi_debug_lbp()))
3497 		map_region(sip, lba, num);
3498 	/* If ZBC zone then bump its write pointer */
3499 	if (sdebug_dev_is_zoned(devip))
3500 		zbc_inc_wp(devip, lba, num);
3501 	write_unlock(macc_lckp);
3502 	if (unlikely(-1 == ret))
3503 		return DID_ERROR << 16;
3504 	else if (unlikely(sdebug_verbose &&
3505 			  (ret < (num * sdebug_sector_size))))
3506 		sdev_printk(KERN_INFO, scp->device,
3507 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3508 			    my_name, num * sdebug_sector_size, ret);
3509 
3510 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3511 		     atomic_read(&sdeb_inject_pending))) {
3512 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3513 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3514 			atomic_set(&sdeb_inject_pending, 0);
3515 			return check_condition_result;
3516 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3517 			/* Logical block guard check failed */
3518 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3519 			atomic_set(&sdeb_inject_pending, 0);
3520 			return illegal_condition_result;
3521 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3522 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3523 			atomic_set(&sdeb_inject_pending, 0);
3524 			return illegal_condition_result;
3525 		}
3526 	}
3527 	return 0;
3528 }
3529 
3530 /*
3531  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3532  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3533  */
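/*
 * The data-out buffer begins with a 32 byte parameter list header
 * followed by num_lrd LBA range descriptors, each 32 bytes: an 8 byte
 * LBA, a 4 byte block count and, for the 32 byte cdb variant, a 4 byte
 * expected initial LBA. The LBs to be written start at byte offset
 * lbdof * lb_size.
 */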
3534 static int resp_write_scat(struct scsi_cmnd *scp,
3535 			   struct sdebug_dev_info *devip)
3536 {
3537 	u8 *cmd = scp->cmnd;
3538 	u8 *lrdp = NULL;
3539 	u8 *up;
3540 	struct sdeb_store_info *sip = devip2sip(devip, true);
3541 	rwlock_t *macc_lckp = &sip->macc_lck;
3542 	u8 wrprotect;
3543 	u16 lbdof, num_lrd, k;
3544 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3545 	u32 lb_size = sdebug_sector_size;
3546 	u32 ei_lba;
3547 	u64 lba;
3548 	int ret, res;
3549 	bool is_16;
3550 	static const u32 lrd_size = 32; /* descriptor size; header is the same size */
3551 
3552 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3553 		is_16 = false;
3554 		wrprotect = (cmd[10] >> 5) & 0x7;
3555 		lbdof = get_unaligned_be16(cmd + 12);
3556 		num_lrd = get_unaligned_be16(cmd + 16);
3557 		bt_len = get_unaligned_be32(cmd + 28);
3558 	} else {        /* that leaves WRITE SCATTERED(16) */
3559 		is_16 = true;
3560 		wrprotect = (cmd[2] >> 5) & 0x7;
3561 		lbdof = get_unaligned_be16(cmd + 4);
3562 		num_lrd = get_unaligned_be16(cmd + 8);
3563 		bt_len = get_unaligned_be32(cmd + 10);
3564 		if (unlikely(have_dif_prot)) {
3565 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3566 			    wrprotect) {
3567 				mk_sense_invalid_opcode(scp);
3568 				return illegal_condition_result;
3569 			}
3570 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3571 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3572 			     wrprotect == 0)
3573 				sdev_printk(KERN_ERR, scp->device,
3574 					    "Unprotected WR to DIF device\n");
3575 		}
3576 	}
3577 	if ((num_lrd == 0) || (bt_len == 0))
3578 		return 0;       /* T10 says these do-nothings are not errors */
3579 	if (lbdof == 0) {
3580 		if (sdebug_verbose)
3581 			sdev_printk(KERN_INFO, scp->device,
3582 				"%s: %s: LB Data Offset field bad\n",
3583 				my_name, __func__);
3584 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3585 		return illegal_condition_result;
3586 	}
3587 	lbdof_blen = lbdof * lb_size;
3588 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3589 		if (sdebug_verbose)
3590 			sdev_printk(KERN_INFO, scp->device,
3591 				"%s: %s: LBA range descriptors don't fit\n",
3592 				my_name, __func__);
3593 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3594 		return illegal_condition_result;
3595 	}
3596 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3597 	if (lrdp == NULL)
3598 		return SCSI_MLQUEUE_HOST_BUSY;
3599 	if (sdebug_verbose)
3600 		sdev_printk(KERN_INFO, scp->device,
3601 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3602 			my_name, __func__, lbdof_blen);
3603 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3604 	if (res == -1) {
3605 		ret = DID_ERROR << 16;
3606 		goto err_out;
3607 	}
3608 
3609 	write_lock(macc_lckp);
3610 	sg_off = lbdof_blen;
3611 	/* Spec says the Buffer Transfer Length field counts LBs in dout */
3612 	cum_lb = 0;
3613 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3614 		lba = get_unaligned_be64(up + 0);
3615 		num = get_unaligned_be32(up + 8);
3616 		if (sdebug_verbose)
3617 			sdev_printk(KERN_INFO, scp->device,
3618 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3619 				my_name, __func__, k, lba, num, sg_off);
3620 		if (num == 0)
3621 			continue;
3622 		ret = check_device_access_params(scp, lba, num, true);
3623 		if (ret)
3624 			goto err_out_unlock;
3625 		num_by = num * lb_size;
3626 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3627 
3628 		if ((cum_lb + num) > bt_len) {
3629 			if (sdebug_verbose)
3630 				sdev_printk(KERN_INFO, scp->device,
3631 				    "%s: %s: sum of blocks > data provided\n",
3632 				    my_name, __func__);
3633 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3634 					0);
3635 			ret = illegal_condition_result;
3636 			goto err_out_unlock;
3637 		}
3638 
3639 		/* DIX + T10 DIF */
3640 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3641 			int prot_ret = prot_verify_write(scp, lba, num,
3642 							 ei_lba);
3643 
3644 			if (prot_ret) {
3645 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3646 						prot_ret);
3647 				ret = illegal_condition_result;
3648 				goto err_out_unlock;
3649 			}
3650 		}
3651 
3652 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3653 		/* If ZBC zone then bump its write pointer */
3654 		if (sdebug_dev_is_zoned(devip))
3655 			zbc_inc_wp(devip, lba, num);
3656 		if (unlikely(scsi_debug_lbp()))
3657 			map_region(sip, lba, num);
3658 		if (unlikely(-1 == ret)) {
3659 			ret = DID_ERROR << 16;
3660 			goto err_out_unlock;
3661 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3662 			sdev_printk(KERN_INFO, scp->device,
3663 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3664 			    my_name, num_by, ret);
3665 
3666 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3667 			     atomic_read(&sdeb_inject_pending))) {
3668 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3669 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3670 				atomic_set(&sdeb_inject_pending, 0);
3671 				ret = check_condition_result;
3672 				goto err_out_unlock;
3673 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3674 				/* Logical block guard check failed */
3675 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3676 				atomic_set(&sdeb_inject_pending, 0);
3677 				ret = illegal_condition_result;
3678 				goto err_out_unlock;
3679 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3680 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3681 				atomic_set(&sdeb_inject_pending, 0);
3682 				ret = illegal_condition_result;
3683 				goto err_out_unlock;
3684 			}
3685 		}
3686 		sg_off += num_by;
3687 		cum_lb += num;
3688 	}
3689 	ret = 0;
3690 err_out_unlock:
3691 	write_unlock(macc_lckp);
3692 err_out:
3693 	kfree(lrdp);
3694 	return ret;
3695 }
3696 
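/*
 * WRITE SAME worker for both the 10 and 16 byte cdbs. One logical
 * block is fetched (or zeroed when NDOB is set) and replicated across
 * the remaining num - 1 blocks; with the UNMAP bit set and LBP active
 * the range is deallocated instead.
 */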
3697 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3698 			   u32 ei_lba, bool unmap, bool ndob)
3699 {
3700 	struct scsi_device *sdp = scp->device;
3701 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3702 	unsigned long long i;
3703 	u64 block, lbaa;
3704 	u32 lb_size = sdebug_sector_size;
3705 	int ret;
3706 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3707 						scp->device->hostdata, true);
3708 	rwlock_t *macc_lckp = &sip->macc_lck;
3709 	u8 *fs1p;
3710 	u8 *fsp;
3711 
3712 	write_lock(macc_lckp);
3713 
3714 	ret = check_device_access_params(scp, lba, num, true);
3715 	if (ret) {
3716 		write_unlock(macc_lckp);
3717 		return ret;
3718 	}
3719 
3720 	if (unmap && scsi_debug_lbp()) {
3721 		unmap_region(sip, lba, num);
3722 		goto out;
3723 	}
3724 	lbaa = lba;
3725 	block = do_div(lbaa, sdebug_store_sectors);
3726 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3727 	fsp = sip->storep;
3728 	fs1p = fsp + (block * lb_size);
3729 	if (ndob) {
3730 		memset(fs1p, 0, lb_size);
3731 		ret = 0;
3732 	} else
3733 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3734 
3735 	if (-1 == ret) {
3736 		write_unlock(macc_lckp);
3737 		return DID_ERROR << 16;
3738 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3739 		sdev_printk(KERN_INFO, scp->device,
3740 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3741 			    my_name, "write same", lb_size, ret);
3742 
3743 	/* Copy first sector to remaining blocks */
3744 	for (i = 1 ; i < num ; i++) {
3745 		lbaa = lba + i;
3746 		block = do_div(lbaa, sdebug_store_sectors);
3747 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3748 	}
3749 	if (scsi_debug_lbp())
3750 		map_region(sip, lba, num);
3751 	/* If ZBC zone then bump its write pointer */
3752 	if (sdebug_dev_is_zoned(devip))
3753 		zbc_inc_wp(devip, lba, num);
3754 out:
3755 	write_unlock(macc_lckp);
3756 
3757 	return 0;
3758 }
3759 
3760 static int resp_write_same_10(struct scsi_cmnd *scp,
3761 			      struct sdebug_dev_info *devip)
3762 {
3763 	u8 *cmd = scp->cmnd;
3764 	u32 lba;
3765 	u16 num;
3766 	u32 ei_lba = 0;
3767 	bool unmap = false;
3768 
3769 	if (cmd[1] & 0x8) {
3770 		if (sdebug_lbpws10 == 0) {
3771 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3772 			return check_condition_result;
3773 		} else
3774 			unmap = true;
3775 	}
3776 	lba = get_unaligned_be32(cmd + 2);
3777 	num = get_unaligned_be16(cmd + 7);
3778 	if (num > sdebug_write_same_length) {
3779 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3780 		return check_condition_result;
3781 	}
3782 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3783 }
3784 
3785 static int resp_write_same_16(struct scsi_cmnd *scp,
3786 			      struct sdebug_dev_info *devip)
3787 {
3788 	u8 *cmd = scp->cmnd;
3789 	u64 lba;
3790 	u32 num;
3791 	u32 ei_lba = 0;
3792 	bool unmap = false;
3793 	bool ndob = false;
3794 
3795 	if (cmd[1] & 0x8) {	/* UNMAP */
3796 		if (sdebug_lbpws == 0) {
3797 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3798 			return check_condition_result;
3799 		} else
3800 			unmap = true;
3801 	}
3802 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3803 		ndob = true;
3804 	lba = get_unaligned_be64(cmd + 2);
3805 	num = get_unaligned_be32(cmd + 10);
3806 	if (num > sdebug_write_same_length) {
3807 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3808 		return check_condition_result;
3809 	}
3810 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3811 }
3812 
3813 /* Note the mode field is in the same position as the (lower) service action
3814  * field. For the Report supported operation codes command, SPC-4 suggests
3815  * each mode of this command should be reported separately; left for the future. */
3816 static int resp_write_buffer(struct scsi_cmnd *scp,
3817 			     struct sdebug_dev_info *devip)
3818 {
3819 	u8 *cmd = scp->cmnd;
3820 	struct scsi_device *sdp = scp->device;
3821 	struct sdebug_dev_info *dp;
3822 	u8 mode;
3823 
3824 	mode = cmd[1] & 0x1f;
3825 	switch (mode) {
3826 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3827 		/* set UAs on this device only */
3828 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3829 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3830 		break;
3831 	case 0x5:	/* download MC, save and ACT */
3832 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3833 		break;
3834 	case 0x6:	/* download MC with offsets and ACT */
3835 		/* set UAs on most devices (LUs) in this target */
3836 		list_for_each_entry(dp,
3837 				    &devip->sdbg_host->dev_info_list,
3838 				    dev_list)
3839 			if (dp->target == sdp->id) {
3840 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3841 				if (devip != dp)
3842 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3843 						dp->uas_bm);
3844 			}
3845 		break;
3846 	case 0x7:	/* download MC with offsets, save, and ACT */
3847 		/* set UA on all devices (LUs) in this target */
3848 		list_for_each_entry(dp,
3849 				    &devip->sdbg_host->dev_info_list,
3850 				    dev_list)
3851 			if (dp->target == sdp->id)
3852 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3853 					dp->uas_bm);
3854 		break;
3855 	default:
3856 		/* do nothing for this command for other mode values */
3857 		break;
3858 	}
3859 	return 0;
3860 }
3861 
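/*
 * COMPARE AND WRITE (opcode 0x89). The data-out buffer carries 2 * num
 * blocks: a verify half followed by a write half. The verify half is
 * compared against the store and, only when every block matches, the
 * write half replaces it; otherwise MISCOMPARE is reported.
 */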
3862 static int resp_comp_write(struct scsi_cmnd *scp,
3863 			   struct sdebug_dev_info *devip)
3864 {
3865 	u8 *cmd = scp->cmnd;
3866 	u8 *arr;
3867 	struct sdeb_store_info *sip = devip2sip(devip, true);
3868 	rwlock_t *macc_lckp = &sip->macc_lck;
3869 	u64 lba;
3870 	u32 dnum;
3871 	u32 lb_size = sdebug_sector_size;
3872 	u8 num;
3873 	int ret;
3874 	int retval = 0;
3875 
3876 	lba = get_unaligned_be64(cmd + 2);
3877 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3878 	if (0 == num)
3879 		return 0;	/* degenerate case, not an error */
3880 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3881 	    (cmd[1] & 0xe0)) {
3882 		mk_sense_invalid_opcode(scp);
3883 		return check_condition_result;
3884 	}
3885 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3886 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3887 	    (cmd[1] & 0xe0) == 0)
3888 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3889 			    "to DIF device\n");
3890 	ret = check_device_access_params(scp, lba, num, false);
3891 	if (ret)
3892 		return ret;
3893 	dnum = 2 * num;
3894 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3895 	if (NULL == arr) {
3896 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3897 				INSUFF_RES_ASCQ);
3898 		return check_condition_result;
3899 	}
3900 
3901 	write_lock(macc_lckp);
3902 
3903 	ret = do_dout_fetch(scp, dnum, arr);
3904 	if (ret == -1) {
3905 		retval = DID_ERROR << 16;
3906 		goto cleanup;
3907 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3908 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3909 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3910 			    dnum * lb_size, ret);
3911 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3912 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3913 		retval = check_condition_result;
3914 		goto cleanup;
3915 	}
3916 	if (scsi_debug_lbp())
3917 		map_region(sip, lba, num);
3918 cleanup:
3919 	write_unlock(macc_lckp);
3920 	kfree(arr);
3921 	return retval;
3922 }
3923 
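/*
 * UNMAP parameter list: an 8 byte header (data length, block
 * descriptor data length) followed by 16 byte descriptors laid out as
 * below.
 */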
3924 struct unmap_block_desc {
3925 	__be64	lba;
3926 	__be32	blocks;
3927 	__be32	__reserved;
3928 };
3929 
3930 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3931 {
3932 	unsigned char *buf;
3933 	struct unmap_block_desc *desc;
3934 	struct sdeb_store_info *sip = devip2sip(devip, true);
3935 	rwlock_t *macc_lckp = &sip->macc_lck;
3936 	unsigned int i, payload_len, descriptors;
3937 	int ret;
3938 
3939 	if (!scsi_debug_lbp())
3940 		return 0;	/* fib and say it's done */
3941 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3942 	BUG_ON(scsi_bufflen(scp) != payload_len);
3943 
3944 	descriptors = (payload_len - 8) / 16;
3945 	if (descriptors > sdebug_unmap_max_desc) {
3946 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3947 		return check_condition_result;
3948 	}
3949 
3950 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3951 	if (!buf) {
3952 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3953 				INSUFF_RES_ASCQ);
3954 		return check_condition_result;
3955 	}
3956 
3957 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3958 
3959 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3960 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3961 
3962 	desc = (void *)&buf[8];
3963 
3964 	write_lock(macc_lckp);
3965 
3966 	for (i = 0 ; i < descriptors ; i++) {
3967 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3968 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3969 
3970 		ret = check_device_access_params(scp, lba, num, true);
3971 		if (ret)
3972 			goto out;
3973 
3974 		unmap_region(sip, lba, num);
3975 	}
3976 
3977 	ret = 0;
3978 
3979 out:
3980 	write_unlock(macc_lckp);
3981 	kfree(buf);
3982 
3983 	return ret;
3984 }
3985 
3986 #define SDEBUG_GET_LBA_STATUS_LEN 32
3987 
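/*
 * GET LBA STATUS returns a single descriptor covering the run of
 * blocks, starting at the given LBA, that share the same
 * mapped/deallocated state.
 */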
3988 static int resp_get_lba_status(struct scsi_cmnd *scp,
3989 			       struct sdebug_dev_info *devip)
3990 {
3991 	u8 *cmd = scp->cmnd;
3992 	u64 lba;
3993 	u32 alloc_len, mapped, num;
3994 	int ret;
3995 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3996 
3997 	lba = get_unaligned_be64(cmd + 2);
3998 	alloc_len = get_unaligned_be32(cmd + 10);
3999 
4000 	if (alloc_len < 24)
4001 		return 0;
4002 
4003 	ret = check_device_access_params(scp, lba, 1, false);
4004 	if (ret)
4005 		return ret;
4006 
4007 	if (scsi_debug_lbp()) {
4008 		struct sdeb_store_info *sip = devip2sip(devip, true);
4009 
4010 		mapped = map_state(sip, lba, &num);
4011 	} else {
4012 		mapped = 1;
4013 		/* following just in case virtual_gb changed */
4014 		sdebug_capacity = get_sdebug_capacity();
4015 		if (sdebug_capacity - lba <= 0xffffffff)
4016 			num = sdebug_capacity - lba;
4017 		else
4018 			num = 0xffffffff;
4019 	}
4020 
4021 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4022 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4023 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4024 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4025 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4026 
4027 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4028 }
4029 
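/*
 * SYNCHRONIZE CACHE(10) and (16). There is nothing to flush, so the
 * range is merely validated; when the IMMED bit is set, or nothing has
 * been written since the last sync, the response is not delayed.
 */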
4030 static int resp_sync_cache(struct scsi_cmnd *scp,
4031 			   struct sdebug_dev_info *devip)
4032 {
4033 	int res = 0;
4034 	u64 lba;
4035 	u32 num_blocks;
4036 	u8 *cmd = scp->cmnd;
4037 
4038 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4039 		lba = get_unaligned_be32(cmd + 2);
4040 		num_blocks = get_unaligned_be16(cmd + 7);
4041 	} else {				/* SYNCHRONIZE_CACHE(16) */
4042 		lba = get_unaligned_be64(cmd + 2);
4043 		num_blocks = get_unaligned_be32(cmd + 10);
4044 	}
4045 	if (lba + num_blocks > sdebug_capacity) {
4046 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4047 		return check_condition_result;
4048 	}
4049 	if (!write_since_sync || (cmd[1] & 0x2))
4050 		res = SDEG_RES_IMMED_MASK;
4051 	else		/* delay if write_since_sync and IMMED clear */
4052 		write_since_sync = false;
4053 	return res;
4054 }
4055 
4056 /*
4057  * Assuming lba + num_blocks is not out-of-range, this function returns
4058  * CONDITION MET if the specified blocks will fit (or already sit) in the
4059  * cache, and GOOD status otherwise. A disk with a big cache is modelled,
4060  * so CONDITION MET is always yielded. As a side effect, the addressed
4061  * range of the store is prefetched into the CPU cache(s).
4062  */
4063 static int resp_pre_fetch(struct scsi_cmnd *scp,
4064 			  struct sdebug_dev_info *devip)
4065 {
4066 	int res = 0;
4067 	u64 lba;
4068 	u64 block, rest = 0;
4069 	u32 nblks;
4070 	u8 *cmd = scp->cmnd;
4071 	struct sdeb_store_info *sip = devip2sip(devip, true);
4072 	rwlock_t *macc_lckp = &sip->macc_lck;
4073 	u8 *fsp = sip->storep;
4074 
4075 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4076 		lba = get_unaligned_be32(cmd + 2);
4077 		nblks = get_unaligned_be16(cmd + 7);
4078 	} else {			/* PRE-FETCH(16) */
4079 		lba = get_unaligned_be64(cmd + 2);
4080 		nblks = get_unaligned_be32(cmd + 10);
4081 	}
4082 	if (lba + nblks > sdebug_capacity) {
4083 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4084 		return check_condition_result;
4085 	}
4086 	if (!fsp)
4087 		goto fini;
4088 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4089 	block = do_div(lba, sdebug_store_sectors);
4090 	if (block + nblks > sdebug_store_sectors)
4091 		rest = block + nblks - sdebug_store_sectors;
4092 
4093 	/* Try to bring the PRE-FETCH range into CPU's cache */
4094 	read_lock(macc_lckp);
4095 	prefetch_range(fsp + (sdebug_sector_size * block),
4096 		       (nblks - rest) * sdebug_sector_size);
4097 	if (rest)
4098 		prefetch_range(fsp, rest * sdebug_sector_size);
4099 	read_unlock(macc_lckp);
4100 fini:
4101 	if (cmd[1] & 0x2)
4102 		res = SDEG_RES_IMMED_MASK;
4103 	return res | condition_met_result;
4104 }
4105 
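/*
 * The REPORT LUNS response is built in buckets of RL_BUCKET_ELEMS
 * entries so the on-stack array stays small however many LUNs are
 * configured.
 */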
4106 #define RL_BUCKET_ELEMS 8
4107 
4108 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4109  * (W-LUN), the normal Linux scanning logic does not associate it with a
4110  * device (e.g. /dev/sg7). The following magic will make that association:
4111  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4112  * where <n> is a host number. If there are multiple targets in a host then
4113  * the above will associate a W-LUN to each target. To only get a W-LUN
4114  * for target 2, then use "echo '- 2 49409' > scan" .
4115  */
4116 static int resp_report_luns(struct scsi_cmnd *scp,
4117 			    struct sdebug_dev_info *devip)
4118 {
4119 	unsigned char *cmd = scp->cmnd;
4120 	unsigned int alloc_len;
4121 	unsigned char select_report;
4122 	u64 lun;
4123 	struct scsi_lun *lun_p;
4124 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4125 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4126 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4127 	unsigned int tlun_cnt;	/* total LUN count */
4128 	unsigned int rlen;	/* response length (in bytes) */
4129 	int k, j, n, res;
4130 	unsigned int off_rsp = 0;
4131 	const int sz_lun = sizeof(struct scsi_lun);
4132 
4133 	clear_luns_changed_on_target(devip);
4134 
4135 	select_report = cmd[2];
4136 	alloc_len = get_unaligned_be32(cmd + 6);
4137 
4138 	if (alloc_len < 4) {
4139 		pr_err("alloc len too small %d\n", alloc_len);
4140 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4141 		return check_condition_result;
4142 	}
4143 
4144 	switch (select_report) {
4145 	case 0:		/* all LUNs apart from W-LUNs */
4146 		lun_cnt = sdebug_max_luns;
4147 		wlun_cnt = 0;
4148 		break;
4149 	case 1:		/* only W-LUNs */
4150 		lun_cnt = 0;
4151 		wlun_cnt = 1;
4152 		break;
4153 	case 2:		/* all LUNs */
4154 		lun_cnt = sdebug_max_luns;
4155 		wlun_cnt = 1;
4156 		break;
4157 	case 0x10:	/* only administrative LUs */
4158 	case 0x11:	/* see SPC-5 */
4159 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4160 	default:
4161 		pr_debug("select report invalid %d\n", select_report);
4162 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4163 		return check_condition_result;
4164 	}
4165 
4166 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4167 		--lun_cnt;
4168 
4169 	tlun_cnt = lun_cnt + wlun_cnt;
4170 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4171 	scsi_set_resid(scp, scsi_bufflen(scp));
4172 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4173 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4174 
4175 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4176 	lun = sdebug_no_lun_0 ? 1 : 0;
4177 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4178 		memset(arr, 0, sizeof(arr));
4179 		lun_p = (struct scsi_lun *)&arr[0];
4180 		if (k == 0) {
4181 			put_unaligned_be32(rlen, &arr[0]);
4182 			++lun_p;
4183 			j = 1;
4184 		}
4185 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4186 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4187 				break;
4188 			int_to_scsilun(lun++, lun_p);
4189 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4190 				lun_p->scsi_lun[0] |= 0x40;
4191 		}
4192 		if (j < RL_BUCKET_ELEMS)
4193 			break;
4194 		n = j * sz_lun;
4195 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4196 		if (res)
4197 			return res;
4198 		off_rsp += n;
4199 	}
4200 	if (wlun_cnt) {
4201 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4202 		++j;
4203 	}
4204 	if (j > 0)
4205 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4206 	return res;
4207 }
4208 
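/*
 * VERIFY(10) and VERIFY(16). BYTCHK=0 simply claims the medium
 * verification succeeded; BYTCHK=1 compares the data-out buffer with
 * the store; BYTCHK=3 sends one block which is compared against every
 * block in the range; BYTCHK=2 is rejected as invalid.
 */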
4209 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4210 {
4211 	bool is_bytchk3 = false;
4212 	u8 bytchk;
4213 	int ret, j;
4214 	u32 vnum, a_num, off;
4215 	const u32 lb_size = sdebug_sector_size;
4216 	u64 lba;
4217 	u8 *arr;
4218 	u8 *cmd = scp->cmnd;
4219 	struct sdeb_store_info *sip = devip2sip(devip, true);
4220 	rwlock_t *macc_lckp = &sip->macc_lck;
4221 
4222 	bytchk = (cmd[1] >> 1) & 0x3;
4223 	if (bytchk == 0) {
4224 		return 0;	/* always claim internal verify okay */
4225 	} else if (bytchk == 2) {
4226 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4227 		return check_condition_result;
4228 	} else if (bytchk == 3) {
4229 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4230 	}
4231 	switch (cmd[0]) {
4232 	case VERIFY_16:
4233 		lba = get_unaligned_be64(cmd + 2);
4234 		vnum = get_unaligned_be32(cmd + 10);
4235 		break;
4236 	case VERIFY:		/* is VERIFY(10) */
4237 		lba = get_unaligned_be32(cmd + 2);
4238 		vnum = get_unaligned_be16(cmd + 7);
4239 		break;
4240 	default:
4241 		mk_sense_invalid_opcode(scp);
4242 		return check_condition_result;
4243 	}
4244 	a_num = is_bytchk3 ? 1 : vnum;
4245 	/* Treat following check like one for read (i.e. no write) access */
4246 	ret = check_device_access_params(scp, lba, a_num, false);
4247 	if (ret)
4248 		return ret;
4249 
4250 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4251 	if (!arr) {
4252 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4253 				INSUFF_RES_ASCQ);
4254 		return check_condition_result;
4255 	}
4256 	/* Not changing store, so only need read access */
4257 	read_lock(macc_lckp);
4258 
4259 	ret = do_dout_fetch(scp, a_num, arr);
4260 	if (ret == -1) {
4261 		ret = DID_ERROR << 16;
4262 		goto cleanup;
4263 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4264 		sdev_printk(KERN_INFO, scp->device,
4265 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4266 			    my_name, __func__, a_num * lb_size, ret);
4267 	}
4268 	if (is_bytchk3) {
4269 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4270 			memcpy(arr + off, arr, lb_size);
4271 	}
4272 	ret = 0;
4273 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4274 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4275 		ret = check_condition_result;
4276 		goto cleanup;
4277 	}
4278 cleanup:
4279 	read_unlock(macc_lckp);
4280 	kfree(arr);
4281 	return ret;
4282 }
4283 
4284 #define RZONES_DESC_HD 64
4285 
4286 /* Report zones depending on start LBA and reporting options */
4287 static int resp_report_zones(struct scsi_cmnd *scp,
4288 			     struct sdebug_dev_info *devip)
4289 {
4290 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4291 	int ret = 0;
4292 	u32 alloc_len, rep_opts, rep_len;
4293 	bool partial;
4294 	u64 lba, zs_lba;
4295 	u8 *arr = NULL, *desc;
4296 	u8 *cmd = scp->cmnd;
4297 	struct sdeb_zone_state *zsp;
4298 	struct sdeb_store_info *sip = devip2sip(devip, false);
4299 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4300 
4301 	if (!sdebug_dev_is_zoned(devip)) {
4302 		mk_sense_invalid_opcode(scp);
4303 		return check_condition_result;
4304 	}
4305 	zs_lba = get_unaligned_be64(cmd + 2);
4306 	alloc_len = get_unaligned_be32(cmd + 10);
4307 	rep_opts = cmd[14] & 0x3f;
4308 	partial = cmd[14] & 0x80;
4309 
4310 	if (zs_lba >= sdebug_capacity) {
4311 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4312 		return check_condition_result;
4313 	}
4314 
4315 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4316 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4317 			    max_zones);
4318 
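	/*
	 * Note: kcalloc(RZONES_DESC_HD, alloc_len, ...) reserves 64 bytes
	 * per byte of alloc_len, far more than the header plus
	 * rep_max_zones descriptors can use; alloc_len bytes would appear
	 * to suffice.
	 */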
4319 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4320 	if (!arr) {
4321 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4322 				INSUFF_RES_ASCQ);
4323 		return check_condition_result;
4324 	}
4325 
4326 	read_lock(macc_lckp);
4327 
4328 	desc = arr + 64;
4329 	for (i = 0; i < max_zones; i++) {
4330 		lba = zs_lba + devip->zsize * i;
4331 		if (lba > sdebug_capacity)
4332 			break;
4333 		zsp = zbc_zone(devip, lba);
4334 		switch (rep_opts) {
4335 		case 0x00:
4336 			/* All zones */
4337 			break;
4338 		case 0x01:
4339 			/* Empty zones */
4340 			if (zsp->z_cond != ZC1_EMPTY)
4341 				continue;
4342 			break;
4343 		case 0x02:
4344 			/* Implicit open zones */
4345 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4346 				continue;
4347 			break;
4348 		case 0x03:
4349 			/* Explicit open zones */
4350 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4351 				continue;
4352 			break;
4353 		case 0x04:
4354 			/* Closed zones */
4355 			if (zsp->z_cond != ZC4_CLOSED)
4356 				continue;
4357 			break;
4358 		case 0x05:
4359 			/* Full zones */
4360 			if (zsp->z_cond != ZC5_FULL)
4361 				continue;
4362 			break;
4363 		case 0x06:
4364 		case 0x07:
4365 		case 0x10:
4366 			/*
4367 			 * Read-only, offline, reset WP recommended are
4368 			 * not emulated: no zones to report.
4369 			 */
4370 			continue;
4371 		case 0x11:
4372 			/* non-seq-resource set */
4373 			if (!zsp->z_non_seq_resource)
4374 				continue;
4375 			break;
4376 		case 0x3f:
4377 			/* Not write pointer (conventional) zones */
4378 			if (!zbc_zone_is_conv(zsp))
4379 				continue;
4380 			break;
4381 		default:
4382 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4383 					INVALID_FIELD_IN_CDB, 0);
4384 			ret = check_condition_result;
4385 			goto fini;
4386 		}
4387 
4388 		if (nrz < rep_max_zones) {
4389 			/* Fill zone descriptor */
4390 			desc[0] = zsp->z_type;
4391 			desc[1] = zsp->z_cond << 4;
4392 			if (zsp->z_non_seq_resource)
4393 				desc[1] |= 1 << 1;
4394 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4395 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4396 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4397 			desc += 64;
4398 		}
4399 
4400 		if (partial && nrz >= rep_max_zones)
4401 			break;
4402 
4403 		nrz++;
4404 	}
4405 
4406 	/* Report header */
4407 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4408 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4409 
4410 	rep_len = (unsigned long)desc - (unsigned long)arr;
4411 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4412 
4413 fini:
4414 	read_unlock(macc_lckp);
4415 	kfree(arr);
4416 	return ret;
4417 }
4418 
4419 /* Logic transplanted from tcmu-runner, file_zbc.c */
4420 static void zbc_open_all(struct sdebug_dev_info *devip)
4421 {
4422 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4423 	unsigned int i;
4424 
4425 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4426 		if (zsp->z_cond == ZC4_CLOSED)
4427 			zbc_open_zone(devip, &devip->zstate[i], true);
4428 	}
4429 }
4430 
4431 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4432 {
4433 	int res = 0;
4434 	u64 z_id;
4435 	enum sdebug_z_cond zc;
4436 	u8 *cmd = scp->cmnd;
4437 	struct sdeb_zone_state *zsp;
4438 	bool all = cmd[14] & 0x01;
4439 	struct sdeb_store_info *sip = devip2sip(devip, false);
4440 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4441 
4442 	if (!sdebug_dev_is_zoned(devip)) {
4443 		mk_sense_invalid_opcode(scp);
4444 		return check_condition_result;
4445 	}
4446 
4447 	write_lock(macc_lckp);
4448 
4449 	if (all) {
4450 		/* Check if all closed zones can be opened */
4451 		if (devip->max_open &&
4452 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4453 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4454 					INSUFF_ZONE_ASCQ);
4455 			res = check_condition_result;
4456 			goto fini;
4457 		}
4458 		/* Open all closed zones */
4459 		zbc_open_all(devip);
4460 		goto fini;
4461 	}
4462 
4463 	/* Open the specified zone */
4464 	z_id = get_unaligned_be64(cmd + 2);
4465 	if (z_id >= sdebug_capacity) {
4466 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4467 		res = check_condition_result;
4468 		goto fini;
4469 	}
4470 
4471 	zsp = zbc_zone(devip, z_id);
4472 	if (z_id != zsp->z_start) {
4473 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4474 		res = check_condition_result;
4475 		goto fini;
4476 	}
4477 	if (zbc_zone_is_conv(zsp)) {
4478 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4479 		res = check_condition_result;
4480 		goto fini;
4481 	}
4482 
4483 	zc = zsp->z_cond;
4484 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4485 		goto fini;
4486 
4487 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4488 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4489 				INSUFF_ZONE_ASCQ);
4490 		res = check_condition_result;
4491 		goto fini;
4492 	}
4493 
4494 	zbc_open_zone(devip, zsp, true);
4495 fini:
4496 	write_unlock(macc_lckp);
4497 	return res;
4498 }
4499 
4500 static void zbc_close_all(struct sdebug_dev_info *devip)
4501 {
4502 	unsigned int i;
4503 
4504 	for (i = 0; i < devip->nr_zones; i++)
4505 		zbc_close_zone(devip, &devip->zstate[i]);
4506 }
4507 
4508 static int resp_close_zone(struct scsi_cmnd *scp,
4509 			   struct sdebug_dev_info *devip)
4510 {
4511 	int res = 0;
4512 	u64 z_id;
4513 	u8 *cmd = scp->cmnd;
4514 	struct sdeb_zone_state *zsp;
4515 	bool all = cmd[14] & 0x01;
4516 	struct sdeb_store_info *sip = devip2sip(devip, false);
4517 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4518 
4519 	if (!sdebug_dev_is_zoned(devip)) {
4520 		mk_sense_invalid_opcode(scp);
4521 		return check_condition_result;
4522 	}
4523 
4524 	write_lock(macc_lckp);
4525 
4526 	if (all) {
4527 		zbc_close_all(devip);
4528 		goto fini;
4529 	}
4530 
4531 	/* Close specified zone */
4532 	z_id = get_unaligned_be64(cmd + 2);
4533 	if (z_id >= sdebug_capacity) {
4534 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4535 		res = check_condition_result;
4536 		goto fini;
4537 	}
4538 
4539 	zsp = zbc_zone(devip, z_id);
4540 	if (z_id != zsp->z_start) {
4541 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4542 		res = check_condition_result;
4543 		goto fini;
4544 	}
4545 	if (zbc_zone_is_conv(zsp)) {
4546 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4547 		res = check_condition_result;
4548 		goto fini;
4549 	}
4550 
4551 	zbc_close_zone(devip, zsp);
4552 fini:
4553 	write_unlock(macc_lckp);
4554 	return res;
4555 }
4556 
4557 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4558 			    struct sdeb_zone_state *zsp, bool empty)
4559 {
4560 	enum sdebug_z_cond zc = zsp->z_cond;
4561 
4562 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4563 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4564 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4565 			zbc_close_zone(devip, zsp);
4566 		if (zsp->z_cond == ZC4_CLOSED)
4567 			devip->nr_closed--;
4568 		zsp->z_wp = zsp->z_start + zsp->z_size;
4569 		zsp->z_cond = ZC5_FULL;
4570 	}
4571 }
4572 
4573 static void zbc_finish_all(struct sdebug_dev_info *devip)
4574 {
4575 	unsigned int i;
4576 
4577 	for (i = 0; i < devip->nr_zones; i++)
4578 		zbc_finish_zone(devip, &devip->zstate[i], false);
4579 }
4580 
4581 static int resp_finish_zone(struct scsi_cmnd *scp,
4582 			    struct sdebug_dev_info *devip)
4583 {
4584 	struct sdeb_zone_state *zsp;
4585 	int res = 0;
4586 	u64 z_id;
4587 	u8 *cmd = scp->cmnd;
4588 	bool all = cmd[14] & 0x01;
4589 	struct sdeb_store_info *sip = devip2sip(devip, false);
4590 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4591 
4592 	if (!sdebug_dev_is_zoned(devip)) {
4593 		mk_sense_invalid_opcode(scp);
4594 		return check_condition_result;
4595 	}
4596 
4597 	write_lock(macc_lckp);
4598 
4599 	if (all) {
4600 		zbc_finish_all(devip);
4601 		goto fini;
4602 	}
4603 
4604 	/* Finish the specified zone */
4605 	z_id = get_unaligned_be64(cmd + 2);
4606 	if (z_id >= sdebug_capacity) {
4607 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4608 		res = check_condition_result;
4609 		goto fini;
4610 	}
4611 
4612 	zsp = zbc_zone(devip, z_id);
4613 	if (z_id != zsp->z_start) {
4614 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4615 		res = check_condition_result;
4616 		goto fini;
4617 	}
4618 	if (zbc_zone_is_conv(zsp)) {
4619 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4620 		res = check_condition_result;
4621 		goto fini;
4622 	}
4623 
4624 	zbc_finish_zone(devip, zsp, true);
4625 fini:
4626 	write_unlock(macc_lckp);
4627 	return res;
4628 }
4629 
4630 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4631 			 struct sdeb_zone_state *zsp)
4632 {
4633 	enum sdebug_z_cond zc;
4634 
4635 	if (zbc_zone_is_conv(zsp))
4636 		return;
4637 
4638 	zc = zsp->z_cond;
4639 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4640 		zbc_close_zone(devip, zsp);
4641 
4642 	if (zsp->z_cond == ZC4_CLOSED)
4643 		devip->nr_closed--;
4644 
4645 	zsp->z_non_seq_resource = false;
4646 	zsp->z_wp = zsp->z_start;
4647 	zsp->z_cond = ZC1_EMPTY;
4648 }
4649 
4650 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4651 {
4652 	unsigned int i;
4653 
4654 	for (i = 0; i < devip->nr_zones; i++)
4655 		zbc_rwp_zone(devip, &devip->zstate[i]);
4656 }
4657 
4658 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4659 {
4660 	struct sdeb_zone_state *zsp;
4661 	int res = 0;
4662 	u64 z_id;
4663 	u8 *cmd = scp->cmnd;
4664 	bool all = cmd[14] & 0x01;
4665 	struct sdeb_store_info *sip = devip2sip(devip, false);
4666 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4667 
4668 	if (!sdebug_dev_is_zoned(devip)) {
4669 		mk_sense_invalid_opcode(scp);
4670 		return check_condition_result;
4671 	}
4672 
4673 	write_lock(macc_lckp);
4674 
4675 	if (all) {
4676 		zbc_rwp_all(devip);
4677 		goto fini;
4678 	}
4679 
4680 	z_id = get_unaligned_be64(cmd + 2);
4681 	if (z_id >= sdebug_capacity) {
4682 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4683 		res = check_condition_result;
4684 		goto fini;
4685 	}
4686 
4687 	zsp = zbc_zone(devip, z_id);
4688 	if (z_id != zsp->z_start) {
4689 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4690 		res = check_condition_result;
4691 		goto fini;
4692 	}
4693 	if (zbc_zone_is_conv(zsp)) {
4694 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4695 		res = check_condition_result;
4696 		goto fini;
4697 	}
4698 
4699 	zbc_rwp_zone(devip, zsp);
4700 fini:
4701 	write_unlock(macc_lckp);
4702 	return res;
4703 }
4704 
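/*
 * Map a command to its submission queue: the block layer's unique tag
 * encodes the blk-mq hardware queue index, which selects an element of
 * sdebug_q_arr.
 */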
4705 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4706 {
4707 	u16 hwq;
4708 	u32 tag = blk_mq_unique_tag(cmnd->request);
4709 
4710 	hwq = blk_mq_unique_tag_to_hwq(tag);
4711 
4712 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4713 	if (WARN_ON_ONCE(hwq >= submit_queues))
4714 		hwq = 0;
4715 
4716 	return sdebug_q_arr + hwq;
4717 }
4718 
4719 static u32 get_tag(struct scsi_cmnd *cmnd)
4720 {
4721 	return blk_mq_unique_tag(cmnd->request);
4722 }
4723 
4724 /* Queued (deferred) command completions converge here. */
4725 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4726 {
4727 	bool aborted = sd_dp->aborted;
4728 	int qc_idx;
4729 	int retiring = 0;
4730 	unsigned long iflags;
4731 	struct sdebug_queue *sqp;
4732 	struct sdebug_queued_cmd *sqcp;
4733 	struct scsi_cmnd *scp;
4734 	struct sdebug_dev_info *devip;
4735 
4736 	if (unlikely(aborted))
4737 		sd_dp->aborted = false;
4738 	qc_idx = sd_dp->qc_idx;
4739 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4740 	if (sdebug_statistics) {
4741 		atomic_inc(&sdebug_completions);
4742 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4743 			atomic_inc(&sdebug_miss_cpus);
4744 	}
4745 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4746 		pr_err("wild qc_idx=%d\n", qc_idx);
4747 		return;
4748 	}
4749 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4750 	sd_dp->defer_t = SDEB_DEFER_NONE;
4751 	sqcp = &sqp->qc_arr[qc_idx];
4752 	scp = sqcp->a_cmnd;
4753 	if (unlikely(scp == NULL)) {
4754 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4755 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4756 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4757 		return;
4758 	}
4759 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4760 	if (likely(devip))
4761 		atomic_dec(&devip->num_in_q);
4762 	else
4763 		pr_err("devip=NULL\n");
4764 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4765 		retiring = 1;
4766 
4767 	sqcp->a_cmnd = NULL;
4768 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4769 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4770 		pr_err("Unexpected completion\n");
4771 		return;
4772 	}
4773 
4774 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4775 		int k, retval;
4776 
4777 		retval = atomic_read(&retired_max_queue);
4778 		if (qc_idx >= retval) {
4779 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4780 			pr_err("index %d too large\n", retval);
4781 			return;
4782 		}
4783 		k = find_last_bit(sqp->in_use_bm, retval);
4784 		if ((k < sdebug_max_queue) || (k == retval))
4785 			atomic_set(&retired_max_queue, 0);
4786 		else
4787 			atomic_set(&retired_max_queue, k + 1);
4788 	}
4789 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4790 	if (unlikely(aborted)) {
4791 		if (sdebug_verbose)
4792 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4793 		return;
4794 	}
4795 	scp->scsi_done(scp); /* callback to mid level */
4796 }
4797 
4798 /* When high resolution timer goes off this function is called. */
4799 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4800 {
4801 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4802 						  hrt);
4803 	sdebug_q_cmd_complete(sd_dp);
4804 	return HRTIMER_NORESTART;
4805 }
4806 
4807 /* When work queue schedules work, it calls this function. */
4808 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4809 {
4810 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4811 						  ew.work);
4812 	sdebug_q_cmd_complete(sd_dp);
4813 }
4814 
4815 static bool got_shared_uuid;
4816 static uuid_t shared_uuid;
4817 
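/*
 * Lay out the emulated zones: the first sdeb_zbc_nr_conv zones are
 * conventional, the rest are sequential write required (host managed)
 * or sequential write preferred (host aware). The last zone may be
 * smaller than the others when the capacity is not a multiple of the
 * zone size.
 */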
4818 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4819 {
4820 	struct sdeb_zone_state *zsp;
4821 	sector_t capacity = get_sdebug_capacity();
4822 	sector_t zstart = 0;
4823 	unsigned int i;
4824 
4825 	/*
4826 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4827 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4828 	 * use the specified zone size checking that at least 2 zones can be
4829 	 * created for the device.
4830 	 */
4831 	if (!sdeb_zbc_zone_size_mb) {
4832 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4833 			>> ilog2(sdebug_sector_size);
4834 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4835 			devip->zsize >>= 1;
4836 		if (devip->zsize < 2) {
4837 			pr_err("Device capacity too small\n");
4838 			return -EINVAL;
4839 		}
4840 	} else {
4841 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4842 			pr_err("Zone size is not a power of 2\n");
4843 			return -EINVAL;
4844 		}
4845 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4846 			>> ilog2(sdebug_sector_size);
4847 		if (devip->zsize >= capacity) {
4848 			pr_err("Zone size too large for device capacity\n");
4849 			return -EINVAL;
4850 		}
4851 	}
4852 
4853 	devip->zsize_shift = ilog2(devip->zsize);
4854 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4855 
4856 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4857 		pr_err("Number of conventional zones too large\n");
4858 		return -EINVAL;
4859 	}
4860 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4861 
4862 	if (devip->zmodel == BLK_ZONED_HM) {
4863 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4864 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4865 			devip->max_open = (devip->nr_zones - 1) / 2;
4866 		else
4867 			devip->max_open = sdeb_zbc_max_open;
4868 	}
4869 
4870 	devip->zstate = kcalloc(devip->nr_zones,
4871 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4872 	if (!devip->zstate)
4873 		return -ENOMEM;
4874 
4875 	for (i = 0; i < devip->nr_zones; i++) {
4876 		zsp = &devip->zstate[i];
4877 
4878 		zsp->z_start = zstart;
4879 
4880 		if (i < devip->nr_conv_zones) {
4881 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4882 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4883 			zsp->z_wp = (sector_t)-1;
4884 		} else {
4885 			if (devip->zmodel == BLK_ZONED_HM)
4886 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4887 			else
4888 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4889 			zsp->z_cond = ZC1_EMPTY;
4890 			zsp->z_wp = zsp->z_start;
4891 		}
4892 
4893 		if (zsp->z_start + devip->zsize < capacity)
4894 			zsp->z_size = devip->zsize;
4895 		else
4896 			zsp->z_size = capacity - zsp->z_start;
4897 
4898 		zstart += zsp->z_size;
4899 	}
4900 
4901 	return 0;
4902 }
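/*
 * Worked illustration of the zone sizing above (a sketch, not driver
 * code; the example_* helper below is hypothetical). With a 512 byte
 * logical block size and zone_size_mb left at 0, the zone size starts
 * at DEF_ZBC_ZONE_SIZE_MB MiB worth of blocks and is halved until at
 * least 4 zones fit the capacity.
 */
#if 0	/* illustration only, not compiled */
static unsigned int example_default_zone_blocks(sector_t capacity,
						unsigned int zone_mb,
						unsigned int lb_size)
{
	unsigned int zsize = (zone_mb * SZ_1M) >> ilog2(lb_size);

	/* halve until the device holds at least 4 zones, as done above */
	while (capacity < ((sector_t)zsize << 2) && zsize >= 2)
		zsize >>= 1;
	return zsize;	/* a result < 2 means the capacity is too small */
}
#endif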
4903 
4904 static struct sdebug_dev_info *sdebug_device_create(
4905 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4906 {
4907 	struct sdebug_dev_info *devip;
4908 
4909 	devip = kzalloc(sizeof(*devip), flags);
4910 	if (devip) {
4911 		if (sdebug_uuid_ctl == 1)
4912 			uuid_gen(&devip->lu_name);
4913 		else if (sdebug_uuid_ctl == 2) {
4914 			if (got_shared_uuid)
4915 				devip->lu_name = shared_uuid;
4916 			else {
4917 				uuid_gen(&shared_uuid);
4918 				got_shared_uuid = true;
4919 				devip->lu_name = shared_uuid;
4920 			}
4921 		}
4922 		devip->sdbg_host = sdbg_host;
4923 		if (sdeb_zbc_in_use) {
4924 			devip->zmodel = sdeb_zbc_model;
4925 			if (sdebug_device_create_zones(devip)) {
4926 				kfree(devip);
4927 				return NULL;
4928 			}
4929 		} else {
4930 			devip->zmodel = BLK_ZONED_NONE;
4931 		}
4932 
4933 		devip->create_ts = ktime_get_boottime();
4934 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4935 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4936 	}
4937 	return devip;
4938 }
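/*
 * Example of the uuid_ctl settings handled above: uuid_ctl=1 generates a
 * fresh UUID per logical unit for its LU name; uuid_ctl=2 generates one
 * UUID lazily and shares it across all logical units (shared_uuid).
 */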
4939 
4940 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4941 {
4942 	struct sdebug_host_info *sdbg_host;
4943 	struct sdebug_dev_info *open_devip = NULL;
4944 	struct sdebug_dev_info *devip;
4945 
4946 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4947 	if (!sdbg_host) {
4948 		pr_err("Host info NULL\n");
4949 		return NULL;
4950 	}
4951 
4952 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4953 		if (devip->used && devip->channel == sdev->channel &&
4954 		    devip->target == sdev->id &&
4955 		    devip->lun == sdev->lun)
4956 			return devip;
4957 
4958 		if (!devip->used && !open_devip)
4959 			open_devip = devip;
4960 
4961 	}
4962 	if (!open_devip) { /* try and make a new one */
4963 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4964 		if (!open_devip) {
4965 			pr_err("out of memory at line %d\n", __LINE__);
4966 			return NULL;
4967 		}
4968 	}
4969 
4970 	open_devip->channel = sdev->channel;
4971 	open_devip->target = sdev->id;
4972 	open_devip->lun = sdev->lun;
4973 	open_devip->sdbg_host = sdbg_host;
4974 	atomic_set(&open_devip->num_in_q, 0);
4975 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4976 	open_devip->used = true;
4977 	return open_devip;
4978 }
4979 
4980 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4981 {
4982 	if (sdebug_verbose)
4983 		pr_info("slave_alloc <%u %u %u %llu>\n",
4984 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4985 	return 0;
4986 }
4987 
4988 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4989 {
4990 	struct sdebug_dev_info *devip =
4991 			(struct sdebug_dev_info *)sdp->hostdata;
4992 
4993 	if (sdebug_verbose)
4994 		pr_info("slave_configure <%u %u %u %llu>\n",
4995 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4996 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4997 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4998 	if (devip == NULL) {
4999 		devip = find_build_dev_info(sdp);
5000 		if (devip == NULL)
5001 			return 1;  /* no resources, will be marked offline */
5002 	}
5003 	sdp->hostdata = devip;
5004 	if (sdebug_no_uld)
5005 		sdp->no_uld_attach = 1;
5006 	config_cdb_len(sdp);
5007 	return 0;
5008 }
5009 
5010 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5011 {
5012 	struct sdebug_dev_info *devip =
5013 		(struct sdebug_dev_info *)sdp->hostdata;
5014 
5015 	if (sdebug_verbose)
5016 		pr_info("slave_destroy <%u %u %u %llu>\n",
5017 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5018 	if (devip) {
5019 		/* make this slot available for re-use */
5020 		devip->used = false;
5021 		sdp->hostdata = NULL;
5022 	}
5023 }
5024 
5025 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5026 			   enum sdeb_defer_type defer_t)
5027 {
5028 	if (!sd_dp)
5029 		return;
5030 	if (defer_t == SDEB_DEFER_HRT)
5031 		hrtimer_cancel(&sd_dp->hrt);
5032 	else if (defer_t == SDEB_DEFER_WQ)
5033 		cancel_work_sync(&sd_dp->ew.work);
5034 }
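/*
 * Note that callers drop qc_lock before invoking stop_qc_helper():
 * hrtimer_cancel() and cancel_work_sync() wait for a running completion
 * handler, and that handler itself takes qc_lock, so cancelling while
 * holding the lock could deadlock.
 */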
5035 
5036 /* If @cmnd is found, delete its timer or work queue and return true; else
5037    return false. */
5038 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5039 {
5040 	unsigned long iflags;
5041 	int j, k, qmax, r_qmax;
5042 	enum sdeb_defer_type l_defer_t;
5043 	struct sdebug_queue *sqp;
5044 	struct sdebug_queued_cmd *sqcp;
5045 	struct sdebug_dev_info *devip;
5046 	struct sdebug_defer *sd_dp;
5047 
5048 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5049 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5050 		qmax = sdebug_max_queue;
5051 		r_qmax = atomic_read(&retired_max_queue);
5052 		if (r_qmax > qmax)
5053 			qmax = r_qmax;
5054 		for (k = 0; k < qmax; ++k) {
5055 			if (test_bit(k, sqp->in_use_bm)) {
5056 				sqcp = &sqp->qc_arr[k];
5057 				if (cmnd != sqcp->a_cmnd)
5058 					continue;
5059 				/* found */
5060 				devip = (struct sdebug_dev_info *)
5061 						cmnd->device->hostdata;
5062 				if (devip)
5063 					atomic_dec(&devip->num_in_q);
5064 				sqcp->a_cmnd = NULL;
5065 				sd_dp = sqcp->sd_dp;
5066 				if (sd_dp) {
5067 					l_defer_t = sd_dp->defer_t;
5068 					sd_dp->defer_t = SDEB_DEFER_NONE;
5069 				} else
5070 					l_defer_t = SDEB_DEFER_NONE;
5071 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5072 				stop_qc_helper(sd_dp, l_defer_t);
5073 				clear_bit(k, sqp->in_use_bm);
5074 				return true;
5075 			}
5076 		}
5077 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5078 	}
5079 	return false;
5080 }
5081 
5082 /* Deletes (stops) timers or work queues of all queued commands */
5083 static void stop_all_queued(void)
5084 {
5085 	unsigned long iflags;
5086 	int j, k;
5087 	enum sdeb_defer_type l_defer_t;
5088 	struct sdebug_queue *sqp;
5089 	struct sdebug_queued_cmd *sqcp;
5090 	struct sdebug_dev_info *devip;
5091 	struct sdebug_defer *sd_dp;
5092 
5093 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5094 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5095 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5096 			if (test_bit(k, sqp->in_use_bm)) {
5097 				sqcp = &sqp->qc_arr[k];
5098 				if (sqcp->a_cmnd == NULL)
5099 					continue;
5100 				devip = (struct sdebug_dev_info *)
5101 					sqcp->a_cmnd->device->hostdata;
5102 				if (devip)
5103 					atomic_dec(&devip->num_in_q);
5104 				sqcp->a_cmnd = NULL;
5105 				sd_dp = sqcp->sd_dp;
5106 				if (sd_dp) {
5107 					l_defer_t = sd_dp->defer_t;
5108 					sd_dp->defer_t = SDEB_DEFER_NONE;
5109 				} else
5110 					l_defer_t = SDEB_DEFER_NONE;
5111 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5112 				stop_qc_helper(sd_dp, l_defer_t);
5113 				clear_bit(k, sqp->in_use_bm);
5114 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5115 			}
5116 		}
5117 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5118 	}
5119 }
5120 
5121 /* Free queued command memory on heap */
5122 static void free_all_queued(void)
5123 {
5124 	int j, k;
5125 	struct sdebug_queue *sqp;
5126 	struct sdebug_queued_cmd *sqcp;
5127 
5128 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5129 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5130 			sqcp = &sqp->qc_arr[k];
5131 			kfree(sqcp->sd_dp);
5132 			sqcp->sd_dp = NULL;
5133 		}
5134 	}
5135 }
5136 
5137 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5138 {
5139 	bool ok;
5140 
5141 	++num_aborts;
5142 	if (SCpnt) {
5143 		ok = stop_queued_cmnd(SCpnt);
5144 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5145 			sdev_printk(KERN_INFO, SCpnt->device,
5146 				    "%s: command%s found\n", __func__,
5147 				    ok ? "" : " not");
5148 	}
5149 	return SUCCESS;
5150 }
5151 
5152 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5153 {
5154 	++num_dev_resets;
5155 	if (SCpnt && SCpnt->device) {
5156 		struct scsi_device *sdp = SCpnt->device;
5157 		struct sdebug_dev_info *devip =
5158 				(struct sdebug_dev_info *)sdp->hostdata;
5159 
5160 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5161 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5162 		if (devip)
5163 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5164 	}
5165 	return SUCCESS;
5166 }
5167 
5168 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5169 {
5170 	struct sdebug_host_info *sdbg_host;
5171 	struct sdebug_dev_info *devip;
5172 	struct scsi_device *sdp;
5173 	struct Scsi_Host *hp;
5174 	int k = 0;
5175 
5176 	++num_target_resets;
5177 	if (!SCpnt)
5178 		goto lie;
5179 	sdp = SCpnt->device;
5180 	if (!sdp)
5181 		goto lie;
5182 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5183 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5184 	hp = sdp->host;
5185 	if (!hp)
5186 		goto lie;
5187 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5188 	if (sdbg_host) {
5189 		list_for_each_entry(devip,
5190 				    &sdbg_host->dev_info_list,
5191 				    dev_list)
5192 			if (devip->target == sdp->id) {
5193 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5194 				++k;
5195 			}
5196 	}
5197 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5198 		sdev_printk(KERN_INFO, sdp,
5199 			    "%s: %d device(s) found in target\n", __func__, k);
5200 lie:
5201 	return SUCCESS;
5202 }
5203 
5204 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5205 {
5206 	struct sdebug_host_info *sdbg_host;
5207 	struct sdebug_dev_info *devip;
5208 	struct scsi_device *sdp;
5209 	struct Scsi_Host *hp;
5210 	int k = 0;
5211 
5212 	++num_bus_resets;
5213 	if (!(SCpnt && SCpnt->device))
5214 		goto lie;
5215 	sdp = SCpnt->device;
5216 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5217 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5218 	hp = sdp->host;
5219 	if (hp) {
5220 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5221 		if (sdbg_host) {
5222 			list_for_each_entry(devip,
5223 					    &sdbg_host->dev_info_list,
5224 					    dev_list) {
5225 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5226 				++k;
5227 			}
5228 		}
5229 	}
5230 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5231 		sdev_printk(KERN_INFO, sdp,
5232 			    "%s: %d device(s) found in host\n", __func__, k);
5233 lie:
5234 	return SUCCESS;
5235 }
5236 
5237 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5238 {
5239 	struct sdebug_host_info *sdbg_host;
5240 	struct sdebug_dev_info *devip;
5241 	int k = 0;
5242 
5243 	++num_host_resets;
5244 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5245 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5246 	spin_lock(&sdebug_host_list_lock);
5247 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5248 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5249 				    dev_list) {
5250 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5251 			++k;
5252 		}
5253 	}
5254 	spin_unlock(&sdebug_host_list_lock);
5255 	stop_all_queued();
5256 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5257 		sdev_printk(KERN_INFO, SCpnt->device,
5258 			    "%s: %d device(s) found\n", __func__, k);
5259 	return SUCCESS;
5260 }
5261 
5262 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5263 {
5264 	struct msdos_partition *pp;
5265 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5266 	int sectors_per_part, num_sectors, k;
5267 	int heads_by_sects, start_sec, end_sec;
5268 
5269 	/* assume partition table already zeroed */
5270 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5271 		return;
5272 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5273 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5274 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5275 	}
5276 	num_sectors = (int)get_sdebug_capacity();
5277 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5278 			   / sdebug_num_parts;
5279 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5280 	starts[0] = sdebug_sectors_per;
5281 	max_part_secs = sectors_per_part;
5282 	for (k = 1; k < sdebug_num_parts; ++k) {
5283 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5284 			    * heads_by_sects;
5285 		if (starts[k] - starts[k - 1] < max_part_secs)
5286 			max_part_secs = starts[k] - starts[k - 1];
5287 	}
5288 	starts[sdebug_num_parts] = num_sectors;
5289 	starts[sdebug_num_parts + 1] = 0;
5290 
5291 	ramp[510] = 0x55;	/* magic partition markings */
5292 	ramp[511] = 0xAA;
5293 	pp = (struct msdos_partition *)(ramp + 0x1be);
5294 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5295 		start_sec = starts[k];
5296 		end_sec = starts[k] + max_part_secs - 1;
5297 		pp->boot_ind = 0;
5298 
5299 		pp->cyl = start_sec / heads_by_sects;
5300 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5301 			   / sdebug_sectors_per;
5302 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5303 
5304 		pp->end_cyl = end_sec / heads_by_sects;
5305 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5306 			       / sdebug_sectors_per;
5307 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5308 
5309 		pp->start_sect = cpu_to_le32(start_sec);
5310 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5311 		pp->sys_ind = 0x83;	/* plain Linux partition */
5312 	}
5313 }
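/*
 * The CHS arithmetic above, restated as a sketch (illustration only;
 * the example_* helper is hypothetical): an absolute sector number maps
 * to cylinder/head/sector in the fake geometry, with CHS sector numbers
 * being 1-based per DOS convention.
 */
#if 0	/* illustration only, not compiled */
static void example_lba_to_chs(int sec, int heads, int sects_per_track,
			       int *cyl, int *head, int *sector)
{
	int heads_by_sects = heads * sects_per_track;

	*cyl = sec / heads_by_sects;
	*head = (sec - (*cyl * heads_by_sects)) / sects_per_track;
	*sector = (sec % sects_per_track) + 1;	/* sectors start at 1 */
}
#endif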
5314 
5315 static void block_unblock_all_queues(bool block)
5316 {
5317 	int j;
5318 	struct sdebug_queue *sqp;
5319 
5320 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5321 		atomic_set(&sqp->blocked, (int)block);
5322 }
5323 
5324 /* Adjust sdebug_cmnd_count (by rounding it down) so that abs(every_nth)-1
5325  * more commands are processed normally before the next trigger occurs.
5326  */
5327 static void tweak_cmnd_count(void)
5328 {
5329 	int count, modulo;
5330 
5331 	modulo = abs(sdebug_every_nth);
5332 	if (modulo < 2)
5333 		return;
5334 	block_unblock_all_queues(true);
5335 	count = atomic_read(&sdebug_cmnd_count);
5336 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5337 	block_unblock_all_queues(false);
5338 }
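/*
 * Example: with every_nth=100 and sdebug_cmnd_count at 250, the count is
 * rounded down to 200, so 99 more commands are processed normally before
 * the next multiple of 100 triggers the configured action.
 */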
5339 
5340 static void clear_queue_stats(void)
5341 {
5342 	atomic_set(&sdebug_cmnd_count, 0);
5343 	atomic_set(&sdebug_completions, 0);
5344 	atomic_set(&sdebug_miss_cpus, 0);
5345 	atomic_set(&sdebug_a_tsf, 0);
5346 }
5347 
5348 static bool inject_on_this_cmd(void)
5349 {
5350 	if (sdebug_every_nth == 0)
5351 		return false;
5352 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5353 }
5354 
5355 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5356 
5357 /* Completes the processing of the thread that queued a SCSI command to this
5358  * driver. It either completes the command immediately by calling scsi_done()
5359  * in the invocation thread, or schedules an hrtimer or work queue item and
5360  * returns 0. Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5361  */
5362 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5363 			 int scsi_result,
5364 			 int (*pfp)(struct scsi_cmnd *,
5365 				    struct sdebug_dev_info *),
5366 			 int delta_jiff, int ndelay)
5367 {
5368 	bool new_sd_dp;
5369 	bool inject = false;
5370 	bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
5371 	int k, num_in_q, qdepth;
5372 	unsigned long iflags;
5373 	u64 ns_from_boot = 0;
5374 	struct sdebug_queue *sqp;
5375 	struct sdebug_queued_cmd *sqcp;
5376 	struct scsi_device *sdp;
5377 	struct sdebug_defer *sd_dp;
5378 
5379 	if (unlikely(devip == NULL)) {
5380 		if (scsi_result == 0)
5381 			scsi_result = DID_NO_CONNECT << 16;
5382 		goto respond_in_thread;
5383 	}
5384 	sdp = cmnd->device;
5385 
5386 	if (delta_jiff == 0)
5387 		goto respond_in_thread;
5388 
5389 	sqp = get_queue(cmnd);
5390 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5391 	if (unlikely(atomic_read(&sqp->blocked))) {
5392 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5393 		return SCSI_MLQUEUE_HOST_BUSY;
5394 	}
5395 	num_in_q = atomic_read(&devip->num_in_q);
5396 	qdepth = cmnd->device->queue_depth;
5397 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5398 		if (scsi_result) {
5399 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5400 			goto respond_in_thread;
5401 		} else
5402 			scsi_result = device_qfull_result;
5403 	} else if (unlikely(sdebug_every_nth &&
5404 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5405 			    (scsi_result == 0))) {
5406 		if ((num_in_q == (qdepth - 1)) &&
5407 		    (atomic_inc_return(&sdebug_a_tsf) >=
5408 		     abs(sdebug_every_nth))) {
5409 			atomic_set(&sdebug_a_tsf, 0);
5410 			inject = true;
5411 			scsi_result = device_qfull_result;
5412 		}
5413 	}
5414 
5415 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5416 	if (unlikely(k >= sdebug_max_queue)) {
5417 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5418 		if (scsi_result)
5419 			goto respond_in_thread;
5420 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5421 			scsi_result = device_qfull_result;
5422 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5423 			sdev_printk(KERN_INFO, sdp,
5424 				    "%s: max_queue=%d exceeded, %s\n",
5425 				    __func__, sdebug_max_queue,
5426 				    (scsi_result ?  "status: TASK SET FULL" :
5427 						    "report: host busy"));
5428 		if (scsi_result)
5429 			goto respond_in_thread;
5430 		else
5431 			return SCSI_MLQUEUE_HOST_BUSY;
5432 	}
5433 	set_bit(k, sqp->in_use_bm);
5434 	atomic_inc(&devip->num_in_q);
5435 	sqcp = &sqp->qc_arr[k];
5436 	sqcp->a_cmnd = cmnd;
5437 	cmnd->host_scribble = (unsigned char *)sqcp;
5438 	sd_dp = sqcp->sd_dp;
5439 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5440 
5441 	if (!sd_dp) {
5442 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5443 		if (!sd_dp) {
5444 			atomic_dec(&devip->num_in_q);
5445 			clear_bit(k, sqp->in_use_bm);
5446 			return SCSI_MLQUEUE_HOST_BUSY;
5447 		}
5448 		new_sd_dp = true;
5449 	} else {
5450 		new_sd_dp = false;
5451 	}
5452 
5453 	/* Set the hostwide tag */
5454 	if (sdebug_host_max_queue)
5455 		sd_dp->hc_idx = get_tag(cmnd);
5456 
5457 	if (hipri)
5458 		ns_from_boot = ktime_get_boottime_ns();
5459 
5460 	/* one of the resp_*() response functions is called here */
5461 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5462 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5463 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5464 		delta_jiff = ndelay = 0;
5465 	}
5466 	if (cmnd->result == 0 && scsi_result != 0)
5467 		cmnd->result = scsi_result;
5468 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5469 		if (atomic_read(&sdeb_inject_pending)) {
5470 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5471 			atomic_set(&sdeb_inject_pending, 0);
5472 			cmnd->result = check_condition_result;
5473 		}
5474 	}
5475 
5476 	if (unlikely(sdebug_verbose && cmnd->result))
5477 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5478 			    __func__, cmnd->result);
5479 
5480 	if (delta_jiff > 0 || ndelay > 0) {
5481 		ktime_t kt;
5482 
5483 		if (delta_jiff > 0) {
5484 			u64 ns = jiffies_to_nsecs(delta_jiff);
5485 
5486 			if (sdebug_random && ns < U32_MAX) {
5487 				ns = prandom_u32_max((u32)ns);
5488 			} else if (sdebug_random) {
5489 				ns >>= 12;	/* scale to 4 usec precision */
5490 				if (ns < U32_MAX)	/* over 4 hours max */
5491 					ns = prandom_u32_max((u32)ns);
5492 				ns <<= 12;
5493 			}
5494 			kt = ns_to_ktime(ns);
5495 		} else {	/* ndelay has a 4.2 second max */
5496 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5497 					     (u32)ndelay;
5498 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5499 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5500 
5501 				if (kt <= d) {	/* elapsed duration >= kt */
5502 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5503 					sqcp->a_cmnd = NULL;
5504 					atomic_dec(&devip->num_in_q);
5505 					clear_bit(k, sqp->in_use_bm);
5506 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5507 					if (new_sd_dp)
5508 						kfree(sd_dp);
5509 					/* call scsi_done() from this thread */
5510 					cmnd->scsi_done(cmnd);
5511 					return 0;
5512 				}
5513 				/* otherwise reduce kt by elapsed time */
5514 				kt -= d;
5515 			}
5516 		}
5517 		if (hipri) {
5518 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5519 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5520 			if (!sd_dp->init_poll) {
5521 				sd_dp->init_poll = true;
5522 				sqcp->sd_dp = sd_dp;
5523 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5524 				sd_dp->qc_idx = k;
5525 			}
5526 			sd_dp->defer_t = SDEB_DEFER_POLL;
5527 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5528 		} else {
5529 			if (!sd_dp->init_hrt) {
5530 				sd_dp->init_hrt = true;
5531 				sqcp->sd_dp = sd_dp;
5532 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5533 					     HRTIMER_MODE_REL_PINNED);
5534 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5535 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5536 				sd_dp->qc_idx = k;
5537 			}
5538 			sd_dp->defer_t = SDEB_DEFER_HRT;
5539 			/* schedule the invocation of scsi_done() for a later time */
5540 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5541 		}
5542 		if (sdebug_statistics)
5543 			sd_dp->issuing_cpu = raw_smp_processor_id();
5544 	} else {	/* jdelay < 0, use work queue */
5545 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5546 			     atomic_read(&sdeb_inject_pending)))
5547 			sd_dp->aborted = true;
5548 		if (hipri) {
5549 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5550 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5551 			if (!sd_dp->init_poll) {
5552 				sd_dp->init_poll = true;
5553 				sqcp->sd_dp = sd_dp;
5554 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5555 				sd_dp->qc_idx = k;
5556 			}
5557 			sd_dp->defer_t = SDEB_DEFER_POLL;
5558 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5559 		} else {
5560 			if (!sd_dp->init_wq) {
5561 				sd_dp->init_wq = true;
5562 				sqcp->sd_dp = sd_dp;
5563 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5564 				sd_dp->qc_idx = k;
5565 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5566 			}
5567 			sd_dp->defer_t = SDEB_DEFER_WQ;
5568 			schedule_work(&sd_dp->ew.work);
5569 		}
5570 		if (sdebug_statistics)
5571 			sd_dp->issuing_cpu = raw_smp_processor_id();
5572 		if (unlikely(sd_dp->aborted)) {
5573 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5574 			blk_abort_request(cmnd->request);
5575 			atomic_set(&sdeb_inject_pending, 0);
5576 			sd_dp->aborted = false;
5577 		}
5578 	}
5579 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5580 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5581 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5582 	return 0;
5583 
5584 respond_in_thread:	/* call back to mid-layer using invocation thread */
5585 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5586 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5587 	if (cmnd->result == 0 && scsi_result != 0)
5588 		cmnd->result = scsi_result;
5589 	cmnd->scsi_done(cmnd);
5590 	return 0;
5591 }
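/*
 * Sketch of the randomized delay selection used in schedule_resp()
 * (illustration only; the example_* helper is hypothetical). Delays that
 * fit in 32 bits are randomized directly; larger ones are scaled down by
 * 2^12 (about 4 usec granularity), randomized, then scaled back up so
 * that prandom_u32_max() can still be used.
 */
#if 0	/* illustration only, not compiled */
static u64 example_randomize_ns(u64 ns)
{
	if (ns < U32_MAX)
		return prandom_u32_max((u32)ns);
	ns >>= 12;		/* scale to ~4 usec precision */
	if (ns < U32_MAX)	/* after scaling covers > 4 hour delays */
		ns = prandom_u32_max((u32)ns);
	return ns << 12;
}
#endif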
5592 
5593 /* Note: The following macros create attribute files in the
5594    /sys/module/scsi_debug/parameters directory. Unfortunately the driver
5595    is not notified when one of those files changes, so it cannot trigger
5596    auxiliary actions as it can when the corresponding attribute in the
5597    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5598  */
5599 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5600 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5601 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5602 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5603 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5604 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5605 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5606 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5607 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5608 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5609 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5610 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5611 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5612 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5613 module_param_string(inq_product, sdebug_inq_product_id,
5614 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5615 module_param_string(inq_rev, sdebug_inq_product_rev,
5616 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5617 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5618 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5619 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5620 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5621 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5622 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5623 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5624 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5625 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5626 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5627 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5628 		   S_IRUGO | S_IWUSR);
5629 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5630 		   S_IRUGO | S_IWUSR);
5631 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5632 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5633 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5634 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5635 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5636 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5637 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5638 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5639 module_param_named(per_host_store, sdebug_per_host_store, bool,
5640 		   S_IRUGO | S_IWUSR);
5641 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5642 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5643 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5644 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5645 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5646 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5647 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5648 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5649 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5650 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5651 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5652 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5653 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5654 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5655 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5656 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5657 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5658 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5659 		   S_IRUGO | S_IWUSR);
5660 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5661 module_param_named(write_same_length, sdebug_write_same_length, int,
5662 		   S_IRUGO | S_IWUSR);
5663 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5664 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5665 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5666 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5667 
5668 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5669 MODULE_DESCRIPTION("SCSI debug adapter driver");
5670 MODULE_LICENSE("GPL");
5671 MODULE_VERSION(SDEBUG_VERSION);
5672 
5673 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5674 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5675 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5676 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5677 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5678 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5679 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5680 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5681 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5682 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5683 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5684 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5685 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5686 MODULE_PARM_DESC(host_max_queue,
5687 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5688 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5689 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5690 		 SDEBUG_VERSION "\")");
5691 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5692 MODULE_PARM_DESC(lbprz,
5693 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5694 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5695 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5696 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5697 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5698 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
5699 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5700 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5701 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow-on MEDIUM error");
5702 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5703 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5704 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5705 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5706 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5707 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5708 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5709 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5710 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5711 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5712 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5713 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5714 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5715 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5716 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5717 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5718 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5719 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5720 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5721 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5722 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5723 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5724 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5725 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5726 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5727 MODULE_PARM_DESC(uuid_ctl,
5728 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5729 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5730 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5731 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5732 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5733 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5734 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5735 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5736 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
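/*
 * Illustrative invocation combining some of the parameters above (the
 * values are arbitrary): a 256 MiB shared ram store, 2 targets with 4
 * LUs each, and verbose logging:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 opts=1
 */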
5737 
5738 #define SDEBUG_INFO_LEN 256
5739 static char sdebug_info[SDEBUG_INFO_LEN];
5740 
5741 static const char *scsi_debug_info(struct Scsi_Host *shp)
5742 {
5743 	int k;
5744 
5745 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5746 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5747 	if (k >= (SDEBUG_INFO_LEN - 1))
5748 		return sdebug_info;
5749 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5750 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5751 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5752 		  "statistics", (int)sdebug_statistics);
5753 	return sdebug_info;
5754 }
5755 
5756 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5757 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5758 				 int length)
5759 {
5760 	char arr[16];
5761 	int opts;
5762 	int min_len = length > 15 ? 15 : length;
5763 
5764 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5765 		return -EACCES;
5766 	memcpy(arr, buffer, min_len);
5767 	arr[min_len] = '\0';
5768 	if (1 != sscanf(arr, "%d", &opts))
5769 		return -EINVAL;
5770 	sdebug_opts = opts;
5771 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5772 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5773 	if (sdebug_every_nth != 0)
5774 		tweak_cmnd_count();
5775 	return length;
5776 }
5777 
5778 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5779  * same for each scsi_debug host (if more than one). Some counters that are
5780  * output are not atomic so may be inaccurate on a busy system. */
5781 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5782 {
5783 	int f, j, l;
5784 	struct sdebug_queue *sqp;
5785 	struct sdebug_host_info *sdhp;
5786 
5787 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5788 		   SDEBUG_VERSION, sdebug_version_date);
5789 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5790 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5791 		   sdebug_opts, sdebug_every_nth);
5792 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5793 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5794 		   sdebug_sector_size, "bytes");
5795 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5796 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5797 		   num_aborts);
5798 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5799 		   num_dev_resets, num_target_resets, num_bus_resets,
5800 		   num_host_resets);
5801 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5802 		   dix_reads, dix_writes, dif_errors);
5803 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5804 		   sdebug_statistics);
5805 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5806 		   atomic_read(&sdebug_cmnd_count),
5807 		   atomic_read(&sdebug_completions),
5808 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5809 		   atomic_read(&sdebug_a_tsf),
5810 		   atomic_read(&sdeb_mq_poll_count));
5811 
5812 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5813 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5814 		seq_printf(m, "  queue %d:\n", j);
5815 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5816 		if (f != sdebug_max_queue) {
5817 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5818 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5819 				   "first,last bits", f, l);
5820 		}
5821 	}
5822 
5823 	seq_printf(m, "this host_no=%d\n", host->host_no);
5824 	if (!xa_empty(per_store_ap)) {
5825 		bool niu;
5826 		int idx;
5827 		unsigned long l_idx;
5828 		struct sdeb_store_info *sip;
5829 
5830 		seq_puts(m, "\nhost list:\n");
5831 		j = 0;
5832 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5833 			idx = sdhp->si_idx;
5834 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5835 				   sdhp->shost->host_no, idx);
5836 			++j;
5837 		}
5838 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5839 			   sdeb_most_recent_idx);
5840 		j = 0;
5841 		xa_for_each(per_store_ap, l_idx, sip) {
5842 			niu = xa_get_mark(per_store_ap, l_idx,
5843 					  SDEB_XA_NOT_IN_USE);
5844 			idx = (int)l_idx;
5845 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5846 				   (niu ? "  not_in_use" : ""));
5847 			++j;
5848 		}
5849 	}
5850 	return 0;
5851 }
5852 
5853 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5854 {
5855 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5856 }
5857 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5858  * of delay is jiffies.
5859  */
5860 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5861 			   size_t count)
5862 {
5863 	int jdelay, res;
5864 
5865 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5866 		res = count;
5867 		if (sdebug_jdelay != jdelay) {
5868 			int j, k;
5869 			struct sdebug_queue *sqp;
5870 
5871 			block_unblock_all_queues(true);
5872 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5873 			     ++j, ++sqp) {
5874 				k = find_first_bit(sqp->in_use_bm,
5875 						   sdebug_max_queue);
5876 				if (k != sdebug_max_queue) {
5877 					res = -EBUSY;   /* queued commands */
5878 					break;
5879 				}
5880 			}
5881 			if (res > 0) {
5882 				sdebug_jdelay = jdelay;
5883 				sdebug_ndelay = 0;
5884 			}
5885 			block_unblock_all_queues(false);
5886 		}
5887 		return res;
5888 	}
5889 	return -EINVAL;
5890 }
5891 static DRIVER_ATTR_RW(delay);
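/*
 * Example (illustrative): switch to immediate command completion; this
 * fails with EBUSY while commands are still queued:
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 */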
5892 
5893 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5894 {
5895 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5896 }
5897 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5898 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5899 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5900 			    size_t count)
5901 {
5902 	int ndelay, res;
5903 
5904 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5905 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5906 		res = count;
5907 		if (sdebug_ndelay != ndelay) {
5908 			int j, k;
5909 			struct sdebug_queue *sqp;
5910 
5911 			block_unblock_all_queues(true);
5912 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5913 			     ++j, ++sqp) {
5914 				k = find_first_bit(sqp->in_use_bm,
5915 						   sdebug_max_queue);
5916 				if (k != sdebug_max_queue) {
5917 					res = -EBUSY;   /* queued commands */
5918 					break;
5919 				}
5920 			}
5921 			if (res > 0) {
5922 				sdebug_ndelay = ndelay;
5923 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5924 							: DEF_JDELAY;
5925 			}
5926 			block_unblock_all_queues(false);
5927 		}
5928 		return res;
5929 	}
5930 	return -EINVAL;
5931 }
5932 static DRIVER_ATTR_RW(ndelay);
5933 
5934 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5935 {
5936 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5937 }
5938 
5939 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5940 			  size_t count)
5941 {
5942 	int opts;
5943 	char work[20];
5944 
5945 	if (sscanf(buf, "%10s", work) == 1) {
5946 		if (strncasecmp(work, "0x", 2) == 0) {
5947 			if (kstrtoint(work + 2, 16, &opts) == 0)
5948 				goto opts_done;
5949 		} else {
5950 			if (kstrtoint(work, 10, &opts) == 0)
5951 				goto opts_done;
5952 		}
5953 	}
5954 	return -EINVAL;
5955 opts_done:
5956 	sdebug_opts = opts;
5957 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5958 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5959 	tweak_cmnd_count();
5960 	return count;
5961 }
5962 static DRIVER_ATTR_RW(opts);
5963 
5964 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5965 {
5966 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5967 }
5968 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5969 			   size_t count)
5970 {
5971 	int n;
5972 
5973 	/* Cannot change from or to TYPE_ZBC with sysfs */
5974 	if (sdebug_ptype == TYPE_ZBC)
5975 		return -EINVAL;
5976 
5977 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5978 		if (n == TYPE_ZBC)
5979 			return -EINVAL;
5980 		sdebug_ptype = n;
5981 		return count;
5982 	}
5983 	return -EINVAL;
5984 }
5985 static DRIVER_ATTR_RW(ptype);
5986 
5987 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5988 {
5989 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5990 }
5991 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5992 			    size_t count)
5993 {
5994 	int n;
5995 
5996 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5997 		sdebug_dsense = n;
5998 		return count;
5999 	}
6000 	return -EINVAL;
6001 }
6002 static DRIVER_ATTR_RW(dsense);
6003 
6004 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6005 {
6006 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6007 }
6008 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6009 			     size_t count)
6010 {
6011 	int n, idx;
6012 
6013 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6014 		bool want_store = (n == 0);
6015 		struct sdebug_host_info *sdhp;
6016 
6017 		n = (n > 0);
6018 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6019 		if (sdebug_fake_rw == n)
6020 			return count;	/* not transitioning so do nothing */
6021 
6022 		if (want_store) {	/* 1 --> 0 transition, set up store */
6023 			if (sdeb_first_idx < 0) {
6024 				idx = sdebug_add_store();
6025 				if (idx < 0)
6026 					return idx;
6027 			} else {
6028 				idx = sdeb_first_idx;
6029 				xa_clear_mark(per_store_ap, idx,
6030 					      SDEB_XA_NOT_IN_USE);
6031 			}
6032 			/* make all hosts use same store */
6033 			list_for_each_entry(sdhp, &sdebug_host_list,
6034 					    host_list) {
6035 				if (sdhp->si_idx != idx) {
6036 					xa_set_mark(per_store_ap, sdhp->si_idx,
6037 						    SDEB_XA_NOT_IN_USE);
6038 					sdhp->si_idx = idx;
6039 				}
6040 			}
6041 			sdeb_most_recent_idx = idx;
6042 		} else {	/* 0 --> 1 transition is trigger for shrink */
6043 			sdebug_erase_all_stores(true /* apart from first */);
6044 		}
6045 		sdebug_fake_rw = n;
6046 		return count;
6047 	}
6048 	return -EINVAL;
6049 }
6050 static DRIVER_ATTR_RW(fake_rw);
6051 
6052 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6053 {
6054 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6055 }
6056 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6057 			      size_t count)
6058 {
6059 	int n;
6060 
6061 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6062 		sdebug_no_lun_0 = n;
6063 		return count;
6064 	}
6065 	return -EINVAL;
6066 }
6067 static DRIVER_ATTR_RW(no_lun_0);
6068 
6069 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6070 {
6071 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6072 }
6073 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6074 			      size_t count)
6075 {
6076 	int n;
6077 
6078 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6079 		sdebug_num_tgts = n;
6080 		sdebug_max_tgts_luns();
6081 		return count;
6082 	}
6083 	return -EINVAL;
6084 }
6085 static DRIVER_ATTR_RW(num_tgts);
6086 
6087 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6088 {
6089 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6090 }
6091 static DRIVER_ATTR_RO(dev_size_mb);
6092 
6093 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6094 {
6095 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6096 }
6097 
6098 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6099 				    size_t count)
6100 {
6101 	bool v;
6102 
6103 	if (kstrtobool(buf, &v))
6104 		return -EINVAL;
6105 
6106 	sdebug_per_host_store = v;
6107 	return count;
6108 }
6109 static DRIVER_ATTR_RW(per_host_store);
6110 
6111 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6112 {
6113 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6114 }
6115 static DRIVER_ATTR_RO(num_parts);
6116 
6117 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6118 {
6119 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6120 }
6121 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6122 			       size_t count)
6123 {
6124 	int nth;
6125 	char work[20];
6126 
6127 	if (sscanf(buf, "%10s", work) == 1) {
6128 		if (strncasecmp(work, "0x", 2) == 0) {
6129 			if (kstrtoint(work + 2, 16, &nth) == 0)
6130 				goto every_nth_done;
6131 		} else {
6132 			if (kstrtoint(work, 10, &nth) == 0)
6133 				goto every_nth_done;
6134 		}
6135 	}
6136 	return -EINVAL;
6137 
6138 every_nth_done:
6139 	sdebug_every_nth = nth;
6140 	if (nth && !sdebug_statistics) {
6141 		pr_info("every_nth needs statistics=1, setting it\n");
6142 		sdebug_statistics = true;
6143 	}
6144 	tweak_cmnd_count();
6145 	return count;
6146 }
6147 static DRIVER_ATTR_RW(every_nth);
6148 
6149 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6150 {
6151 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6152 }
6153 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6154 				size_t count)
6155 {
6156 	int n;
6157 	bool changed;
6158 
6159 	if (kstrtoint(buf, 0, &n))
6160 		return -EINVAL;
6161 	if (n >= 0) {
6162 		if (n > (int)SAM_LUN_AM_FLAT) {
6163 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6164 			return -EINVAL;
6165 		}
6166 		changed = ((int)sdebug_lun_am != n);
6167 		sdebug_lun_am = n;
6168 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6169 			struct sdebug_host_info *sdhp;
6170 			struct sdebug_dev_info *dp;
6171 
6172 			spin_lock(&sdebug_host_list_lock);
6173 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6174 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6175 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6176 				}
6177 			}
6178 			spin_unlock(&sdebug_host_list_lock);
6179 		}
6180 		return count;
6181 	}
6182 	return -EINVAL;
6183 }
6184 static DRIVER_ATTR_RW(lun_format);
6185 
6186 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6187 {
6188 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6189 }
6190 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6191 			      size_t count)
6192 {
6193 	int n;
6194 	bool changed;
6195 
6196 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6197 		if (n > 256) {
6198 			pr_warn("max_luns can be no more than 256\n");
6199 			return -EINVAL;
6200 		}
6201 		changed = (sdebug_max_luns != n);
6202 		sdebug_max_luns = n;
6203 		sdebug_max_tgts_luns();
6204 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6205 			struct sdebug_host_info *sdhp;
6206 			struct sdebug_dev_info *dp;
6207 
6208 			spin_lock(&sdebug_host_list_lock);
6209 			list_for_each_entry(sdhp, &sdebug_host_list,
6210 					    host_list) {
6211 				list_for_each_entry(dp, &sdhp->dev_info_list,
6212 						    dev_list) {
6213 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6214 						dp->uas_bm);
6215 				}
6216 			}
6217 			spin_unlock(&sdebug_host_list_lock);
6218 		}
6219 		return count;
6220 	}
6221 	return -EINVAL;
6222 }
6223 static DRIVER_ATTR_RW(max_luns);
6224 
6225 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6226 {
6227 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6228 }
6229 /* N.B. max_queue can be changed while there are queued commands. In flight
6230  * commands beyond the new max_queue will be completed. */
6231 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6232 			       size_t count)
6233 {
6234 	int j, n, k, a;
6235 	struct sdebug_queue *sqp;
6236 
6237 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6238 	    (n <= SDEBUG_CANQUEUE) &&
6239 	    (sdebug_host_max_queue == 0)) {
6240 		block_unblock_all_queues(true);
6241 		k = 0;
6242 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6243 		     ++j, ++sqp) {
6244 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6245 			if (a > k)
6246 				k = a;
6247 		}
6248 		sdebug_max_queue = n;
6249 		if (k == SDEBUG_CANQUEUE)
6250 			atomic_set(&retired_max_queue, 0);
6251 		else if (k >= n)
6252 			atomic_set(&retired_max_queue, k + 1);
6253 		else
6254 			atomic_set(&retired_max_queue, 0);
6255 		block_unblock_all_queues(false);
6256 		return count;
6257 	}
6258 	return -EINVAL;
6259 }
6260 static DRIVER_ATTR_RW(max_queue);
6261 
6262 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6263 {
6264 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6265 }
6266 
6267 /*
6268  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6269  * in range [0, sdebug_host_max_queue), we can't change it.
6270  */
6271 static DRIVER_ATTR_RO(host_max_queue);
6272 
6273 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6274 {
6275 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6276 }
6277 static DRIVER_ATTR_RO(no_uld);
6278 
6279 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6280 {
6281 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6282 }
6283 static DRIVER_ATTR_RO(scsi_level);
6284 
6285 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6286 {
6287 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6288 }
6289 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6290 				size_t count)
6291 {
6292 	int n;
6293 	bool changed;
6294 
6295 	/* Ignore capacity change for ZBC drives for now */
6296 	if (sdeb_zbc_in_use)
6297 		return -ENOTSUPP;
6298 
6299 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6300 		changed = (sdebug_virtual_gb != n);
6301 		sdebug_virtual_gb = n;
6302 		sdebug_capacity = get_sdebug_capacity();
6303 		if (changed) {
6304 			struct sdebug_host_info *sdhp;
6305 			struct sdebug_dev_info *dp;
6306 
6307 			spin_lock(&sdebug_host_list_lock);
6308 			list_for_each_entry(sdhp, &sdebug_host_list,
6309 					    host_list) {
6310 				list_for_each_entry(dp, &sdhp->dev_info_list,
6311 						    dev_list) {
6312 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6313 						dp->uas_bm);
6314 				}
6315 			}
6316 			spin_unlock(&sdebug_host_list_lock);
6317 		}
6318 		return count;
6319 	}
6320 	return -EINVAL;
6321 }
6322 static DRIVER_ATTR_RW(virtual_gb);
6323 
6324 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6325 {
6326 	/* absolute number of hosts currently active is what is shown */
6327 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6328 }
6329 
6330 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6331 			      size_t count)
6332 {
6333 	bool found;
6334 	unsigned long idx;
6335 	struct sdeb_store_info *sip;
6336 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6337 	int delta_hosts;
6338 
6339 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6340 		return -EINVAL;
6341 	if (delta_hosts > 0) {
6342 		do {
6343 			found = false;
6344 			if (want_phs) {
6345 				xa_for_each_marked(per_store_ap, idx, sip,
6346 						   SDEB_XA_NOT_IN_USE) {
6347 					sdeb_most_recent_idx = (int)idx;
6348 					found = true;
6349 					break;
6350 				}
6351 				if (found)	/* re-use case */
6352 					sdebug_add_host_helper((int)idx);
6353 				else
6354 					sdebug_do_add_host(true);
6355 			} else {
6356 				sdebug_do_add_host(false);
6357 			}
6358 		} while (--delta_hosts);
6359 	} else if (delta_hosts < 0) {
6360 		do {
6361 			sdebug_do_remove_host(false);
6362 		} while (++delta_hosts);
6363 	}
6364 	return count;
6365 }
6366 static DRIVER_ATTR_RW(add_host);
6367 
6368 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6369 {
6370 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6371 }
6372 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6373 				    size_t count)
6374 {
6375 	int n;
6376 
6377 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6378 		sdebug_vpd_use_hostno = n;
6379 		return count;
6380 	}
6381 	return -EINVAL;
6382 }
6383 static DRIVER_ATTR_RW(vpd_use_hostno);
6384 
6385 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6386 {
6387 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6388 }
6389 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6390 				size_t count)
6391 {
6392 	int n;
6393 
6394 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6395 		if (n > 0)
6396 			sdebug_statistics = true;
6397 		else {
6398 			clear_queue_stats();
6399 			sdebug_statistics = false;
6400 		}
6401 		return count;
6402 	}
6403 	return -EINVAL;
6404 }
6405 static DRIVER_ATTR_RW(statistics);
6406 
6407 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6408 {
6409 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6410 }
6411 static DRIVER_ATTR_RO(sector_size);
6412 
6413 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6414 {
6415 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6416 }
6417 static DRIVER_ATTR_RO(submit_queues);
6418 
6419 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6420 {
6421 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6422 }
6423 static DRIVER_ATTR_RO(dix);
6424 
6425 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6426 {
6427 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6428 }
6429 static DRIVER_ATTR_RO(dif);
6430 
6431 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6432 {
6433 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6434 }
6435 static DRIVER_ATTR_RO(guard);
6436 
6437 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6438 {
6439 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6440 }
6441 static DRIVER_ATTR_RO(ato);
6442 
6443 static ssize_t map_show(struct device_driver *ddp, char *buf)
6444 {
6445 	ssize_t count = 0;
6446 
6447 	if (!scsi_debug_lbp())
6448 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6449 				 sdebug_store_sectors);
6450 
6451 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6452 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6453 
6454 		if (sip)
6455 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6456 					  (int)map_size, sip->map_storep);
6457 	}
6458 	buf[count++] = '\n';
6459 	buf[count] = '\0';
6460 
6461 	return count;
6462 }
6463 static DRIVER_ATTR_RO(map);
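/*
 * Example (illustrative): with logical block provisioning enabled, the
 * map attribute prints the mapped LBAs as bit ranges, e.g. "0-7,64-127";
 * without LBP the whole range "0-<store_sectors>" is reported.
 */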
6464 
6465 static ssize_t random_show(struct device_driver *ddp, char *buf)
6466 {
6467 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6468 }
6469 
6470 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6471 			    size_t count)
6472 {
6473 	bool v;
6474 
6475 	if (kstrtobool(buf, &v))
6476 		return -EINVAL;
6477 
6478 	sdebug_random = v;
6479 	return count;
6480 }
6481 static DRIVER_ATTR_RW(random);
6482 
6483 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6484 {
6485 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6486 }
6487 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6488 			       size_t count)
6489 {
6490 	int n;
6491 
6492 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6493 		sdebug_removable = (n > 0);
6494 		return count;
6495 	}
6496 	return -EINVAL;
6497 }
6498 static DRIVER_ATTR_RW(removable);
6499 
6500 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6501 {
6502 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6503 }
6504 /* N.B. sdebug_host_lock does nothing; kept for backward compatibility */
6505 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6506 			       size_t count)
6507 {
6508 	int n;
6509 
6510 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6511 		sdebug_host_lock = (n > 0);
6512 		return count;
6513 	}
6514 	return -EINVAL;
6515 }
6516 static DRIVER_ATTR_RW(host_lock);
6517 
6518 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6519 {
6520 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6521 }
6522 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6523 			    size_t count)
6524 {
6525 	int n;
6526 
6527 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6528 		sdebug_strict = (n > 0);
6529 		return count;
6530 	}
6531 	return -EINVAL;
6532 }
6533 static DRIVER_ATTR_RW(strict);
6534 
6535 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6536 {
6537 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6538 }
6539 static DRIVER_ATTR_RO(uuid_ctl);
6540 
6541 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6542 {
6543 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6544 }
6545 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6546 			     size_t count)
6547 {
6548 	int ret, n;
6549 
6550 	ret = kstrtoint(buf, 0, &n);
6551 	if (ret)
6552 		return ret;
6553 	sdebug_cdb_len = n;
6554 	all_config_cdb_len();
6555 	return count;
6556 }
6557 static DRIVER_ATTR_RW(cdb_len);
6558 
6559 static const char * const zbc_model_strs_a[] = {
6560 	[BLK_ZONED_NONE] = "none",
6561 	[BLK_ZONED_HA]   = "host-aware",
6562 	[BLK_ZONED_HM]   = "host-managed",
6563 };
6564 
6565 static const char * const zbc_model_strs_b[] = {
6566 	[BLK_ZONED_NONE] = "no",
6567 	[BLK_ZONED_HA]   = "aware",
6568 	[BLK_ZONED_HM]   = "managed",
6569 };
6570 
6571 static const char * const zbc_model_strs_c[] = {
6572 	[BLK_ZONED_NONE] = "0",
6573 	[BLK_ZONED_HA]   = "1",
6574 	[BLK_ZONED_HM]   = "2",
6575 };
6576 
6577 static int sdeb_zbc_model_str(const char *cp)
6578 {
6579 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6580 
6581 	if (res < 0) {
6582 		res = sysfs_match_string(zbc_model_strs_b, cp);
6583 		if (res < 0) {
6584 			res = sysfs_match_string(zbc_model_strs_c, cp);
6585 			if (res < 0)
6586 				return -EINVAL;
6587 		}
6588 	}
6589 	return res;
6590 }
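
/*
 * Editorial example (not in the original source): the three string tables
 * above are all indexed by enum blk_zoned_model, so sysfs_match_string()
 * resolves any accepted spelling to the same model value:
 *
 *	sdeb_zbc_model_str("host-managed");	returns BLK_ZONED_HM
 *	sdeb_zbc_model_str("managed");		returns BLK_ZONED_HM
 *	sdeb_zbc_model_str("2");		returns BLK_ZONED_HM
 */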
6591 
6592 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6593 {
6594 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6595 			 zbc_model_strs_a[sdeb_zbc_model]);
6596 }
6597 static DRIVER_ATTR_RO(zbc);
6598 
6599 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6600 {
6601 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6602 }
6603 static DRIVER_ATTR_RO(tur_ms_to_ready);
6604 
6605 /* Note: the following array creates attribute files in the
6606  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6607  * files (over those found in the /sys/module/scsi_debug/parameters
6608  * directory) is that auxiliary actions can be triggered when an attribute
6609  * is changed. For example see: add_host_store() above.
6610  */
6611 
6612 static struct attribute *sdebug_drv_attrs[] = {
6613 	&driver_attr_delay.attr,
6614 	&driver_attr_opts.attr,
6615 	&driver_attr_ptype.attr,
6616 	&driver_attr_dsense.attr,
6617 	&driver_attr_fake_rw.attr,
6618 	&driver_attr_host_max_queue.attr,
6619 	&driver_attr_no_lun_0.attr,
6620 	&driver_attr_num_tgts.attr,
6621 	&driver_attr_dev_size_mb.attr,
6622 	&driver_attr_num_parts.attr,
6623 	&driver_attr_every_nth.attr,
6624 	&driver_attr_lun_format.attr,
6625 	&driver_attr_max_luns.attr,
6626 	&driver_attr_max_queue.attr,
6627 	&driver_attr_no_uld.attr,
6628 	&driver_attr_scsi_level.attr,
6629 	&driver_attr_virtual_gb.attr,
6630 	&driver_attr_add_host.attr,
6631 	&driver_attr_per_host_store.attr,
6632 	&driver_attr_vpd_use_hostno.attr,
6633 	&driver_attr_sector_size.attr,
6634 	&driver_attr_statistics.attr,
6635 	&driver_attr_submit_queues.attr,
6636 	&driver_attr_dix.attr,
6637 	&driver_attr_dif.attr,
6638 	&driver_attr_guard.attr,
6639 	&driver_attr_ato.attr,
6640 	&driver_attr_map.attr,
6641 	&driver_attr_random.attr,
6642 	&driver_attr_removable.attr,
6643 	&driver_attr_host_lock.attr,
6644 	&driver_attr_ndelay.attr,
6645 	&driver_attr_strict.attr,
6646 	&driver_attr_uuid_ctl.attr,
6647 	&driver_attr_cdb_len.attr,
6648 	&driver_attr_tur_ms_to_ready.attr,
6649 	&driver_attr_zbc.attr,
6650 	NULL,
6651 };
6652 ATTRIBUTE_GROUPS(sdebug_drv);
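
/*
 * Editorial usage sketch (not in the original source). From user space the
 * attributes above appear as files, for example:
 *
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 *	cat /sys/bus/pseudo/drivers/scsi_debug/map
 *
 * Writing through this directory runs the matching _store() function, so
 * side effects such as clear_queue_stats() in statistics_store() fire;
 * writes under /sys/module/scsi_debug/parameters would not trigger them.
 */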
6653 
6654 static struct device *pseudo_primary;
6655 
6656 static int __init scsi_debug_init(void)
6657 {
6658 	bool want_store = (sdebug_fake_rw == 0);
6659 	unsigned long sz;
6660 	int k, ret, hosts_to_add;
6661 	int idx = -1;
6662 
6663 	ramdisk_lck_a[0] = &atomic_rw;
6664 	ramdisk_lck_a[1] = &atomic_rw2;
6665 	atomic_set(&retired_max_queue, 0);
6666 
6667 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6668 		pr_warn("ndelay must be less than 1 second, ignored\n");
6669 		sdebug_ndelay = 0;
6670 	} else if (sdebug_ndelay > 0)
6671 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6672 
6673 	switch (sdebug_sector_size) {
6674 	case  512:
6675 	case 1024:
6676 	case 2048:
6677 	case 4096:
6678 		break;
6679 	default:
6680 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6681 		return -EINVAL;
6682 	}
6683 
6684 	switch (sdebug_dif) {
6685 	case T10_PI_TYPE0_PROTECTION:
6686 		break;
6687 	case T10_PI_TYPE1_PROTECTION:
6688 	case T10_PI_TYPE2_PROTECTION:
6689 	case T10_PI_TYPE3_PROTECTION:
6690 		have_dif_prot = true;
6691 		break;
6692 
6693 	default:
6694 		pr_err("dif must be 0, 1, 2 or 3\n");
6695 		return -EINVAL;
6696 	}
6697 
6698 	if (sdebug_num_tgts < 0) {
6699 		pr_err("num_tgts must be >= 0\n");
6700 		return -EINVAL;
6701 	}
6702 
6703 	if (sdebug_guard > 1) {
6704 		pr_err("guard must be 0 or 1\n");
6705 		return -EINVAL;
6706 	}
6707 
6708 	if (sdebug_ato > 1) {
6709 		pr_err("ato must be 0 or 1\n");
6710 		return -EINVAL;
6711 	}
6712 
6713 	if (sdebug_physblk_exp > 15) {
6714 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6715 		return -EINVAL;
6716 	}
6717 
6718 	sdebug_lun_am = sdebug_lun_am_i;
6719 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6720 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6721 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6722 	}
6723 
6724 	if (sdebug_max_luns > 256) {
6725 		if (sdebug_max_luns > 16384) {
6726 			pr_warn("max_luns can be no more than 16384, using default\n");
6727 			sdebug_max_luns = DEF_MAX_LUNS;
6728 		}
6729 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6730 	}
6731 
6732 	if (sdebug_lowest_aligned > 0x3fff) {
6733 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6734 		return -EINVAL;
6735 	}
6736 
6737 	if (submit_queues < 1) {
6738 		pr_err("submit_queues must be 1 or more\n");
6739 		return -EINVAL;
6740 	}
6741 
6742 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6743 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6744 		return -EINVAL;
6745 	}
6746 
6747 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6748 	    (sdebug_host_max_queue < 0)) {
6749 		pr_err("host_max_queue must be in range [0, %d]\n",
6750 		       SDEBUG_CANQUEUE);
6751 		return -EINVAL;
6752 	}
6753 
6754 	if (sdebug_host_max_queue &&
6755 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6756 		sdebug_max_queue = sdebug_host_max_queue;
6757 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6758 			sdebug_max_queue);
6759 	}
6760 
6761 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6762 			       GFP_KERNEL);
6763 	if (sdebug_q_arr == NULL)
6764 		return -ENOMEM;
6765 	for (k = 0; k < submit_queues; ++k)
6766 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6767 
6768 	/*
6769 	 * Check for a host-managed zoned block device, specified with
6770 	 * ptype=0x14 or zbc=XXX.
6771 	 */
6772 	if (sdebug_ptype == TYPE_ZBC) {
6773 		sdeb_zbc_model = BLK_ZONED_HM;
6774 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6775 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6776 		if (k < 0) {
6777 			ret = k;
6778 			goto free_q_arr;
6779 		}
6780 		sdeb_zbc_model = k;
6781 		switch (sdeb_zbc_model) {
6782 		case BLK_ZONED_NONE:
6783 		case BLK_ZONED_HA:
6784 			sdebug_ptype = TYPE_DISK;
6785 			break;
6786 		case BLK_ZONED_HM:
6787 			sdebug_ptype = TYPE_ZBC;
6788 			break;
6789 		default:
6790 			pr_err("Invalid ZBC model\n");
6791 			ret = -EINVAL;
6792 			goto free_q_arr;
6793 		}
6794 	}
6795 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6796 		sdeb_zbc_in_use = true;
6797 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6798 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6799 	}
6800 
6801 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6802 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6803 	if (sdebug_dev_size_mb < 1)
6804 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6805 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6806 	sdebug_store_sectors = sz / sdebug_sector_size;
6807 	sdebug_capacity = get_sdebug_capacity();
6808 
6809 	/* play around with geometry, don't waste too much on track 0 */
6810 	sdebug_heads = 8;
6811 	sdebug_sectors_per = 32;
6812 	if (sdebug_dev_size_mb >= 256)
6813 		sdebug_heads = 64;
6814 	else if (sdebug_dev_size_mb >= 16)
6815 		sdebug_heads = 32;
6816 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6817 			       (sdebug_sectors_per * sdebug_heads);
6818 	if (sdebug_cylinders_per >= 1024) {
6819 		/* other LLDs do this; implies >= 1GB ram disk ... */
6820 		sdebug_heads = 255;
6821 		sdebug_sectors_per = 63;
6822 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6823 			       (sdebug_sectors_per * sdebug_heads);
6824 	}
6825 	if (scsi_debug_lbp()) {
6826 		sdebug_unmap_max_blocks =
6827 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6828 
6829 		sdebug_unmap_max_desc =
6830 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6831 
6832 		sdebug_unmap_granularity =
6833 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6834 
6835 		if (sdebug_unmap_alignment &&
6836 		    sdebug_unmap_granularity <=
6837 		    sdebug_unmap_alignment) {
6838 			pr_err("unmap_granularity <= unmap_alignment\n");
6839 			ret = -EINVAL;
6840 			goto free_q_arr;
6841 		}
6842 	}
6843 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6844 	if (want_store) {
6845 		idx = sdebug_add_store();
6846 		if (idx < 0) {
6847 			ret = idx;
6848 			goto free_q_arr;
6849 		}
6850 	}
6851 
6852 	pseudo_primary = root_device_register("pseudo_0");
6853 	if (IS_ERR(pseudo_primary)) {
6854 		pr_warn("root_device_register() error\n");
6855 		ret = PTR_ERR(pseudo_primary);
6856 		goto free_vm;
6857 	}
6858 	ret = bus_register(&pseudo_lld_bus);
6859 	if (ret < 0) {
6860 		pr_warn("bus_register error: %d\n", ret);
6861 		goto dev_unreg;
6862 	}
6863 	ret = driver_register(&sdebug_driverfs_driver);
6864 	if (ret < 0) {
6865 		pr_warn("driver_register error: %d\n", ret);
6866 		goto bus_unreg;
6867 	}
6868 
6869 	hosts_to_add = sdebug_add_host;
6870 	sdebug_add_host = 0;
6871 
6872 	for (k = 0; k < hosts_to_add; k++) {
6873 		if (want_store && k == 0) {
6874 			ret = sdebug_add_host_helper(idx);
6875 			if (ret < 0) {
6876 				pr_err("add_host_helper k=%d, error=%d\n",
6877 				       k, -ret);
6878 				break;
6879 			}
6880 		} else {
6881 			ret = sdebug_do_add_host(want_store &&
6882 						 sdebug_per_host_store);
6883 			if (ret < 0) {
6884 				pr_err("add_host k=%d error=%d\n", k, -ret);
6885 				break;
6886 			}
6887 		}
6888 	}
6889 	if (sdebug_verbose)
6890 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6891 
6892 	return 0;
6893 
6894 bus_unreg:
6895 	bus_unregister(&pseudo_lld_bus);
6896 dev_unreg:
6897 	root_device_unregister(pseudo_primary);
6898 free_vm:
6899 	sdebug_erase_store(idx, NULL);
6900 free_q_arr:
6901 	kfree(sdebug_q_arr);
6902 	return ret;
6903 }
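
/*
 * Editorial example (not in the original source): typical module loads
 * that exercise the checks in scsi_debug_init() above, assuming the module
 * parameter names match the sdebug_* variables they set (as dev_size_mb
 * and sector_size do; "zbc" is assumed to map to sdeb_zbc_model_s):
 *
 *	modprobe scsi_debug dev_size_mb=256 sector_size=4096
 *	modprobe scsi_debug zbc=host-managed
 *
 * A sector_size other than 512, 1024, 2048 or 4096 fails with -EINVAL, and
 * an ndelay of one second or more is ignored with a warning, as coded above.
 */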
6904 
6905 static void __exit scsi_debug_exit(void)
6906 {
6907 	int k = sdebug_num_hosts;
6908 
6909 	stop_all_queued();
6910 	for (; k; k--)
6911 		sdebug_do_remove_host(true);
6912 	free_all_queued();
6913 	driver_unregister(&sdebug_driverfs_driver);
6914 	bus_unregister(&pseudo_lld_bus);
6915 	root_device_unregister(pseudo_primary);
6916 
6917 	sdebug_erase_all_stores(false);
6918 	xa_destroy(per_store_ap);
6919 	kfree(sdebug_q_arr);
6920 }
6921 
6922 device_initcall(scsi_debug_init);
6923 module_exit(scsi_debug_exit);
6924 
6925 static void sdebug_release_adapter(struct device *dev)
6926 {
6927 	struct sdebug_host_info *sdbg_host;
6928 
6929 	sdbg_host = to_sdebug_host(dev);
6930 	kfree(sdbg_host);
6931 }
6932 
6933 /* idx must be valid; if sip is NULL it will be looked up using idx */
6934 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6935 {
6936 	if (idx < 0)
6937 		return;
6938 	if (!sip) {
6939 		if (xa_empty(per_store_ap))
6940 			return;
6941 		sip = xa_load(per_store_ap, idx);
6942 		if (!sip)
6943 			return;
6944 	}
6945 	vfree(sip->map_storep);
6946 	vfree(sip->dif_storep);
6947 	vfree(sip->storep);
6948 	xa_erase(per_store_ap, idx);
6949 	kfree(sip);
6950 }
6951 
6952 /* Assume apart_from_first==false only in the shutdown case. */
6953 static void sdebug_erase_all_stores(bool apart_from_first)
6954 {
6955 	unsigned long idx;
6956 	struct sdeb_store_info *sip = NULL;
6957 
6958 	xa_for_each(per_store_ap, idx, sip) {
6959 		if (apart_from_first)
6960 			apart_from_first = false;
6961 		else
6962 			sdebug_erase_store(idx, sip);
6963 	}
6964 	if (apart_from_first)
6965 		sdeb_most_recent_idx = sdeb_first_idx;
6966 }
6967 
6968 /*
6969  * Returns store xarray new element index (idx) if >=0 else negated errno.
6970  * Limit the number of stores to 65536.
6971  */
6972 static int sdebug_add_store(void)
6973 {
6974 	int res;
6975 	u32 n_idx;
6976 	unsigned long iflags;
6977 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6978 	struct sdeb_store_info *sip = NULL;
6979 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6980 
6981 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6982 	if (!sip)
6983 		return -ENOMEM;
6984 
6985 	xa_lock_irqsave(per_store_ap, iflags);
6986 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6987 	if (unlikely(res < 0)) {
6988 		xa_unlock_irqrestore(per_store_ap, iflags);
6989 		kfree(sip);
6990 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6991 		return res;
6992 	}
6993 	sdeb_most_recent_idx = n_idx;
6994 	if (sdeb_first_idx < 0)
6995 		sdeb_first_idx = n_idx;
6996 	xa_unlock_irqrestore(per_store_ap, iflags);
6997 
6998 	res = -ENOMEM;
6999 	sip->storep = vzalloc(sz);
7000 	if (!sip->storep) {
7001 		pr_err("user data oom\n");
7002 		goto err;
7003 	}
7004 	if (sdebug_num_parts > 0)
7005 		sdebug_build_parts(sip->storep, sz);
7006 
7007 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7008 	if (sdebug_dix) {
7009 		int dif_size;
7010 
7011 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7012 		sip->dif_storep = vmalloc(dif_size);
7013 
7014 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7015 			sip->dif_storep);
7016 
7017 		if (!sip->dif_storep) {
7018 			pr_err("DIX oom\n");
7019 			goto err;
7020 		}
7021 		memset(sip->dif_storep, 0xff, dif_size);
7022 	}
7023 	/* Logical Block Provisioning */
7024 	if (scsi_debug_lbp()) {
7025 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7026 		sip->map_storep = vmalloc(array_size(sizeof(long),
7027 						     BITS_TO_LONGS(map_size)));
7028 
7029 		pr_info("%lu provisioning blocks\n", map_size);
7030 
7031 		if (!sip->map_storep) {
7032 			pr_err("LBP map oom\n");
7033 			goto err;
7034 		}
7035 
7036 		bitmap_zero(sip->map_storep, map_size);
7037 
7038 		/* Map first 1KB for partition table */
7039 		if (sdebug_num_parts)
7040 			map_region(sip, 0, 2);
7041 	}
7042 
7043 	rwlock_init(&sip->macc_lck);
7044 	return (int)n_idx;
7045 err:
7046 	sdebug_erase_store((int)n_idx, sip);
7047 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7048 	return res;
7049 }
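
/*
 * Editorial note (not in the original source): __xa_alloc() above stores
 * sip at the lowest free index within xal (here [0, 1 << 16]), so store
 * indices freed by sdebug_erase_store() are reused. GFP_ATOMIC is needed
 * because the xarray lock is held with interrupts disabled at that point.
 */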
7050 
7051 static int sdebug_add_host_helper(int per_host_idx)
7052 {
7053 	int k, devs_per_host, idx;
7054 	int error = -ENOMEM;
7055 	struct sdebug_host_info *sdbg_host;
7056 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7057 
7058 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7059 	if (!sdbg_host)
7060 		return -ENOMEM;
7061 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7062 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7063 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7064 	sdbg_host->si_idx = idx;
7065 
7066 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7067 
7068 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7069 	for (k = 0; k < devs_per_host; k++) {
7070 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7071 		if (!sdbg_devinfo)
7072 			goto clean;
7073 	}
7074 
7075 	spin_lock(&sdebug_host_list_lock);
7076 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7077 	spin_unlock(&sdebug_host_list_lock);
7078 
7079 	sdbg_host->dev.bus = &pseudo_lld_bus;
7080 	sdbg_host->dev.parent = pseudo_primary;
7081 	sdbg_host->dev.release = &sdebug_release_adapter;
7082 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7083 
7084 	error = device_register(&sdbg_host->dev);
7085 	if (error)
7086 		goto clean;
7087 
7088 	++sdebug_num_hosts;
7089 	return 0;
7090 
7091 clean:
7092 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7093 				 dev_list) {
7094 		list_del(&sdbg_devinfo->dev_list);
7095 		kfree(sdbg_devinfo->zstate);
7096 		kfree(sdbg_devinfo);
7097 	}
7098 	kfree(sdbg_host);
7099 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7100 	return error;
7101 }
7102 
7103 static int sdebug_do_add_host(bool mk_new_store)
7104 {
7105 	int ph_idx = sdeb_most_recent_idx;
7106 
7107 	if (mk_new_store) {
7108 		ph_idx = sdebug_add_store();
7109 		if (ph_idx < 0)
7110 			return ph_idx;
7111 	}
7112 	return sdebug_add_host_helper(ph_idx);
7113 }
7114 
7115 static void sdebug_do_remove_host(bool the_end)
7116 {
7117 	int idx = -1;
7118 	struct sdebug_host_info *sdbg_host = NULL;
7119 	struct sdebug_host_info *sdbg_host2;
7120 
7121 	spin_lock(&sdebug_host_list_lock);
7122 	if (!list_empty(&sdebug_host_list)) {
7123 		sdbg_host = list_entry(sdebug_host_list.prev,
7124 				       struct sdebug_host_info, host_list);
7125 		idx = sdbg_host->si_idx;
7126 	}
7127 	if (!the_end && idx >= 0) {
7128 		bool unique = true;
7129 
7130 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7131 			if (sdbg_host2 == sdbg_host)
7132 				continue;
7133 			if (idx == sdbg_host2->si_idx) {
7134 				unique = false;
7135 				break;
7136 			}
7137 		}
7138 		if (unique) {
7139 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7140 			if (idx == sdeb_most_recent_idx)
7141 				--sdeb_most_recent_idx;
7142 		}
7143 	}
7144 	if (sdbg_host)
7145 		list_del(&sdbg_host->host_list);
7146 	spin_unlock(&sdebug_host_list_lock);
7147 
7148 	if (!sdbg_host)
7149 		return;
7150 
7151 	device_unregister(&sdbg_host->dev);
7152 	--sdebug_num_hosts;
7153 }
7154 
7155 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7156 {
7157 	int num_in_q = 0;
7158 	struct sdebug_dev_info *devip;
7159 
7160 	block_unblock_all_queues(true);
7161 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7162 	if (!devip) {
7163 		block_unblock_all_queues(false);
7164 		return -ENODEV;
7165 	}
7166 	num_in_q = atomic_read(&devip->num_in_q);
7167 
7168 	if (qdepth > SDEBUG_CANQUEUE) {
7169 		qdepth = SDEBUG_CANQUEUE;
7170 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7171 			qdepth, SDEBUG_CANQUEUE);
7172 	}
7173 	if (qdepth < 1)
7174 		qdepth = 1;
7175 	if (qdepth != sdev->queue_depth)
7176 		scsi_change_queue_depth(sdev, qdepth);
7177 
7178 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7179 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7180 			    __func__, qdepth, num_in_q);
7181 	}
7182 	block_unblock_all_queues(false);
7183 	return sdev->queue_depth;
7184 }
7185 
7186 static bool fake_timeout(struct scsi_cmnd *scp)
7187 {
7188 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7189 		if (sdebug_every_nth < -1)
7190 			sdebug_every_nth = -1;
7191 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7192 			return true; /* ignore command causing timeout */
7193 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7194 			 scsi_medium_access_command(scp))
7195 			return true; /* time out reads and writes */
7196 	}
7197 	return false;
7198 }
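
/*
 * Editorial example (not in the original source): fake_timeout() fires when
 * sdebug_cmnd_count is a multiple of abs(sdebug_every_nth). With
 * every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every 100th command is
 * dropped without completion, exercising the mid level's timeout and abort
 * handling; with SDEBUG_OPT_MAC_TIMEOUT only media access commands (reads
 * and writes) are dropped.
 */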
7199 
7200 /* Response to TUR or media access command when device stopped */
7201 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7202 {
7203 	int stopped_state;
7204 	u64 diff_ns = 0;
7205 	ktime_t now_ts = ktime_get_boottime();
7206 	struct scsi_device *sdp = scp->device;
7207 
7208 	stopped_state = atomic_read(&devip->stopped);
7209 	if (stopped_state == 2) {
7210 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7211 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7212 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7213 				/* tur_ms_to_ready timer extinguished */
7214 				atomic_set(&devip->stopped, 0);
7215 				return 0;
7216 			}
7217 		}
7218 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7219 		if (sdebug_verbose)
7220 			sdev_printk(KERN_INFO, sdp,
7221 				    "%s: Not ready: in process of becoming ready\n", my_name);
7222 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7223 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7224 
7225 			if (diff_ns <= tur_nanosecs_to_ready)
7226 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7227 			else
7228 				diff_ns = tur_nanosecs_to_ready;
7229 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7230 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7231 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7232 						   diff_ns);
7233 			return check_condition_result;
7234 		}
7235 	}
7236 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7237 	if (sdebug_verbose)
7238 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7239 			    my_name);
7240 	return check_condition_result;
7241 }
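
/*
 * Editorial worked example (not in the original source): with
 * tur_ms_to_ready=2000, a TEST UNIT READY arriving 500 ms after device
 * creation takes the stopped_state==2 path above; diff_ns is 500 ms worth
 * of nanoseconds, so the sense INFORMATION field is set to 2000 - 500 =
 * 1500 (milliseconds until ready), alongside NOT READY /
 * LOGICAL_UNIT_NOT_READY, per the 20-061r2 T10 proposal cited above.
 */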
7242 
7243 static int sdebug_map_queues(struct Scsi_Host *shost)
7244 {
7245 	int i, qoff;
7246 
7247 	if (shost->nr_hw_queues == 1)
7248 		return 0;
7249 
7250 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7251 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7252 
7253 		map->nr_queues  = 0;
7254 
7255 		if (i == HCTX_TYPE_DEFAULT)
7256 			map->nr_queues = submit_queues - poll_queues;
7257 		else if (i == HCTX_TYPE_POLL)
7258 			map->nr_queues = poll_queues;
7259 
7260 		if (!map->nr_queues) {
7261 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7262 			continue;
7263 		}
7264 
7265 		map->queue_offset = qoff;
7266 		blk_mq_map_queues(map);
7267 
7268 		qoff += map->nr_queues;
7269 	}
7270 
7271 	return 0;
7272 
7273 }
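
/*
 * Editorial worked example (not in the original source): with
 * submit_queues=6 and poll_queues=2, sdebug_map_queues() gives
 * HCTX_TYPE_DEFAULT four queues at queue_offset 0 and HCTX_TYPE_POLL two
 * queues at queue_offset 4; HCTX_TYPE_READ gets nr_queues == 0 and is
 * skipped.
 */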
7274 
7275 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7276 {
7277 	bool first;
7278 	bool retiring = false;
7279 	int num_entries = 0;
7280 	unsigned int qc_idx = 0;
7281 	unsigned long iflags;
7282 	ktime_t kt_from_boot = ktime_get_boottime();
7283 	struct sdebug_queue *sqp;
7284 	struct sdebug_queued_cmd *sqcp;
7285 	struct scsi_cmnd *scp;
7286 	struct sdebug_dev_info *devip;
7287 	struct sdebug_defer *sd_dp;
7288 
7289 	sqp = sdebug_q_arr + queue_num;
7290 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7291 
7292 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7293 		if (first) {
7294 			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7295 			first = false;
7296 		} else {
7297 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7298 		}
7299 		if (unlikely(qc_idx >= sdebug_max_queue))
7300 			break;
7301 
7302 		sqcp = &sqp->qc_arr[qc_idx];
7303 		sd_dp = sqcp->sd_dp;
7304 		if (unlikely(!sd_dp))
7305 			continue;
7306 		scp = sqcp->a_cmnd;
7307 		if (unlikely(scp == NULL)) {
7308 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7309 			       queue_num, qc_idx, __func__);
7310 			break;
7311 		}
7312 		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7313 			if (kt_from_boot < sd_dp->cmpl_ts)
7314 				continue;
7315 
7316 		} else		/* ignoring non-REQ_HIPRI requests */
7317 			continue;
7318 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7319 		if (likely(devip))
7320 			atomic_dec(&devip->num_in_q);
7321 		else
7322 			pr_err("devip=NULL from %s\n", __func__);
7323 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7324 			retiring = true;
7325 
7326 		sqcp->a_cmnd = NULL;
7327 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7328 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7329 				sqp, queue_num, qc_idx, __func__);
7330 			break;
7331 		}
7332 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7333 			int k, retval;
7334 
7335 			retval = atomic_read(&retired_max_queue);
7336 			if (qc_idx >= retval) {
7337 				pr_err("index %u too large\n", qc_idx);
7338 				break;
7339 			}
7340 			k = find_last_bit(sqp->in_use_bm, retval);
7341 			if ((k < sdebug_max_queue) || (k == retval))
7342 				atomic_set(&retired_max_queue, 0);
7343 			else
7344 				atomic_set(&retired_max_queue, k + 1);
7345 		}
7346 		sd_dp->defer_t = SDEB_DEFER_NONE;
7347 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7348 		scp->scsi_done(scp); /* callback to mid level */
7349 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7350 		num_entries++;
7351 	}
7352 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7353 	if (num_entries > 0)
7354 		atomic_add(num_entries, &sdeb_mq_poll_count);
7355 	return num_entries;
7356 }
7357 
7358 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7359 				   struct scsi_cmnd *scp)
7360 {
7361 	u8 sdeb_i;
7362 	struct scsi_device *sdp = scp->device;
7363 	const struct opcode_info_t *oip;
7364 	const struct opcode_info_t *r_oip;
7365 	struct sdebug_dev_info *devip;
7366 	u8 *cmd = scp->cmnd;
7367 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7368 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7369 	int k, na;
7370 	int errsts = 0;
7371 	u64 lun_index = sdp->lun & 0x3FFF;
7372 	u32 flags;
7373 	u16 sa;
7374 	u8 opcode = cmd[0];
7375 	bool has_wlun_rl;
7376 	bool inject_now;
7377 
7378 	scsi_set_resid(scp, 0);
7379 	if (sdebug_statistics) {
7380 		atomic_inc(&sdebug_cmnd_count);
7381 		inject_now = inject_on_this_cmd();
7382 	} else {
7383 		inject_now = false;
7384 	}
7385 	if (unlikely(sdebug_verbose &&
7386 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7387 		char b[120];
7388 		int n, len, sb;
7389 
7390 		len = scp->cmd_len;
7391 		sb = (int)sizeof(b);
7392 		if (len > 32)
7393 			strcpy(b, "too long, over 32 bytes");
7394 		else {
7395 			for (k = 0, n = 0; k < len && n < sb; ++k)
7396 				n += scnprintf(b + n, sb - n, "%02x ",
7397 					       (u32)cmd[k]);
7398 		}
7399 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7400 			    blk_mq_unique_tag(scp->request), b);
7401 	}
7402 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7403 		return SCSI_MLQUEUE_HOST_BUSY;
7404 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7405 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7406 		goto err_out;
7407 
7408 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7409 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7410 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7411 	if (unlikely(!devip)) {
7412 		devip = find_build_dev_info(sdp);
7413 		if (!devip)
7414 			goto err_out;
7415 	}
7416 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7417 		atomic_set(&sdeb_inject_pending, 1);
7418 
7419 	na = oip->num_attached;
7420 	r_pfp = oip->pfp;
7421 	if (na) {	/* multiple commands with this opcode */
7422 		r_oip = oip;
7423 		if (FF_SA & r_oip->flags) {
7424 			if (F_SA_LOW & oip->flags)
7425 				sa = 0x1f & cmd[1];
7426 			else
7427 				sa = get_unaligned_be16(cmd + 8);
7428 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7429 				if (opcode == oip->opcode && sa == oip->sa)
7430 					break;
7431 			}
7432 		} else {   /* since no service action only check opcode */
7433 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7434 				if (opcode == oip->opcode)
7435 					break;
7436 			}
7437 		}
7438 		if (k > na) {
7439 			if (F_SA_LOW & r_oip->flags)
7440 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7441 			else if (F_SA_HIGH & r_oip->flags)
7442 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7443 			else
7444 				mk_sense_invalid_opcode(scp);
7445 			goto check_cond;
7446 		}
7447 	}	/* else (when na==0) we assume the oip is a match */
7448 	flags = oip->flags;
7449 	if (unlikely(F_INV_OP & flags)) {
7450 		mk_sense_invalid_opcode(scp);
7451 		goto check_cond;
7452 	}
7453 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7454 		if (sdebug_verbose)
7455 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7456 				    my_name, opcode, " supported for wlun");
7457 		mk_sense_invalid_opcode(scp);
7458 		goto check_cond;
7459 	}
7460 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7461 		u8 rem;
7462 		int j;
7463 
7464 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7465 			rem = ~oip->len_mask[k] & cmd[k];
7466 			if (rem) {
7467 				for (j = 7; j >= 0; --j, rem <<= 1) {
7468 					if (0x80 & rem)
7469 						break;
7470 				}
7471 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7472 				goto check_cond;
7473 			}
7474 		}
7475 	}
7476 	if (unlikely(!(F_SKIP_UA & flags) &&
7477 		     find_first_bit(devip->uas_bm,
7478 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7479 		errsts = make_ua(scp, devip);
7480 		if (errsts)
7481 			goto check_cond;
7482 	}
7483 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7484 		     atomic_read(&devip->stopped))) {
7485 		errsts = resp_not_ready(scp, devip);
7486 		if (errsts)
7487 			goto fini;
7488 	}
7489 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7490 		goto fini;
7491 	if (unlikely(sdebug_every_nth)) {
7492 		if (fake_timeout(scp))
7493 			return 0;	/* ignore command: make trouble */
7494 	}
7495 	if (likely(oip->pfp))
7496 		pfp = oip->pfp;	/* calls a resp_* function */
7497 	else
7498 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7499 
7500 fini:
7501 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7502 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7503 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7504 					    sdebug_ndelay > 10000)) {
7505 		/*
7506 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7507 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7508 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7509 		 * For Synchronize Cache want 1/20 of SSU's delay.
7510 		 */
7511 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7512 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7513 
7514 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7515 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7516 	} else
7517 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7518 				     sdebug_ndelay);
7519 check_cond:
7520 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7521 err_out:
7522 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7523 }
7524 
7525 static struct scsi_host_template sdebug_driver_template = {
7526 	.show_info =		scsi_debug_show_info,
7527 	.write_info =		scsi_debug_write_info,
7528 	.proc_name =		sdebug_proc_name,
7529 	.name =			"SCSI DEBUG",
7530 	.info =			scsi_debug_info,
7531 	.slave_alloc =		scsi_debug_slave_alloc,
7532 	.slave_configure =	scsi_debug_slave_configure,
7533 	.slave_destroy =	scsi_debug_slave_destroy,
7534 	.ioctl =		scsi_debug_ioctl,
7535 	.queuecommand =		scsi_debug_queuecommand,
7536 	.change_queue_depth =	sdebug_change_qdepth,
7537 	.map_queues =		sdebug_map_queues,
7538 	.mq_poll =		sdebug_blk_mq_poll,
7539 	.eh_abort_handler =	scsi_debug_abort,
7540 	.eh_device_reset_handler = scsi_debug_device_reset,
7541 	.eh_target_reset_handler = scsi_debug_target_reset,
7542 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7543 	.eh_host_reset_handler = scsi_debug_host_reset,
7544 	.can_queue =		SDEBUG_CANQUEUE,
7545 	.this_id =		7,
7546 	.sg_tablesize =		SG_MAX_SEGMENTS,
7547 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7548 	.max_sectors =		-1U,
7549 	.max_segment_size =	-1U,
7550 	.module =		THIS_MODULE,
7551 	.track_queue_depth =	1,
7552 };
7553 
7554 static int sdebug_driver_probe(struct device *dev)
7555 {
7556 	int error = 0;
7557 	struct sdebug_host_info *sdbg_host;
7558 	struct Scsi_Host *hpnt;
7559 	int hprot;
7560 
7561 	sdbg_host = to_sdebug_host(dev);
7562 
7563 	sdebug_driver_template.can_queue = sdebug_max_queue;
7564 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7565 	if (!sdebug_clustering)
7566 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7567 
7568 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7569 	if (!hpnt) {
7570 		pr_err("scsi_host_alloc failed\n");
7571 		error = -ENODEV;
7572 		return error;
7573 	}
7574 	if (submit_queues > nr_cpu_ids) {
7575 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7576 			my_name, submit_queues, nr_cpu_ids);
7577 		submit_queues = nr_cpu_ids;
7578 	}
7579 	/*
7580 	 * Decide whether to tell scsi subsystem that we want mq. The
7581 	 * following should give the same answer for each host.
7582 	 */
7583 	hpnt->nr_hw_queues = submit_queues;
7584 	if (sdebug_host_max_queue)
7585 		hpnt->host_tagset = 1;
7586 
7587 	/* poll queues are possible for nr_hw_queues > 1 */
7588 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7589 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7590 			 my_name, poll_queues, hpnt->nr_hw_queues);
7591 		poll_queues = 0;
7592 	}
7593 
7594 	/*
7595 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7596 	 * left over for non-polled I/O.
7597 	 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7598 	 */
7599 	if (poll_queues >= submit_queues) {
7600 		if (submit_queues < 3)
7601 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7602 		else
7603 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7604 				my_name, submit_queues - 1);
7605 		poll_queues = 1;
7606 	}
7607 	if (poll_queues)
7608 		hpnt->nr_maps = 3;
7609 
7610 	sdbg_host->shost = hpnt;
7611 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7612 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7613 		hpnt->max_id = sdebug_num_tgts + 1;
7614 	else
7615 		hpnt->max_id = sdebug_num_tgts;
7616 	/* = sdebug_max_luns; */
7617 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7618 
7619 	hprot = 0;
7620 
7621 	switch (sdebug_dif) {
7622 
7623 	case T10_PI_TYPE1_PROTECTION:
7624 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7625 		if (sdebug_dix)
7626 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7627 		break;
7628 
7629 	case T10_PI_TYPE2_PROTECTION:
7630 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7631 		if (sdebug_dix)
7632 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7633 		break;
7634 
7635 	case T10_PI_TYPE3_PROTECTION:
7636 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7637 		if (sdebug_dix)
7638 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7639 		break;
7640 
7641 	default:
7642 		if (sdebug_dix)
7643 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7644 		break;
7645 	}
7646 
7647 	scsi_host_set_prot(hpnt, hprot);
7648 
7649 	if (have_dif_prot || sdebug_dix)
7650 		pr_info("host protection%s%s%s%s%s%s%s\n",
7651 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7652 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7653 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7654 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7655 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7656 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7657 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7658 
7659 	if (sdebug_guard == 1)
7660 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7661 	else
7662 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7663 
7664 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7665 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7666 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7667 		sdebug_statistics = true;
7668 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7669 	if (error) {
7670 		pr_err("scsi_add_host failed\n");
7671 		error = -ENODEV;
7672 		scsi_host_put(hpnt);
7673 	} else {
7674 		scsi_scan_host(hpnt);
7675 	}
7676 
7677 	return error;
7678 }
7679 
7680 static int sdebug_driver_remove(struct device *dev)
7681 {
7682 	struct sdebug_host_info *sdbg_host;
7683 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7684 
7685 	sdbg_host = to_sdebug_host(dev);
7686 
7687 	scsi_remove_host(sdbg_host->shost);
7688 
7689 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7690 				 dev_list) {
7691 		list_del(&sdbg_devinfo->dev_list);
7692 		kfree(sdbg_devinfo->zstate);
7693 		kfree(sdbg_devinfo);
7694 	}
7695 
7696 	scsi_host_put(sdbg_host->shost);
7697 	return 0;
7698 }
7699 
7700 static int pseudo_lld_bus_match(struct device *dev,
7701 				struct device_driver *dev_driver)
7702 {
7703 	return 1;
7704 }
7705 
7706 static struct bus_type pseudo_lld_bus = {
7707 	.name = "pseudo",
7708 	.match = pseudo_lld_bus_match,
7709 	.probe = sdebug_driver_probe,
7710 	.remove = sdebug_driver_remove,
7711 	.drv_groups = sdebug_drv_groups,
7712 };
7713