xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 8c657235)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
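
/*
 * Hedged example: these defaults can be overridden at module load time,
 * e.g. (parameter names mirror the sdebug_* variables declared further
 * down; the exact spellings come from module_param_named() calls outside
 * this excerpt, so treat this invocation as illustrative):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 */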
157 
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB	128
160 #define DEF_ZBC_MAX_OPEN_ZONES	8
161 #define DEF_ZBC_NR_CONV_ZONES	1
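
/*
 * Hedged example: a host-managed ZBC disk is typically set up with
 * something like the line below; "zbc=" maps onto sdeb_zbc_model_s
 * further down, while "zone_size_mb=" is an assumed spelling based on
 * DEF_ZBC_ZONE_SIZE_MB:
 *
 *	modprobe scsi_debug zbc=managed zone_size_mb=128
 */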
162 
163 #define SDEBUG_LUN_0_VAL 0
164 
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE		1
167 #define SDEBUG_OPT_MEDIUM_ERR		2
168 #define SDEBUG_OPT_TIMEOUT		4
169 #define SDEBUG_OPT_RECOVERED_ERR	8
170 #define SDEBUG_OPT_TRANSPORT_ERR	16
171 #define SDEBUG_OPT_DIF_ERR		32
172 #define SDEBUG_OPT_DIX_ERR		64
173 #define SDEBUG_OPT_MAC_TIMEOUT		128
174 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
175 #define SDEBUG_OPT_Q_NOISE		0x200
176 #define SDEBUG_OPT_ALL_TSF		0x400
177 #define SDEBUG_OPT_RARE_TSF		0x800
178 #define SDEBUG_OPT_N_WCE		0x1000
179 #define SDEBUG_OPT_RESET_NOISE		0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
181 #define SDEBUG_OPT_HOST_BUSY		0x8000
182 #define SDEBUG_OPT_CMD_ABORT		0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 			      SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 				  SDEBUG_OPT_TRANSPORT_ERR | \
187 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 				  SDEBUG_OPT_SHORT_TRANSFER | \
189 				  SDEBUG_OPT_HOST_BUSY | \
190 				  SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
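
/*
 * Hedged example: sdebug_opts is a bit mask, so the values above can be
 * OR-ed together. For instance 0x201 (SDEBUG_OPT_NOISE |
 * SDEBUG_OPT_Q_NOISE) enables both command and queue logging:
 *
 *	echo 0x201 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * The sysfs path assumes this driver's attributes live under the pseudo
 * bus; adjust if the actual location differs.
 */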
193 
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195  * priority order. In the subset implemented here lower numbers have higher
196  * priority. The UA numbers should be a sequence starting from 0 with
197  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
206 
207 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
208  * simulated at this sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
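
/*
 * Hedged example: with SDEBUG_OPT_MEDIUM_ERR set (e.g. opts=2), a read
 * touching sector 4660 (0x1234) should fail with a medium error. On a
 * hypothetical /dev/sdX backed by this driver:
 *
 *	dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 */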
211 
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213  * (for response) per submit queue at one time. Can be reduced by max_queue
214  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217  * but cannot exceed SDEBUG_CANQUEUE.
218  */
219 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is BITS_PER_LONG bits */
220 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN  255
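
/*
 * Hedged example: using the sysfs attribute mentioned above, the queue
 * depth of a hypothetical device at 0:0:0:0 could be reduced with:
 *
 *	echo 16 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */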
222 
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN			1	/* Data-in command (e.g. READ) */
225 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
227 #define F_D_UNKN		8
228 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
231 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
234 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
236 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
238 
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
250 
251 /* Zone types (zbcr05 table 25) */
252 enum sdebug_z_type {
253 	ZBC_ZONE_TYPE_CNV	= 0x1,
254 	ZBC_ZONE_TYPE_SWR	= 0x2,
255 	ZBC_ZONE_TYPE_SWP	= 0x3,
256 };
257 
258 /* enumeration names taken from table 26, zbcr05 */
259 enum sdebug_z_cond {
260 	ZBC_NOT_WRITE_POINTER	= 0x0,
261 	ZC1_EMPTY		= 0x1,
262 	ZC2_IMPLICIT_OPEN	= 0x2,
263 	ZC3_EXPLICIT_OPEN	= 0x3,
264 	ZC4_CLOSED		= 0x4,
265 	ZC6_READ_ONLY		= 0xd,
266 	ZC5_FULL		= 0xe,
267 	ZC7_OFFLINE		= 0xf,
268 };
269 
270 struct sdeb_zone_state {	/* ZBC: per zone state */
271 	enum sdebug_z_type z_type;
272 	enum sdebug_z_cond z_cond;
273 	bool z_non_seq_resource;
274 	unsigned int z_size;
275 	sector_t z_start;
276 	sector_t z_wp;
277 };
278 
279 struct sdebug_dev_info {
280 	struct list_head dev_list;
281 	unsigned int channel;
282 	unsigned int target;
283 	u64 lun;
284 	uuid_t lu_name;
285 	struct sdebug_host_info *sdbg_host;
286 	unsigned long uas_bm[1];
287 	atomic_t num_in_q;
288 	atomic_t stopped;	/* 1: by SSU, 2: device start */
289 	bool used;
290 
291 	/* For ZBC devices */
292 	enum blk_zoned_model zmodel;
293 	unsigned int zsize;
294 	unsigned int zsize_shift;
295 	unsigned int nr_zones;
296 	unsigned int nr_conv_zones;
297 	unsigned int nr_imp_open;
298 	unsigned int nr_exp_open;
299 	unsigned int nr_closed;
300 	unsigned int max_open;
301 	ktime_t create_ts;	/* time (since boot) when this device was created */
302 	struct sdeb_zone_state *zstate;
303 };
304 
305 struct sdebug_host_info {
306 	struct list_head host_list;
307 	int si_idx;	/* sdeb_store_info (per host) xarray index */
308 	struct Scsi_Host *shost;
309 	struct device dev;
310 	struct list_head dev_info_list;
311 };
312 
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 	rwlock_t macc_lck;	/* for atomic media access on this store */
316 	u8 *storep;		/* user data storage (ram) */
317 	struct t10_pi_tuple *dif_storep; /* protection info */
318 	void *map_storep;	/* provisioning map */
319 };
320 
321 #define to_sdebug_host(d)	\
322 	container_of(d, struct sdebug_host_info, dev)
323 
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 		      SDEB_DEFER_WQ = 2};
326 
327 struct sdebug_defer {
328 	struct hrtimer hrt;
329 	struct execute_work ew;
330 	int sqa_idx;	/* index of sdebug_queue array */
331 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
332 	int hc_idx;	/* hostwide tag index */
333 	int issuing_cpu;
334 	bool init_hrt;
335 	bool init_wq;
336 	bool aborted;	/* true when blk_abort_request() already called */
337 	enum sdeb_defer_type defer_t;
338 };
339 
340 struct sdebug_queued_cmd {
341 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
342 	 * instance indicates this slot is in use.
343 	 */
344 	struct sdebug_defer *sd_dp;
345 	struct scsi_cmnd *a_cmnd;
346 };
347 
348 struct sdebug_queue {
349 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
350 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
351 	spinlock_t qc_lock;
352 	atomic_t blocked;	/* to temporarily stop more being queued */
353 };
354 
355 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
356 static atomic_t sdebug_completions;  /* count of deferred completions */
357 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
358 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
359 static atomic_t sdeb_inject_pending;
360 
361 struct opcode_info_t {
361 	u8 num_attached;	/* 0 if this entry is a leaf; 0xff marks */
362 				/* the terminating element */
364 	u8 opcode;		/* if num_attached > 0, preferred */
365 	u16 sa;			/* service action */
366 	u32 flags;		/* OR-ed set of SDEB_F_* */
367 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
368 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
369 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
370 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
371 };
372 
373 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
374 enum sdeb_opcode_index {
375 	SDEB_I_INVALID_OPCODE =	0,
376 	SDEB_I_INQUIRY = 1,
377 	SDEB_I_REPORT_LUNS = 2,
378 	SDEB_I_REQUEST_SENSE = 3,
379 	SDEB_I_TEST_UNIT_READY = 4,
380 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
381 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
382 	SDEB_I_LOG_SENSE = 7,
383 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
384 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
385 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
386 	SDEB_I_START_STOP = 11,
387 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
388 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
389 	SDEB_I_MAINT_IN = 14,
390 	SDEB_I_MAINT_OUT = 15,
391 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
392 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
393 	SDEB_I_RESERVE = 18,		/* 6, 10 */
394 	SDEB_I_RELEASE = 19,		/* 6, 10 */
395 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
396 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
397 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
398 	SDEB_I_SEND_DIAG = 23,
399 	SDEB_I_UNMAP = 24,
400 	SDEB_I_WRITE_BUFFER = 25,
401 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
402 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
403 	SDEB_I_COMP_WRITE = 28,
404 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
405 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
406 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
407 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
408 };
409 
410 
411 static const unsigned char opcode_ind_arr[256] = {
412 /* 0x0; 0x0->0x1f: 6 byte cdbs */
413 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
414 	    0, 0, 0, 0,
415 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
416 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
417 	    SDEB_I_RELEASE,
418 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
419 	    SDEB_I_ALLOW_REMOVAL, 0,
420 /* 0x20; 0x20->0x3f: 10 byte cdbs */
421 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
422 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
423 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
424 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
425 /* 0x40; 0x40->0x5f: 10 byte cdbs */
426 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
427 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
428 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
429 	    SDEB_I_RELEASE,
430 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
431 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
432 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
433 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
434 	0, SDEB_I_VARIABLE_LEN,
435 /* 0x80; 0x80->0x9f: 16 byte cdbs */
436 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
437 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
438 	0, 0, 0, SDEB_I_VERIFY,
439 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
440 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
441 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
442 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
443 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
444 	     SDEB_I_MAINT_OUT, 0, 0, 0,
445 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
446 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
447 	0, 0, 0, 0, 0, 0, 0, 0,
448 	0, 0, 0, 0, 0, 0, 0, 0,
449 /* 0xc0; 0xc0->0xff: vendor specific */
450 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
452 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 };
455 
456 /*
457  * The following "response" functions return the SCSI mid-level's 4 byte
458  * tuple-in-an-int. To handle commands with the IMMED bit set, which
459  * request early command completion, they can OR their return value
460  * with SDEG_RES_IMMED_MASK.
461  */
462 #define SDEG_RES_IMMED_MASK 0x40000000
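
/*
 * Hedged sketch (not a handler in this file): a response function for a
 * command whose IMMED bit is set can request early completion by OR-ing
 * the mask into its normal result, roughly:
 *
 *	res = ...;			build the usual response first
 *	if (immed)			immed: hypothetical local flag
 *		return res | SDEG_RES_IMMED_MASK;
 *	return res;
 */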
463 
464 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
465 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
466 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
467 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 
494 static int sdebug_do_add_host(bool mk_new_store);
495 static int sdebug_add_host_helper(int per_host_idx);
496 static void sdebug_do_remove_host(bool the_end);
497 static int sdebug_add_store(void);
498 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
499 static void sdebug_erase_all_stores(bool apart_from_first);
500 
501 /*
502  * The following are overflow arrays for cdbs that "hit" the same index in
503  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
504  * should be placed in opcode_info_arr[], the others should be placed here.
505  */
506 static const struct opcode_info_t msense_iarr[] = {
507 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
508 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510 
511 static const struct opcode_info_t mselect_iarr[] = {
512 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
513 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 };
515 
516 static const struct opcode_info_t read_iarr[] = {
517 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
518 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
519 	     0, 0, 0, 0} },
520 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
521 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
522 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
523 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
524 	     0xc7, 0, 0, 0, 0} },
525 };
526 
527 static const struct opcode_info_t write_iarr[] = {
528 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
529 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
530 		   0, 0, 0, 0, 0, 0} },
531 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
532 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
533 		   0, 0, 0} },
534 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
535 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
536 		   0xbf, 0xc7, 0, 0, 0, 0} },
537 };
538 
539 static const struct opcode_info_t verify_iarr[] = {
540 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
541 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
542 		   0, 0, 0, 0, 0, 0} },
543 };
544 
545 static const struct opcode_info_t sa_in_16_iarr[] = {
546 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
547 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
548 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
549 };
550 
551 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
552 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
553 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
554 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
555 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
556 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
557 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
558 };
559 
560 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
561 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
562 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
563 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
564 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
565 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
566 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
567 };
568 
569 static const struct opcode_info_t write_same_iarr[] = {
570 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
571 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
572 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
573 };
574 
575 static const struct opcode_info_t reserve_iarr[] = {
576 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
577 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
578 };
579 
580 static const struct opcode_info_t release_iarr[] = {
581 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
582 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 };
584 
585 static const struct opcode_info_t sync_cache_iarr[] = {
586 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
587 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
588 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
589 };
590 
591 static const struct opcode_info_t pre_fetch_iarr[] = {
592 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
593 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
594 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
595 };
596 
597 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
598 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
599 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
600 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
601 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
602 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
604 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
605 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
607 };
608 
609 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
610 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
611 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
613 };
614 
615 
616 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
617  * plus the terminating elements for logic that scans this table such as
618  * REPORT SUPPORTED OPERATION CODES. */
619 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
620 /* 0 */
621 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
622 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
623 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
624 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
625 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
626 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
627 	     0, 0} },					/* REPORT LUNS */
628 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
629 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
630 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
631 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 /* 5 */
633 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
634 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
635 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
636 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
637 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
638 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
640 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
641 	     0, 0, 0} },
642 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
643 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
644 	     0, 0} },
645 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
646 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
647 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
648 /* 10 */
649 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
650 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
651 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
652 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
653 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
654 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
655 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
656 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
657 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
658 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
659 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
660 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
661 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
662 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
663 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
664 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
665 				0xff, 0, 0xc7, 0, 0, 0, 0} },
666 /* 15 */
667 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
668 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
669 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
670 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
671 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
673 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
674 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
675 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
676 	     0xff, 0xff} },
677 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
678 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
679 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
680 	     0} },
681 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
682 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
683 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 	     0} },
685 /* 20 */
686 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
687 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
688 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
689 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
691 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
693 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
695 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
696 /* 25 */
697 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
698 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
699 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
700 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
701 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
702 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
703 		 0, 0, 0, 0, 0} },
704 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
705 	    resp_sync_cache, sync_cache_iarr,
706 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
707 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
708 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
709 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
710 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
711 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
712 	    resp_pre_fetch, pre_fetch_iarr,
713 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
714 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
715 
716 /* 30 */
717 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
718 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
719 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
720 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
721 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
722 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
723 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
725 /* sentinel */
726 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
727 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
728 };
729 
730 static int sdebug_num_hosts;
731 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
732 static int sdebug_ato = DEF_ATO;
733 static int sdebug_cdb_len = DEF_CDB_LEN;
734 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
735 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
736 static int sdebug_dif = DEF_DIF;
737 static int sdebug_dix = DEF_DIX;
738 static int sdebug_dsense = DEF_D_SENSE;
739 static int sdebug_every_nth = DEF_EVERY_NTH;
740 static int sdebug_fake_rw = DEF_FAKE_RW;
741 static unsigned int sdebug_guard = DEF_GUARD;
742 static int sdebug_host_max_queue;	/* per host */
743 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
744 static int sdebug_max_luns = DEF_MAX_LUNS;
745 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
746 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
747 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
748 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
749 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
750 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
751 static int sdebug_no_uld;
752 static int sdebug_num_parts = DEF_NUM_PARTS;
753 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
754 static int sdebug_opt_blks = DEF_OPT_BLKS;
755 static int sdebug_opts = DEF_OPTS;
756 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
757 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
758 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
759 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
760 static int sdebug_sector_size = DEF_SECTOR_SIZE;
761 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
762 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
763 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
764 static unsigned int sdebug_lbpu = DEF_LBPU;
765 static unsigned int sdebug_lbpws = DEF_LBPWS;
766 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
767 static unsigned int sdebug_lbprz = DEF_LBPRZ;
768 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
769 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
770 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
771 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
772 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
773 static int sdebug_uuid_ctl = DEF_UUID_CTL;
774 static bool sdebug_random = DEF_RANDOM;
775 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
776 static bool sdebug_removable = DEF_REMOVABLE;
777 static bool sdebug_clustering;
778 static bool sdebug_host_lock = DEF_HOST_LOCK;
779 static bool sdebug_strict = DEF_STRICT;
780 static bool sdebug_any_injecting_opt;
781 static bool sdebug_verbose;
782 static bool have_dif_prot;
783 static bool write_since_sync;
784 static bool sdebug_statistics = DEF_STATISTICS;
785 static bool sdebug_wp;
786 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
787 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
788 static char *sdeb_zbc_model_s;
789 
790 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
791 			  SAM_LUN_AM_FLAT = 0x1,
792 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
793 			  SAM_LUN_AM_EXTENDED = 0x3};
794 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
795 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
796 
797 static unsigned int sdebug_store_sectors;
798 static sector_t sdebug_capacity;	/* in sectors */
799 
800 /* Old BIOS-style geometry. The kernel may eventually drop these, but
801    some mode sense pages may still need them. */
802 static int sdebug_heads;		/* heads per disk */
803 static int sdebug_cylinders_per;	/* cylinders per surface */
804 static int sdebug_sectors_per;		/* sectors per cylinder */
805 
806 static LIST_HEAD(sdebug_host_list);
807 static DEFINE_SPINLOCK(sdebug_host_list_lock);
808 
809 static struct xarray per_store_arr;
810 static struct xarray *per_store_ap = &per_store_arr;
811 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
812 static int sdeb_most_recent_idx = -1;
813 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
814 
815 static unsigned long map_size;
816 static int num_aborts;
817 static int num_dev_resets;
818 static int num_target_resets;
819 static int num_bus_resets;
820 static int num_host_resets;
821 static int dix_writes;
822 static int dix_reads;
823 static int dif_errors;
824 
825 /* ZBC global data */
826 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
827 static int sdeb_zbc_zone_size_mb;
828 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
829 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
830 
831 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
832 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
833 
834 static DEFINE_RWLOCK(atomic_rw);
835 static DEFINE_RWLOCK(atomic_rw2);
836 
837 static rwlock_t *ramdisk_lck_a[2];
838 
839 static char sdebug_proc_name[] = MY_NAME;
840 static const char *my_name = MY_NAME;
841 
842 static struct bus_type pseudo_lld_bus;
843 
844 static struct device_driver sdebug_driverfs_driver = {
845 	.name 		= sdebug_proc_name,
846 	.bus		= &pseudo_lld_bus,
847 };
848 
849 static const int check_condition_result =
850 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
851 
852 static const int illegal_condition_result =
853 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
854 
855 static const int device_qfull_result =
856 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
857 
858 static const int condition_met_result = SAM_STAT_CONDITION_MET;
859 
860 
861 /* Only do the extra work involved in logical block provisioning if one or
862  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
863  * real reads and writes (i.e. not skipping them for speed).
864  */
865 static inline bool scsi_debug_lbp(void)
866 {
867 	return 0 == sdebug_fake_rw &&
868 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
869 }
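
/*
 * Hedged example: logical block provisioning is exercised by loading
 * the module with one of the lbp* knobs while keeping real I/O enabled
 * (parameter spellings assumed to match the variables above):
 *
 *	modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 */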
870 
871 static void *lba2fake_store(struct sdeb_store_info *sip,
872 			    unsigned long long lba)
873 {
874 	struct sdeb_store_info *lsip = sip;
875 
876 	lba = do_div(lba, sdebug_store_sectors);
877 	if (!sip || !sip->storep) {
878 		WARN_ON_ONCE(true);
879 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
880 	}
881 	return lsip->storep + lba * sdebug_sector_size;
882 }
883 
884 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
885 				      sector_t sector)
886 {
887 	sector = sector_div(sector, sdebug_store_sectors);
888 
889 	return sip->dif_storep + sector;
890 }
891 
892 static void sdebug_max_tgts_luns(void)
893 {
894 	struct sdebug_host_info *sdbg_host;
895 	struct Scsi_Host *hpnt;
896 
897 	spin_lock(&sdebug_host_list_lock);
898 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
899 		hpnt = sdbg_host->shost;
900 		if ((hpnt->this_id >= 0) &&
901 		    (sdebug_num_tgts > hpnt->this_id))
902 			hpnt->max_id = sdebug_num_tgts + 1;
903 		else
904 			hpnt->max_id = sdebug_num_tgts;
905 		/* sdebug_max_luns; */
906 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
907 	}
908 	spin_unlock(&sdebug_host_list_lock);
909 }
910 
911 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
912 
913 /* Set in_bit to -1 to indicate no bit position of invalid field */
914 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
915 				 enum sdeb_cmd_data c_d,
916 				 int in_byte, int in_bit)
917 {
918 	unsigned char *sbuff;
919 	u8 sks[4];
920 	int sl, asc;
921 
922 	sbuff = scp->sense_buffer;
923 	if (!sbuff) {
924 		sdev_printk(KERN_ERR, scp->device,
925 			    "%s: sense_buffer is NULL\n", __func__);
926 		return;
927 	}
928 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
929 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
930 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
931 	memset(sks, 0, sizeof(sks));
932 	sks[0] = 0x80;
933 	if (c_d)
934 		sks[0] |= 0x40;
935 	if (in_bit >= 0) {
936 		sks[0] |= 0x8;
937 		sks[0] |= 0x7 & in_bit;
938 	}
939 	put_unaligned_be16(in_byte, sks + 1);
940 	if (sdebug_dsense) {
941 		sl = sbuff[7] + 8;
942 		sbuff[7] = sl;
943 		sbuff[sl] = 0x2;
944 		sbuff[sl + 1] = 0x6;
945 		memcpy(sbuff + sl + 4, sks, 3);
946 	} else
947 		memcpy(sbuff + 15, sks, 3);
948 	if (sdebug_verbose)
949 		sdev_printk(KERN_INFO, scp->device,
950 			    "%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
951 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
952 }
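
/*
 * Example usage, mirroring calls made elsewhere in this driver: flag
 * bit 4 of cdb byte 1 as the offending field:
 *
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
 *
 * or pass in_bit = -1 when no single bit position applies.
 */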
953 
954 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
955 {
956 	unsigned char *sbuff;
957 
958 	sbuff = scp->sense_buffer;
959 	if (!sbuff) {
960 		sdev_printk(KERN_ERR, scp->device,
961 			    "%s: sense_buffer is NULL\n", __func__);
962 		return;
963 	}
964 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
965 
966 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
967 
968 	if (sdebug_verbose)
969 		sdev_printk(KERN_INFO, scp->device,
970 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
971 			    my_name, key, asc, asq);
972 }
973 
974 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
975 {
976 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
977 }
978 
979 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
980 			    void __user *arg)
981 {
982 	if (sdebug_verbose) {
983 		if (0x1261 == cmd)
984 			sdev_printk(KERN_INFO, dev,
985 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
986 		else if (0x5331 == cmd)
987 			sdev_printk(KERN_INFO, dev,
988 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
989 				    __func__);
990 		else
991 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
992 				    __func__, cmd);
993 	}
994 	return -EINVAL;
995 	/* return -ENOTTY; // correct return but upsets fdisk */
996 }
997 
998 static void config_cdb_len(struct scsi_device *sdev)
999 {
1000 	switch (sdebug_cdb_len) {
1001 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1002 		sdev->use_10_for_rw = false;
1003 		sdev->use_16_for_rw = false;
1004 		sdev->use_10_for_ms = false;
1005 		break;
1006 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1007 		sdev->use_10_for_rw = true;
1008 		sdev->use_16_for_rw = false;
1009 		sdev->use_10_for_ms = false;
1010 		break;
1011 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1012 		sdev->use_10_for_rw = true;
1013 		sdev->use_16_for_rw = false;
1014 		sdev->use_10_for_ms = true;
1015 		break;
1016 	case 16:
1017 		sdev->use_10_for_rw = false;
1018 		sdev->use_16_for_rw = true;
1019 		sdev->use_10_for_ms = true;
1020 		break;
1021 	case 32: /* No knobs to suggest this so same as 16 for now */
1022 		sdev->use_10_for_rw = false;
1023 		sdev->use_16_for_rw = true;
1024 		sdev->use_10_for_ms = true;
1025 		break;
1026 	default:
1027 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1028 			sdebug_cdb_len);
1029 		sdev->use_10_for_rw = true;
1030 		sdev->use_16_for_rw = false;
1031 		sdev->use_10_for_ms = false;
1032 		sdebug_cdb_len = 10;
1033 		break;
1034 	}
1035 }
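
/*
 * Hedged example: cdb_len is also exposed as a module parameter, so the
 * suggested cdb size can be chosen at load time (spelling assumed to
 * match the sdebug_cdb_len variable):
 *
 *	modprobe scsi_debug cdb_len=16
 */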
1036 
1037 static void all_config_cdb_len(void)
1038 {
1039 	struct sdebug_host_info *sdbg_host;
1040 	struct Scsi_Host *shost;
1041 	struct scsi_device *sdev;
1042 
1043 	spin_lock(&sdebug_host_list_lock);
1044 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1045 		shost = sdbg_host->shost;
1046 		shost_for_each_device(sdev, shost) {
1047 			config_cdb_len(sdev);
1048 		}
1049 	}
1050 	spin_unlock(&sdebug_host_list_lock);
1051 }
1052 
1053 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1054 {
1055 	struct sdebug_host_info *sdhp;
1056 	struct sdebug_dev_info *dp;
1057 
1058 	spin_lock(&sdebug_host_list_lock);
1059 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1060 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1061 			if ((devip->sdbg_host == dp->sdbg_host) &&
1062 			    (devip->target == dp->target))
1063 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1064 		}
1065 	}
1066 	spin_unlock(&sdebug_host_list_lock);
1067 }
1068 
1069 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1070 {
1071 	int k;
1072 
1073 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1074 	if (k != SDEBUG_NUM_UAS) {
1075 		const char *cp = NULL;
1076 
1077 		switch (k) {
1078 		case SDEBUG_UA_POR:
1079 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1080 					POWER_ON_RESET_ASCQ);
1081 			if (sdebug_verbose)
1082 				cp = "power on reset";
1083 			break;
1084 		case SDEBUG_UA_BUS_RESET:
1085 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1086 					BUS_RESET_ASCQ);
1087 			if (sdebug_verbose)
1088 				cp = "bus reset";
1089 			break;
1090 		case SDEBUG_UA_MODE_CHANGED:
1091 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1092 					MODE_CHANGED_ASCQ);
1093 			if (sdebug_verbose)
1094 				cp = "mode parameters changed";
1095 			break;
1096 		case SDEBUG_UA_CAPACITY_CHANGED:
1097 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1098 					CAPACITY_CHANGED_ASCQ);
1099 			if (sdebug_verbose)
1100 				cp = "capacity data changed";
1101 			break;
1102 		case SDEBUG_UA_MICROCODE_CHANGED:
1103 			mk_sense_buffer(scp, UNIT_ATTENTION,
1104 					TARGET_CHANGED_ASC,
1105 					MICROCODE_CHANGED_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "microcode has been changed";
1108 			break;
1109 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION,
1111 					TARGET_CHANGED_ASC,
1112 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1113 			if (sdebug_verbose)
1114 				cp = "microcode has been changed without reset";
1115 			break;
1116 		case SDEBUG_UA_LUNS_CHANGED:
1117 			/*
1118 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1119 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1120 			 * on the target, until a REPORT LUNS command is
1121 			 * received.  SPC-4 behavior is to report it only once.
1122 			 * NOTE:  sdebug_scsi_level does not use the same
1123 			 * values as struct scsi_device->scsi_level.
1124 			 */
1125 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1126 				clear_luns_changed_on_target(devip);
1127 			mk_sense_buffer(scp, UNIT_ATTENTION,
1128 					TARGET_CHANGED_ASC,
1129 					LUNS_CHANGED_ASCQ);
1130 			if (sdebug_verbose)
1131 				cp = "reported luns data has changed";
1132 			break;
1133 		default:
1134 			pr_warn("unexpected unit attention code=%d\n", k);
1135 			if (sdebug_verbose)
1136 				cp = "unknown";
1137 			break;
1138 		}
1139 		clear_bit(k, devip->uas_bm);
1140 		if (sdebug_verbose)
1141 			sdev_printk(KERN_INFO, scp->device,
1142 				   "%s reports: Unit attention: %s\n",
1143 				   my_name, cp);
1144 		return check_condition_result;
1145 	}
1146 	return 0;
1147 }
1148 
1149 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1150 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1151 				int arr_len)
1152 {
1153 	int act_len;
1154 	struct scsi_data_buffer *sdb = &scp->sdb;
1155 
1156 	if (!sdb->length)
1157 		return 0;
1158 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1159 		return DID_ERROR << 16;
1160 
1161 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1162 				      arr, arr_len);
1163 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1164 
1165 	return 0;
1166 }
1167 
1168 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1169  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1170  * calls, not required to write in ascending offset order. Assumes resid
1171  * set to scsi_bufflen() prior to any calls.
1172  */
1173 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1174 				  int arr_len, unsigned int off_dst)
1175 {
1176 	unsigned int act_len, n;
1177 	struct scsi_data_buffer *sdb = &scp->sdb;
1178 	off_t skip = off_dst;
1179 
1180 	if (sdb->length <= off_dst)
1181 		return 0;
1182 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1183 		return DID_ERROR << 16;
1184 
1185 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1186 				       arr, arr_len, skip);
1187 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1188 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1189 		 scsi_get_resid(scp));
1190 	n = scsi_bufflen(scp) - (off_dst + act_len);
1191 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1192 	return 0;
1193 }
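
/*
 * Hedged example: because offsets need not be written in ascending
 * order, a caller could fill a trailing descriptor before the header;
 * desc, desc_len and hdr below are hypothetical locals:
 *
 *	p_fill_from_dev_buffer(scp, desc, desc_len, 64);
 *	p_fill_from_dev_buffer(scp, hdr, 64, 0);
 */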
1194 
1195 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1196  * 'arr' or -1 if error.
1197  */
1198 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1199 			       int arr_len)
1200 {
1201 	if (!scsi_bufflen(scp))
1202 		return 0;
1203 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1204 		return -1;
1205 
1206 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1207 }
1208 
1209 
1210 static char sdebug_inq_vendor_id[9] = "Linux   ";
1211 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1212 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1213 /* Use some locally assigned NAAs for SAS addresses. */
1214 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1215 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1216 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1217 
1218 /* Device identification VPD page. Returns number of bytes placed in arr */
1219 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1220 			  int target_dev_id, int dev_id_num,
1221 			  const char *dev_id_str, int dev_id_str_len,
1222 			  const uuid_t *lu_name)
1223 {
1224 	int num, port_a;
1225 	char b[32];
1226 
1227 	port_a = target_dev_id + 1;
1228 	/* T10 vendor identifier field format (faked) */
1229 	arr[0] = 0x2;	/* ASCII */
1230 	arr[1] = 0x1;
1231 	arr[2] = 0x0;
1232 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1233 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1234 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1235 	num = 8 + 16 + dev_id_str_len;
1236 	arr[3] = num;
1237 	num += 4;
1238 	if (dev_id_num >= 0) {
1239 		if (sdebug_uuid_ctl) {
1240 			/* Locally assigned UUID */
1241 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1242 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1243 			arr[num++] = 0x0;
1244 			arr[num++] = 0x12;
1245 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1246 			arr[num++] = 0x0;
1247 			memcpy(arr + num, lu_name, 16);
1248 			num += 16;
1249 		} else {
1250 			/* NAA-3, Logical unit identifier (binary) */
1251 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1252 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1253 			arr[num++] = 0x0;
1254 			arr[num++] = 0x8;
1255 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1256 			num += 8;
1257 		}
1258 		/* Target relative port number */
1259 		arr[num++] = 0x61;	/* proto=sas, binary */
1260 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1261 		arr[num++] = 0x0;	/* reserved */
1262 		arr[num++] = 0x4;	/* length */
1263 		arr[num++] = 0x0;	/* reserved */
1264 		arr[num++] = 0x0;	/* reserved */
1265 		arr[num++] = 0x0;
1266 		arr[num++] = 0x1;	/* relative port A */
1267 	}
1268 	/* NAA-3, Target port identifier */
1269 	arr[num++] = 0x61;	/* proto=sas, binary */
1270 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1271 	arr[num++] = 0x0;
1272 	arr[num++] = 0x8;
1273 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1274 	num += 8;
1275 	/* NAA-3, Target port group identifier */
1276 	arr[num++] = 0x61;	/* proto=sas, binary */
1277 	arr[num++] = 0x95;	/* piv=1, target port group id */
1278 	arr[num++] = 0x0;
1279 	arr[num++] = 0x4;
1280 	arr[num++] = 0;
1281 	arr[num++] = 0;
1282 	put_unaligned_be16(port_group_id, arr + num);
1283 	num += 2;
1284 	/* NAA-3, Target device identifier */
1285 	arr[num++] = 0x61;	/* proto=sas, binary */
1286 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1287 	arr[num++] = 0x0;
1288 	arr[num++] = 0x8;
1289 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1290 	num += 8;
1291 	/* SCSI name string: Target device identifier */
1292 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1293 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1294 	arr[num++] = 0x0;
1295 	arr[num++] = 24;
1296 	memcpy(arr + num, "naa.32222220", 12);
1297 	num += 12;
1298 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1299 	memcpy(arr + num, b, 8);
1300 	num += 8;
1301 	memset(arr + num, 0, 4);
1302 	num += 4;
1303 	return num;
1304 }
1305 
1306 static unsigned char vpd84_data[] = {
1307 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1308     0x22,0x22,0x22,0x0,0xbb,0x1,
1309     0x22,0x22,0x22,0x0,0xbb,0x2,
1310 };
1311 
1312 /*  Software interface identification VPD page */
1313 static int inquiry_vpd_84(unsigned char *arr)
1314 {
1315 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1316 	return sizeof(vpd84_data);
1317 }
1318 
1319 /* Management network addresses VPD page */
1320 static int inquiry_vpd_85(unsigned char *arr)
1321 {
1322 	int num = 0;
1323 	const char *na1 = "https://www.kernel.org/config";
1324 	const char *na2 = "http://www.kernel.org/log";
1325 	int plen, olen;
1326 
1327 	arr[num++] = 0x1;	/* lu, storage config */
1328 	arr[num++] = 0x0;	/* reserved */
1329 	arr[num++] = 0x0;
1330 	olen = strlen(na1);
1331 	plen = olen + 1;
1332 	if (plen % 4)
1333 		plen = ((plen / 4) + 1) * 4;
1334 	arr[num++] = plen;	/* length, null terminated, padded */
1335 	memcpy(arr + num, na1, olen);
1336 	memset(arr + num + olen, 0, plen - olen);
1337 	num += plen;
1338 
1339 	arr[num++] = 0x4;	/* lu, logging */
1340 	arr[num++] = 0x0;	/* reserved */
1341 	arr[num++] = 0x0;
1342 	olen = strlen(na2);
1343 	plen = olen + 1;
1344 	if (plen % 4)
1345 		plen = ((plen / 4) + 1) * 4;
1346 	arr[num++] = plen;	/* length, null terminated, padded */
1347 	memcpy(arr + num, na2, olen);
1348 	memset(arr + num + olen, 0, plen - olen);
1349 	num += plen;
1350 
1351 	return num;
1352 }
1353 
1354 /* SCSI ports VPD page */
1355 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1356 {
1357 	int num = 0;
1358 	int port_a, port_b;
1359 
1360 	port_a = target_dev_id + 1;
1361 	port_b = port_a + 1;
1362 	arr[num++] = 0x0;	/* reserved */
1363 	arr[num++] = 0x0;	/* reserved */
1364 	arr[num++] = 0x0;
1365 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1366 	memset(arr + num, 0, 6);
1367 	num += 6;
1368 	arr[num++] = 0x0;
1369 	arr[num++] = 12;	/* length tp descriptor */
1370 	/* naa-3 target port identifier (A) */
1371 	arr[num++] = 0x61;	/* proto=sas, binary */
1372 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1373 	arr[num++] = 0x0;	/* reserved */
1374 	arr[num++] = 0x8;	/* length */
1375 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1376 	num += 8;
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1381 	memset(arr + num, 0, 6);
1382 	num += 6;
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 12;	/* length tp descriptor */
1385 	/* naa-3 target port identifier (B) */
1386 	arr[num++] = 0x61;	/* proto=sas, binary */
1387 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x8;	/* length */
1390 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1391 	num += 8;
1392 
1393 	return num;
1394 }
1395 
1396 
1397 static unsigned char vpd89_data[] = {
1398 /* from 4th byte */ 0,0,0,0,
1399 'l','i','n','u','x',' ',' ',' ',
1400 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1401 '1','2','3','4',
1402 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1403 0xec,0,0,0,
1404 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1405 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1406 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1407 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1408 0x53,0x41,
1409 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1410 0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1412 0x10,0x80,
1413 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1414 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1415 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1416 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1417 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1418 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1419 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1420 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1424 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1425 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1426 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1439 };
1440 
1441 /* ATA Information VPD page */
1442 static int inquiry_vpd_89(unsigned char *arr)
1443 {
1444 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1445 	return sizeof(vpd89_data);
1446 }
1447 
1448 
1449 static unsigned char vpdb0_data[] = {
1450 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1451 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 };
1455 
1456 /* Block limits VPD page (SBC-3) */
1457 static int inquiry_vpd_b0(unsigned char *arr)
1458 {
1459 	unsigned int gran;
1460 
1461 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1462 
1463 	/* Optimal transfer length granularity */
1464 	if (sdebug_opt_xferlen_exp != 0 &&
1465 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1466 		gran = 1 << sdebug_opt_xferlen_exp;
1467 	else
1468 		gran = 1 << sdebug_physblk_exp;
1469 	put_unaligned_be16(gran, arr + 2);
1470 
1471 	/* Maximum Transfer Length */
1472 	if (sdebug_store_sectors > 0x400)
1473 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1474 
1475 	/* Optimal Transfer Length */
1476 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1477 
1478 	if (sdebug_lbpu) {
1479 		/* Maximum Unmap LBA Count */
1480 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1481 
1482 		/* Maximum Unmap Block Descriptor Count */
1483 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1484 	}
1485 
1486 	/* Unmap Granularity Alignment */
1487 	if (sdebug_unmap_alignment) {
1488 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1489 		arr[28] |= 0x80; /* UGAVALID */
1490 	}
1491 
1492 	/* Optimal Unmap Granularity */
1493 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1494 
1495 	/* Maximum WRITE SAME Length */
1496 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1497 
1498 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 }
1502 
1503 /* Block device characteristics VPD page (SBC-3) */
1504 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1505 {
1506 	memset(arr, 0, 0x3c);
1507 	arr[0] = 0;
1508 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1509 	arr[2] = 0;
1510 	arr[3] = 5;	/* less than 1.8" */
1511 	if (devip->zmodel == BLK_ZONED_HA)
1512 		arr[4] = 1 << 4;	/* zoned field = 01b */
1513 
1514 	return 0x3c;
1515 }
1516 
1517 /* Logical block provisioning VPD page (SBC-4) */
1518 static int inquiry_vpd_b2(unsigned char *arr)
1519 {
1520 	memset(arr, 0, 0x4);
1521 	arr[0] = 0;			/* threshold exponent */
1522 	if (sdebug_lbpu)
1523 		arr[1] = 1 << 7;
1524 	if (sdebug_lbpws)
1525 		arr[1] |= 1 << 6;
1526 	if (sdebug_lbpws10)
1527 		arr[1] |= 1 << 5;
1528 	if (sdebug_lbprz && scsi_debug_lbp())
1529 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1530 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1531 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1532 	/* threshold_percentage=0 */
1533 	return 0x4;
1534 }
1535 
1536 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1537 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1538 {
1539 	memset(arr, 0, 0x3c);
1540 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1541 	/*
1542 	 * Set Optimal number of open sequential write preferred zones and
1543 	 * Optimal number of non-sequentially written sequential write
1544 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1545 	 * fields set to zero, apart from Max. number of open swrz_s field.
1546 	 */
1547 	put_unaligned_be32(0xffffffff, &arr[4]);
1548 	put_unaligned_be32(0xffffffff, &arr[8]);
1549 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1550 		put_unaligned_be32(devip->max_open, &arr[12]);
1551 	else
1552 		put_unaligned_be32(0xffffffff, &arr[12]);
1553 	return 0x3c;
1554 }
1555 
1556 #define SDEBUG_LONG_INQ_SZ 96
1557 #define SDEBUG_MAX_INQ_ARR_SZ 584
1558 
1559 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1560 {
1561 	unsigned char pq_pdt;
1562 	unsigned char *arr;
1563 	unsigned char *cmd = scp->cmnd;
1564 	int alloc_len, n, ret;
1565 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1566 
1567 	alloc_len = get_unaligned_be16(cmd + 3);
1568 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1569 	if (!arr)
1570 		return DID_REQUEUE << 16;
1571 	is_disk = (sdebug_ptype == TYPE_DISK);
1572 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1573 	is_disk_zbc = (is_disk || is_zbc);
1574 	have_wlun = scsi_is_wlun(scp->device->lun);
1575 	if (have_wlun)
1576 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1577 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1578 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1579 	else
1580 		pq_pdt = (sdebug_ptype & 0x1f);
1581 	arr[0] = pq_pdt;
1582 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1583 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1584 		kfree(arr);
1585 		return check_condition_result;
1586 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1587 		int lu_id_num, port_group_id, target_dev_id, len;
1588 		char lu_id_str[6];
1589 		int host_no = devip->sdbg_host->shost->host_no;
1590 
1591 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1592 		    (devip->channel & 0x7f);
1593 		if (sdebug_vpd_use_hostno == 0)
1594 			host_no = 0;
1595 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1596 			    (devip->target * 1000) + devip->lun);
1597 		target_dev_id = ((host_no + 1) * 2000) +
1598 				 (devip->target * 1000) - 3;
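		/*
		 * Worked example: host_no 0, target 1, lun 2 gives
		 * lu_id_num 3002 and target_dev_id 2997.
		 */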
1599 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1600 		if (0 == cmd[2]) { /* supported vital product data pages */
1601 			arr[1] = cmd[2];	/*sanity */
1602 			n = 4;
1603 			arr[n++] = 0x0;   /* this page */
1604 			arr[n++] = 0x80;  /* unit serial number */
1605 			arr[n++] = 0x83;  /* device identification */
1606 			arr[n++] = 0x84;  /* software interface ident. */
1607 			arr[n++] = 0x85;  /* management network addresses */
1608 			arr[n++] = 0x86;  /* extended inquiry */
1609 			arr[n++] = 0x87;  /* mode page policy */
1610 			arr[n++] = 0x88;  /* SCSI ports */
1611 			if (is_disk_zbc) {	  /* SBC or ZBC */
1612 				arr[n++] = 0x89;  /* ATA information */
1613 				arr[n++] = 0xb0;  /* Block limits */
1614 				arr[n++] = 0xb1;  /* Block characteristics */
1615 				if (is_disk)
1616 					arr[n++] = 0xb2;  /* LB Provisioning */
1617 				if (is_zbc)
1618 					arr[n++] = 0xb6;  /* ZB dev. char. */
1619 			}
1620 			arr[3] = n - 4;	  /* number of supported VPD pages */
1621 		} else if (0x80 == cmd[2]) { /* unit serial number */
1622 			arr[1] = cmd[2];	/*sanity */
1623 			arr[3] = len;
1624 			memcpy(&arr[4], lu_id_str, len);
1625 		} else if (0x83 == cmd[2]) { /* device identification */
1626 			arr[1] = cmd[2];	/*sanity */
1627 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1628 						target_dev_id, lu_id_num,
1629 						lu_id_str, len,
1630 						&devip->lu_name);
1631 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1632 			arr[1] = cmd[2];	/*sanity */
1633 			arr[3] = inquiry_vpd_84(&arr[4]);
1634 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1635 			arr[1] = cmd[2];	/*sanity */
1636 			arr[3] = inquiry_vpd_85(&arr[4]);
1637 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1638 			arr[1] = cmd[2];	/*sanity */
1639 			arr[3] = 0x3c;	/* number of following entries */
1640 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1641 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1642 			else if (have_dif_prot)
1643 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1644 			else
1645 				arr[4] = 0x0;   /* no protection stuff */
1646 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1647 		} else if (0x87 == cmd[2]) { /* mode page policy */
1648 			arr[1] = cmd[2];	/*sanity */
1649 			arr[3] = 0x8;	/* number of following entries */
1650 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1651 			arr[6] = 0x80;	/* mlus, shared */
1652 			arr[8] = 0x18;	 /* protocol specific lu */
1653 			arr[10] = 0x82;	 /* mlus, per initiator port */
1654 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1657 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1658 			arr[1] = cmd[2];        /*sanity */
1659 			n = inquiry_vpd_89(&arr[4]);
1660 			put_unaligned_be16(n, arr + 2);
1661 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			arr[3] = inquiry_vpd_b0(&arr[4]);
1664 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1667 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1668 			arr[1] = cmd[2];        /*sanity */
1669 			arr[3] = inquiry_vpd_b2(&arr[4]);
1670 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1671 			arr[1] = cmd[2];        /*sanity */
1672 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1673 		} else {
1674 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1675 			kfree(arr);
1676 			return check_condition_result;
1677 		}
1678 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1679 		ret = fill_from_dev_buffer(scp, arr,
1680 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1681 		kfree(arr);
1682 		return ret;
1683 	}
1684 	/* drops through here for a standard inquiry */
1685 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1686 	arr[2] = sdebug_scsi_level;
1687 	arr[3] = 2;    /* response_data_format==2 */
1688 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1689 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1690 	if (sdebug_vpd_use_hostno == 0)
1691 		arr[5] |= 0x10; /* claim: implicit TPGS */
1692 	arr[6] = 0x10; /* claim: MultiP */
1693 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1694 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1695 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1696 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1697 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1698 	/* Use Vendor Specific area to place driver date in ASCII */
1699 	memcpy(&arr[36], sdebug_version_date, 8);
1700 	/* version descriptors (2 bytes each) follow */
1701 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1702 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1703 	n = 62;
1704 	if (is_disk) {		/* SBC-4 no version claimed */
1705 		put_unaligned_be16(0x600, arr + n);
1706 		n += 2;
1707 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1708 		put_unaligned_be16(0x525, arr + n);
1709 		n += 2;
1710 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1711 		put_unaligned_be16(0x624, arr + n);
1712 		n += 2;
1713 	}
1714 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1715 	ret = fill_from_dev_buffer(scp, arr,
1716 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1717 	kfree(arr);
1718 	return ret;
1719 }
1720 
1721 /* See resp_iec_m_pg() for how this data is manipulated */
1722 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1723 				   0, 0, 0x0, 0x0};
1724 
1725 static int resp_requests(struct scsi_cmnd *scp,
1726 			 struct sdebug_dev_info *devip)
1727 {
1728 	unsigned char *cmd = scp->cmnd;
1729 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1730 	bool dsense = !!(cmd[1] & 1);
1731 	int alloc_len = cmd[4];
1732 	int len = 18;
1733 	int stopped_state = atomic_read(&devip->stopped);
1734 
1735 	memset(arr, 0, sizeof(arr));
1736 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1737 		if (dsense) {
1738 			arr[0] = 0x72;
1739 			arr[1] = NOT_READY;
1740 			arr[2] = LOGICAL_UNIT_NOT_READY;
1741 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1742 			len = 8;
1743 		} else {
1744 			arr[0] = 0x70;
1745 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1746 			arr[7] = 0xa;			/* 18 byte sense buffer */
1747 			arr[12] = LOGICAL_UNIT_NOT_READY;
1748 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1749 		}
1750 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1751 		/* Informational exceptions control mode page: TEST=1, MRIE=6 */
1752 		if (dsense) {
1753 			arr[0] = 0x72;
1754 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1755 			arr[2] = THRESHOLD_EXCEEDED;
1756 			arr[3] = 0xff;		/* Failure prediction (false) */
1757 			len = 8;
1758 		} else {
1759 			arr[0] = 0x70;
1760 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1761 			arr[7] = 0xa;		/* 18 byte sense buffer */
1762 			arr[12] = THRESHOLD_EXCEEDED;
1763 			arr[13] = 0xff;		/* Failure prediction (false) */
1764 		}
1765 	} else {	/* nothing to report */
1766 		if (dsense) {
1767 			len = 8;
1768 			memset(arr, 0, len);
1769 			arr[0] = 0x72;
1770 		} else {
1771 			memset(arr, 0, len);
1772 			arr[0] = 0x70;
1773 			arr[7] = 0xa;
1774 		}
1775 	}
1776 	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1777 }
1778 
1779 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1780 {
1781 	unsigned char *cmd = scp->cmnd;
1782 	int power_cond, want_stop, stopped_state;
1783 	bool changing;
1784 
1785 	power_cond = (cmd[4] & 0xf0) >> 4;
1786 	if (power_cond) {
1787 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1788 		return check_condition_result;
1789 	}
1790 	want_stop = !(cmd[4] & 1);
1791 	stopped_state = atomic_read(&devip->stopped);
1792 	if (stopped_state == 2) {
1793 		ktime_t now_ts = ktime_get_boottime();
1794 
1795 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1796 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1797 
1798 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1799 				/* tur_ms_to_ready timer expired */
1800 				atomic_set(&devip->stopped, 0);
1801 				stopped_state = 0;
1802 			}
1803 		}
1804 		if (stopped_state == 2) {
1805 			if (want_stop) {
1806 				stopped_state = 1;	/* dummy up success */
1807 			} else {	/* disallow overriding the tur_ms_to_ready delay */
1808 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1809 				return check_condition_result;
1810 			}
1811 		}
1812 	}
1813 	changing = (stopped_state != want_stop);
1814 	if (changing)
1815 		atomic_xchg(&devip->stopped, want_stop);
1816 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1817 		return SDEG_RES_IMMED_MASK;
1818 	else
1819 		return 0;
1820 }
1821 
1822 static sector_t get_sdebug_capacity(void)
1823 {
1824 	static const unsigned int gibibyte = 1073741824;
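	/* e.g. virtual_gb == 1 with 512 byte sectors yields 2097152 sectors */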
1825 
1826 	if (sdebug_virtual_gb > 0)
1827 		return (sector_t)sdebug_virtual_gb *
1828 			(gibibyte / sdebug_sector_size);
1829 	else
1830 		return sdebug_store_sectors;
1831 }
1832 
1833 #define SDEBUG_READCAP_ARR_SZ 8
1834 static int resp_readcap(struct scsi_cmnd *scp,
1835 			struct sdebug_dev_info *devip)
1836 {
1837 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1838 	unsigned int capac;
1839 
1840 	/* following just in case virtual_gb changed */
1841 	sdebug_capacity = get_sdebug_capacity();
1842 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1843 	if (sdebug_capacity < 0xffffffff) {
1844 		capac = (unsigned int)sdebug_capacity - 1;
1845 		put_unaligned_be32(capac, arr + 0);
1846 	} else
1847 		put_unaligned_be32(0xffffffff, arr + 0);
1848 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1849 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1850 }
1851 
1852 #define SDEBUG_READCAP16_ARR_SZ 32
1853 static int resp_readcap16(struct scsi_cmnd *scp,
1854 			  struct sdebug_dev_info *devip)
1855 {
1856 	unsigned char *cmd = scp->cmnd;
1857 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1858 	int alloc_len;
1859 
1860 	alloc_len = get_unaligned_be32(cmd + 10);
1861 	/* following just in case virtual_gb changed */
1862 	sdebug_capacity = get_sdebug_capacity();
1863 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1864 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1865 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1866 	arr[13] = sdebug_physblk_exp & 0xf;
1867 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1868 
1869 	if (scsi_debug_lbp()) {
1870 		arr[14] |= 0x80; /* LBPME */
1871 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1872 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1873 		 * in the wider field maps to 0 in this field.
1874 		 */
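		/* e.g. lbprz 1 or 3 sets the bit below; lbprz 2 leaves it clear */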
1875 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1876 			arr[14] |= 0x40;
1877 	}
1878 
1879 	arr[15] = sdebug_lowest_aligned & 0xff;
1880 
1881 	if (have_dif_prot) {
1882 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1883 		arr[12] |= 1; /* PROT_EN */
1884 	}
1885 
1886 	return fill_from_dev_buffer(scp, arr,
1887 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1888 }
1889 
1890 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1891 
1892 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1893 			      struct sdebug_dev_info *devip)
1894 {
1895 	unsigned char *cmd = scp->cmnd;
1896 	unsigned char *arr;
1897 	int host_no = devip->sdbg_host->shost->host_no;
1898 	int n, ret, alen, rlen;
1899 	int port_group_a, port_group_b, port_a, port_b;
1900 
1901 	alen = get_unaligned_be32(cmd + 6);
1902 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1903 	if (!arr)
1904 		return DID_REQUEUE << 16;
1905 	/*
1906 	 * EVPD page 0x88 states we have two ports, one
1907 	 * real and a fake port with no device connected.
1908 	 * So we create two port groups with one port each
1909 	 * and set the group with port B to unavailable.
1910 	 */
1911 	port_a = 0x1; /* relative port A */
1912 	port_b = 0x2; /* relative port B */
1913 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1914 			(devip->channel & 0x7f);
1915 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1916 			(devip->channel & 0x7f) + 0x80;
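	/* e.g. host_no 0, channel 0: group 0x100 (port A) and 0x180 (port B) */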
1917 
1918 	/*
1919 	 * The asymmetric access state is cycled according to host_no.
1920 	 */
1921 	n = 4;
1922 	if (sdebug_vpd_use_hostno == 0) {
1923 		arr[n++] = host_no % 3; /* Asymm access state */
1924 		arr[n++] = 0x0F; /* claim: all states are supported */
1925 	} else {
1926 		arr[n++] = 0x0; /* Active/Optimized path */
1927 		arr[n++] = 0x01; /* only support active/optimized paths */
1928 	}
1929 	put_unaligned_be16(port_group_a, arr + n);
1930 	n += 2;
1931 	arr[n++] = 0;    /* Reserved */
1932 	arr[n++] = 0;    /* Status code */
1933 	arr[n++] = 0;    /* Vendor unique */
1934 	arr[n++] = 0x1;  /* One port per group */
1935 	arr[n++] = 0;    /* Reserved */
1936 	arr[n++] = 0;    /* Reserved */
1937 	put_unaligned_be16(port_a, arr + n);
1938 	n += 2;
1939 	arr[n++] = 3;    /* Port unavailable */
1940 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1941 	put_unaligned_be16(port_group_b, arr + n);
1942 	n += 2;
1943 	arr[n++] = 0;    /* Reserved */
1944 	arr[n++] = 0;    /* Status code */
1945 	arr[n++] = 0;    /* Vendor unique */
1946 	arr[n++] = 0x1;  /* One port per group */
1947 	arr[n++] = 0;    /* Reserved */
1948 	arr[n++] = 0;    /* Reserved */
1949 	put_unaligned_be16(port_b, arr + n);
1950 	n += 2;
1951 
1952 	rlen = n - 4;
1953 	put_unaligned_be32(rlen, arr + 0);
1954 
1955 	/*
1956 	 * Return the smallest of:
1957 	 * - the allocation length from the cdb
1958 	 * - the constructed response length
1959 	 * - the maximum array size
1960 	 */
1961 	rlen = min_t(int, alen, n);
1962 	ret = fill_from_dev_buffer(scp, arr,
1963 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1964 	kfree(arr);
1965 	return ret;
1966 }
1967 
1968 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1969 			     struct sdebug_dev_info *devip)
1970 {
1971 	bool rctd;
1972 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1973 	u16 req_sa, u;
1974 	u32 alloc_len, a_len;
1975 	int k, offset, len, errsts, count, bump, na;
1976 	const struct opcode_info_t *oip;
1977 	const struct opcode_info_t *r_oip;
1978 	u8 *arr;
1979 	u8 *cmd = scp->cmnd;
1980 
1981 	rctd = !!(cmd[2] & 0x80);
1982 	reporting_opts = cmd[2] & 0x7;
1983 	req_opcode = cmd[3];
1984 	req_sa = get_unaligned_be16(cmd + 4);
1985 	alloc_len = get_unaligned_be32(cmd + 6);
1986 	if (alloc_len < 4 || alloc_len > 0xffff) {
1987 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1988 		return check_condition_result;
1989 	}
1990 	if (alloc_len > 8192)
1991 		a_len = 8192;
1992 	else
1993 		a_len = alloc_len;
1994 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1995 	if (NULL == arr) {
1996 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1997 				INSUFF_RES_ASCQ);
1998 		return check_condition_result;
1999 	}
2000 	switch (reporting_opts) {
2001 	case 0:	/* all commands */
2002 		/* count number of commands */
2003 		for (count = 0, oip = opcode_info_arr;
2004 		     oip->num_attached != 0xff; ++oip) {
2005 			if (F_INV_OP & oip->flags)
2006 				continue;
2007 			count += (oip->num_attached + 1);
2008 		}
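		/*
		 * Each command descriptor is 8 bytes; when RCTD is set a
		 * 12 byte command timeouts descriptor follows each one,
		 * giving 20 bytes per command.
		 */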
2009 		bump = rctd ? 20 : 8;
2010 		put_unaligned_be32(count * bump, arr);
2011 		for (offset = 4, oip = opcode_info_arr;
2012 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2013 			if (F_INV_OP & oip->flags)
2014 				continue;
2015 			na = oip->num_attached;
2016 			arr[offset] = oip->opcode;
2017 			put_unaligned_be16(oip->sa, arr + offset + 2);
2018 			if (rctd)
2019 				arr[offset + 5] |= 0x2;
2020 			if (FF_SA & oip->flags)
2021 				arr[offset + 5] |= 0x1;
2022 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2023 			if (rctd)
2024 				put_unaligned_be16(0xa, arr + offset + 8);
2025 			r_oip = oip;
2026 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2027 				if (F_INV_OP & oip->flags)
2028 					continue;
2029 				offset += bump;
2030 				arr[offset] = oip->opcode;
2031 				put_unaligned_be16(oip->sa, arr + offset + 2);
2032 				if (rctd)
2033 					arr[offset + 5] |= 0x2;
2034 				if (FF_SA & oip->flags)
2035 					arr[offset + 5] |= 0x1;
2036 				put_unaligned_be16(oip->len_mask[0],
2037 						   arr + offset + 6);
2038 				if (rctd)
2039 					put_unaligned_be16(0xa,
2040 							   arr + offset + 8);
2041 			}
2042 			oip = r_oip;
2043 			offset += bump;
2044 		}
2045 		break;
2046 	case 1:	/* one command: opcode only */
2047 	case 2:	/* one command: opcode plus service action */
2048 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2049 		sdeb_i = opcode_ind_arr[req_opcode];
2050 		oip = &opcode_info_arr[sdeb_i];
2051 		if (F_INV_OP & oip->flags) {
2052 			supp = 1;
2053 			offset = 4;
2054 		} else {
2055 			if (1 == reporting_opts) {
2056 				if (FF_SA & oip->flags) {
2057 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2058 							     2, 2);
2059 					kfree(arr);
2060 					return check_condition_result;
2061 				}
2062 				req_sa = 0;
2063 			} else if (2 == reporting_opts &&
2064 				   0 == (FF_SA & oip->flags)) {
2065 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* point at requested sa */
2066 				kfree(arr);
2067 				return check_condition_result;
2068 			}
2069 			if (0 == (FF_SA & oip->flags) &&
2070 			    req_opcode == oip->opcode)
2071 				supp = 3;
2072 			else if (0 == (FF_SA & oip->flags)) {
2073 				na = oip->num_attached;
2074 				for (k = 0, oip = oip->arrp; k < na;
2075 				     ++k, ++oip) {
2076 					if (req_opcode == oip->opcode)
2077 						break;
2078 				}
2079 				supp = (k >= na) ? 1 : 3;
2080 			} else if (req_sa != oip->sa) {
2081 				na = oip->num_attached;
2082 				for (k = 0, oip = oip->arrp; k < na;
2083 				     ++k, ++oip) {
2084 					if (req_sa == oip->sa)
2085 						break;
2086 				}
2087 				supp = (k >= na) ? 1 : 3;
2088 			} else
2089 				supp = 3;
2090 			if (3 == supp) {
2091 				u = oip->len_mask[0];
2092 				put_unaligned_be16(u, arr + 2);
2093 				arr[4] = oip->opcode;
2094 				for (k = 1; k < u; ++k)
2095 					arr[4 + k] = (k < 16) ?
2096 						 oip->len_mask[k] : 0xff;
2097 				offset = 4 + u;
2098 			} else
2099 				offset = 4;
2100 		}
2101 		arr[1] = (rctd ? 0x80 : 0) | supp;
2102 		if (rctd) {
2103 			put_unaligned_be16(0xa, arr + offset);
2104 			offset += 12;
2105 		}
2106 		break;
2107 	default:
2108 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2109 		kfree(arr);
2110 		return check_condition_result;
2111 	}
2112 	offset = (offset < a_len) ? offset : a_len;
2113 	len = (offset < alloc_len) ? offset : alloc_len;
2114 	errsts = fill_from_dev_buffer(scp, arr, len);
2115 	kfree(arr);
2116 	return errsts;
2117 }
2118 
2119 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2120 			  struct sdebug_dev_info *devip)
2121 {
2122 	bool repd;
2123 	u32 alloc_len, len;
2124 	u8 arr[16];
2125 	u8 *cmd = scp->cmnd;
2126 
2127 	memset(arr, 0, sizeof(arr));
2128 	repd = !!(cmd[2] & 0x80);
2129 	alloc_len = get_unaligned_be32(cmd + 6);
2130 	if (alloc_len < 4) {
2131 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2132 		return check_condition_result;
2133 	}
2134 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2135 	arr[1] = 0x1;		/* ITNRS */
2136 	if (repd) {
2137 		arr[3] = 0xc;
2138 		len = 16;
2139 	} else
2140 		len = 4;
2141 
2142 	len = (len < alloc_len) ? len : alloc_len;
2143 	return fill_from_dev_buffer(scp, arr, len);
2144 }
2145 
2146 /* <<Following mode page info copied from ST318451LW>> */
2147 
2148 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2149 {	/* Read-Write Error Recovery page for mode_sense */
2150 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2151 					5, 0, 0xff, 0xff};
2152 
2153 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2154 	if (1 == pcontrol)
2155 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2156 	return sizeof(err_recov_pg);
2157 }
2158 
2159 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2160 { 	/* Disconnect-Reconnect page for mode_sense */
2161 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2162 					 0, 0, 0, 0, 0, 0, 0, 0};
2163 
2164 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2165 	if (1 == pcontrol)
2166 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2167 	return sizeof(disconnect_pg);
2168 }
2169 
2170 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2171 {       /* Format device page for mode_sense */
2172 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2173 				     0, 0, 0, 0, 0, 0, 0, 0,
2174 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2175 
2176 	memcpy(p, format_pg, sizeof(format_pg));
2177 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2178 	put_unaligned_be16(sdebug_sector_size, p + 12);
2179 	if (sdebug_removable)
2180 		p[20] |= 0x20; /* should agree with INQUIRY */
2181 	if (1 == pcontrol)
2182 		memset(p + 2, 0, sizeof(format_pg) - 2);
2183 	return sizeof(format_pg);
2184 }
2185 
2186 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2187 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2188 				     0, 0, 0, 0};
2189 
2190 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2191 { 	/* Caching page for mode_sense */
2192 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2193 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2194 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2195 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2196 
2197 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2198 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2199 	memcpy(p, caching_pg, sizeof(caching_pg));
2200 	if (1 == pcontrol)
2201 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2202 	else if (2 == pcontrol)
2203 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2204 	return sizeof(caching_pg);
2205 }
2206 
2207 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2208 				    0, 0, 0x2, 0x4b};
2209 
2210 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2211 { 	/* Control mode page for mode_sense */
2212 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2213 					0, 0, 0, 0};
2214 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2215 				     0, 0, 0x2, 0x4b};
2216 
2217 	if (sdebug_dsense)
2218 		ctrl_m_pg[2] |= 0x4;
2219 	else
2220 		ctrl_m_pg[2] &= ~0x4;
2221 
2222 	if (sdebug_ato)
2223 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2224 
2225 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2226 	if (1 == pcontrol)
2227 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2228 	else if (2 == pcontrol)
2229 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2230 	return sizeof(ctrl_m_pg);
2231 }
2232 
2233 
2234 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2235 {	/* Informational Exceptions control mode page for mode_sense */
2236 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2237 				       0, 0, 0x0, 0x0};
2238 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2239 				      0, 0, 0x0, 0x0};
2240 
2241 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2242 	if (1 == pcontrol)
2243 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2244 	else if (2 == pcontrol)
2245 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2246 	return sizeof(iec_m_pg);
2247 }
2248 
2249 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2250 {	/* SAS SSP mode page - short format for mode_sense */
2251 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2252 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2253 
2254 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2255 	if (1 == pcontrol)
2256 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2257 	return sizeof(sas_sf_m_pg);
2258 }
2259 
2260 
2261 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2262 			      int target_dev_id)
2263 {	/* SAS phy control and discover mode page for mode_sense */
2264 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2265 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2266 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2267 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2268 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2269 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2270 		    0, 0, 0, 0, 0, 0, 0, 0,
2271 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2272 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2273 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2274 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2275 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2276 		    0, 0, 0, 0, 0, 0, 0, 0,
2277 		};
2278 	int port_a, port_b;
2279 
2280 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2281 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2282 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2283 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2284 	port_a = target_dev_id + 1;
2285 	port_b = port_a + 1;
2286 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2287 	put_unaligned_be32(port_a, p + 20);
2288 	put_unaligned_be32(port_b, p + 48 + 20);
2289 	if (1 == pcontrol)
2290 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2291 	return sizeof(sas_pcd_m_pg);
2292 }
2293 
2294 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2295 {	/* SAS SSP shared protocol specific port mode subpage */
2296 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2297 		    0, 0, 0, 0, 0, 0, 0, 0,
2298 		};
2299 
2300 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2301 	if (1 == pcontrol)
2302 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2303 	return sizeof(sas_sha_m_pg);
2304 }
2305 
2306 #define SDEBUG_MAX_MSENSE_SZ 256
2307 
2308 static int resp_mode_sense(struct scsi_cmnd *scp,
2309 			   struct sdebug_dev_info *devip)
2310 {
2311 	int pcontrol, pcode, subpcode, bd_len;
2312 	unsigned char dev_spec;
2313 	int alloc_len, offset, len, target_dev_id;
2314 	int target = scp->device->id;
2315 	unsigned char *ap;
2316 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2317 	unsigned char *cmd = scp->cmnd;
2318 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2319 
2320 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2321 	pcontrol = (cmd[2] & 0xc0) >> 6;
2322 	pcode = cmd[2] & 0x3f;
2323 	subpcode = cmd[3];
2324 	msense_6 = (MODE_SENSE == cmd[0]);
2325 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2326 	is_disk = (sdebug_ptype == TYPE_DISK);
2327 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2328 	if ((is_disk || is_zbc) && !dbd)
2329 		bd_len = llbaa ? 16 : 8;
2330 	else
2331 		bd_len = 0;
2332 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2333 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2334 	if (0x3 == pcontrol) {  /* Saving values not supported */
2335 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2336 		return check_condition_result;
2337 	}
2338 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2339 			(devip->target * 1000) - 3;
2340 	/* for disks+zbc set DPOFUA bit; set write protect (WP) bit if sdebug_wp */
2341 	if (is_disk || is_zbc) {
2342 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2343 		if (sdebug_wp)
2344 			dev_spec |= 0x80;
2345 	} else
2346 		dev_spec = 0x0;
2347 	if (msense_6) {
2348 		arr[2] = dev_spec;
2349 		arr[3] = bd_len;
2350 		offset = 4;
2351 	} else {
2352 		arr[3] = dev_spec;
2353 		if (16 == bd_len)
2354 			arr[4] = 0x1;	/* set LONGLBA bit */
2355 		arr[7] = bd_len;	/* assume 255 or less */
2356 		offset = 8;
2357 	}
2358 	ap = arr + offset;
2359 	if ((bd_len > 0) && (!sdebug_capacity))
2360 		sdebug_capacity = get_sdebug_capacity();
2361 
2362 	if (8 == bd_len) {
2363 		if (sdebug_capacity > 0xfffffffe)
2364 			put_unaligned_be32(0xffffffff, ap + 0);
2365 		else
2366 			put_unaligned_be32(sdebug_capacity, ap + 0);
2367 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2368 		offset += bd_len;
2369 		ap = arr + offset;
2370 	} else if (16 == bd_len) {
2371 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2372 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2373 		offset += bd_len;
2374 		ap = arr + offset;
2375 	}
2376 
2377 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2378 		/* TODO: Control Extension page */
2379 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2380 		return check_condition_result;
2381 	}
2382 	bad_pcode = false;
2383 
2384 	switch (pcode) {
2385 	case 0x1:	/* Read-Write error recovery page, direct access */
2386 		len = resp_err_recov_pg(ap, pcontrol, target);
2387 		offset += len;
2388 		break;
2389 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2390 		len = resp_disconnect_pg(ap, pcontrol, target);
2391 		offset += len;
2392 		break;
2393 	case 0x3:       /* Format device page, direct access */
2394 		if (is_disk) {
2395 			len = resp_format_pg(ap, pcontrol, target);
2396 			offset += len;
2397 		} else
2398 			bad_pcode = true;
2399 		break;
2400 	case 0x8:	/* Caching page, direct access */
2401 		if (is_disk || is_zbc) {
2402 			len = resp_caching_pg(ap, pcontrol, target);
2403 			offset += len;
2404 		} else
2405 			bad_pcode = true;
2406 		break;
2407 	case 0xa:	/* Control Mode page, all devices */
2408 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2409 		offset += len;
2410 		break;
2411 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2412 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2413 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2414 			return check_condition_result;
2415 		}
2416 		len = 0;
2417 		if ((0x0 == subpcode) || (0xff == subpcode))
2418 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2419 		if ((0x1 == subpcode) || (0xff == subpcode))
2420 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2421 						  target_dev_id);
2422 		if ((0x2 == subpcode) || (0xff == subpcode))
2423 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2424 		offset += len;
2425 		break;
2426 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2427 		len = resp_iec_m_pg(ap, pcontrol, target);
2428 		offset += len;
2429 		break;
2430 	case 0x3f:	/* Read all Mode pages */
2431 		if ((0 == subpcode) || (0xff == subpcode)) {
2432 			len = resp_err_recov_pg(ap, pcontrol, target);
2433 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2434 			if (is_disk) {
2435 				len += resp_format_pg(ap + len, pcontrol,
2436 						      target);
2437 				len += resp_caching_pg(ap + len, pcontrol,
2438 						       target);
2439 			} else if (is_zbc) {
2440 				len += resp_caching_pg(ap + len, pcontrol,
2441 						       target);
2442 			}
2443 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2444 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2445 			if (0xff == subpcode) {
2446 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2447 						  target, target_dev_id);
2448 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2449 			}
2450 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2451 			offset += len;
2452 		} else {
2453 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2454 			return check_condition_result;
2455 		}
2456 		break;
2457 	default:
2458 		bad_pcode = true;
2459 		break;
2460 	}
2461 	if (bad_pcode) {
2462 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2463 		return check_condition_result;
2464 	}
2465 	if (msense_6)
2466 		arr[0] = offset - 1;
2467 	else
2468 		put_unaligned_be16((offset - 2), arr + 0);
2469 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2470 }
2471 
2472 #define SDEBUG_MAX_MSELECT_SZ 512
2473 
2474 static int resp_mode_select(struct scsi_cmnd *scp,
2475 			    struct sdebug_dev_info *devip)
2476 {
2477 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2478 	int param_len, res, mpage;
2479 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2480 	unsigned char *cmd = scp->cmnd;
2481 	int mselect6 = (MODE_SELECT == cmd[0]);
2482 
2483 	memset(arr, 0, sizeof(arr));
2484 	pf = cmd[1] & 0x10;
2485 	sp = cmd[1] & 0x1;
2486 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2487 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2488 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2489 		return check_condition_result;
2490 	}
2491 	res = fetch_to_dev_buffer(scp, arr, param_len);
2492 	if (-1 == res)
2493 		return DID_ERROR << 16;
2494 	else if (sdebug_verbose && (res < param_len))
2495 		sdev_printk(KERN_INFO, scp->device,
2496 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2497 			    __func__, param_len, res);
2498 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2499 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
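	/*
	 * Per SPC the MODE DATA LENGTH field is reserved in MODE SELECT
	 * parameter data; md_len above 2 implies a non-zero field and is
	 * rejected below.
	 */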
2500 	if (md_len > 2) {
2501 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2502 		return check_condition_result;
2503 	}
2504 	off = bd_len + (mselect6 ? 4 : 8);
2505 	mpage = arr[off] & 0x3f;
2506 	ps = !!(arr[off] & 0x80);
2507 	if (ps) {
2508 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2509 		return check_condition_result;
2510 	}
2511 	spf = !!(arr[off] & 0x40);
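	/*
	 * SPF set: sub-page format with a 2 byte PAGE LENGTH at bytes 2-3
	 * plus a 4 byte header; SPF clear: 1 byte PAGE LENGTH at byte 1
	 * plus a 2 byte header.
	 */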
2512 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2513 		       (arr[off + 1] + 2);
2514 	if ((pg_len + off) > param_len) {
2515 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2516 				PARAMETER_LIST_LENGTH_ERR, 0);
2517 		return check_condition_result;
2518 	}
2519 	switch (mpage) {
2520 	case 0x8:      /* Caching Mode page */
2521 		if (caching_pg[1] == arr[off + 1]) {
2522 			memcpy(caching_pg + 2, arr + off + 2,
2523 			       sizeof(caching_pg) - 2);
2524 			goto set_mode_changed_ua;
2525 		}
2526 		break;
2527 	case 0xa:      /* Control Mode page */
2528 		if (ctrl_m_pg[1] == arr[off + 1]) {
2529 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2530 			       sizeof(ctrl_m_pg) - 2);
2531 			sdebug_wp = !!(ctrl_m_pg[4] & 0x8);
2535 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2536 			goto set_mode_changed_ua;
2537 		}
2538 		break;
2539 	case 0x1c:      /* Informational Exceptions Mode page */
2540 		if (iec_m_pg[1] == arr[off + 1]) {
2541 			memcpy(iec_m_pg + 2, arr + off + 2,
2542 			       sizeof(iec_m_pg) - 2);
2543 			goto set_mode_changed_ua;
2544 		}
2545 		break;
2546 	default:
2547 		break;
2548 	}
2549 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2550 	return check_condition_result;
2551 set_mode_changed_ua:
2552 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2553 	return 0;
2554 }
2555 
2556 static int resp_temp_l_pg(unsigned char *arr)
2557 {
2558 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2559 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2560 		};
2561 
2562 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2563 	return sizeof(temp_l_pg);
2564 }
2565 
2566 static int resp_ie_l_pg(unsigned char *arr)
2567 {
2568 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2569 		};
2570 
2571 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2572 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2573 		arr[4] = THRESHOLD_EXCEEDED;
2574 		arr[5] = 0xff;
2575 	}
2576 	return sizeof(ie_l_pg);
2577 }
2578 
2579 #define SDEBUG_MAX_LSENSE_SZ 512
2580 
2581 static int resp_log_sense(struct scsi_cmnd *scp,
2582 			  struct sdebug_dev_info *devip)
2583 {
2584 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2585 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2586 	unsigned char *cmd = scp->cmnd;
2587 
2588 	memset(arr, 0, sizeof(arr));
2589 	ppc = cmd[1] & 0x2;
2590 	sp = cmd[1] & 0x1;
2591 	if (ppc || sp) {
2592 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2593 		return check_condition_result;
2594 	}
2595 	pcode = cmd[2] & 0x3f;
2596 	subpcode = cmd[3] & 0xff;
2597 	alloc_len = get_unaligned_be16(cmd + 7);
2598 	arr[0] = pcode;
2599 	if (0 == subpcode) {
2600 		switch (pcode) {
2601 		case 0x0:	/* Supported log pages log page */
2602 			n = 4;
2603 			arr[n++] = 0x0;		/* this page */
2604 			arr[n++] = 0xd;		/* Temperature */
2605 			arr[n++] = 0x2f;	/* Informational exceptions */
2606 			arr[3] = n - 4;
2607 			break;
2608 		case 0xd:	/* Temperature log page */
2609 			arr[3] = resp_temp_l_pg(arr + 4);
2610 			break;
2611 		case 0x2f:	/* Informational exceptions log page */
2612 			arr[3] = resp_ie_l_pg(arr + 4);
2613 			break;
2614 		default:
2615 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2616 			return check_condition_result;
2617 		}
2618 	} else if (0xff == subpcode) {
2619 		arr[0] |= 0x40;
2620 		arr[1] = subpcode;
2621 		switch (pcode) {
2622 		case 0x0:	/* Supported log pages and subpages log page */
2623 			n = 4;
2624 			arr[n++] = 0x0;
2625 			arr[n++] = 0x0;		/* 0,0 page */
2626 			arr[n++] = 0x0;
2627 			arr[n++] = 0xff;	/* this page */
2628 			arr[n++] = 0xd;
2629 			arr[n++] = 0x0;		/* Temperature */
2630 			arr[n++] = 0x2f;
2631 			arr[n++] = 0x0;	/* Informational exceptions */
2632 			arr[3] = n - 4;
2633 			break;
2634 		case 0xd:	/* Temperature subpages */
2635 			n = 4;
2636 			arr[n++] = 0xd;
2637 			arr[n++] = 0x0;		/* Temperature */
2638 			arr[3] = n - 4;
2639 			break;
2640 		case 0x2f:	/* Informational exceptions subpages */
2641 			n = 4;
2642 			arr[n++] = 0x2f;
2643 			arr[n++] = 0x0;		/* Informational exceptions */
2644 			arr[3] = n - 4;
2645 			break;
2646 		default:
2647 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2648 			return check_condition_result;
2649 		}
2650 	} else {
2651 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2652 		return check_condition_result;
2653 	}
2654 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2655 	return fill_from_dev_buffer(scp, arr,
2656 		    min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2657 }
2658 
2659 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2660 {
2661 	return devip->nr_zones != 0;
2662 }
2663 
2664 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2665 					unsigned long long lba)
2666 {
2667 	return &devip->zstate[lba >> devip->zsize_shift];
2668 }
2669 
2670 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2671 {
2672 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2673 }
2674 
2675 static void zbc_close_zone(struct sdebug_dev_info *devip,
2676 			   struct sdeb_zone_state *zsp)
2677 {
2678 	enum sdebug_z_cond zc;
2679 
2680 	if (zbc_zone_is_conv(zsp))
2681 		return;
2682 
2683 	zc = zsp->z_cond;
2684 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2685 		return;
2686 
2687 	if (zc == ZC2_IMPLICIT_OPEN)
2688 		devip->nr_imp_open--;
2689 	else
2690 		devip->nr_exp_open--;
2691 
2692 	if (zsp->z_wp == zsp->z_start) {
2693 		zsp->z_cond = ZC1_EMPTY;
2694 	} else {
2695 		zsp->z_cond = ZC4_CLOSED;
2696 		devip->nr_closed++;
2697 	}
2698 }
2699 
2700 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2701 {
2702 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2703 	unsigned int i;
2704 
2705 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2706 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2707 			zbc_close_zone(devip, zsp);
2708 			return;
2709 		}
2710 	}
2711 }
2712 
2713 static void zbc_open_zone(struct sdebug_dev_info *devip,
2714 			  struct sdeb_zone_state *zsp, bool explicit)
2715 {
2716 	enum sdebug_z_cond zc;
2717 
2718 	if (zbc_zone_is_conv(zsp))
2719 		return;
2720 
2721 	zc = zsp->z_cond;
2722 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2723 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2724 		return;
2725 
2726 	/* Close an implicit open zone if necessary */
2727 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2728 		zbc_close_zone(devip, zsp);
2729 	else if (devip->max_open &&
2730 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2731 		zbc_close_imp_open_zone(devip);
2732 
2733 	if (zsp->z_cond == ZC4_CLOSED)
2734 		devip->nr_closed--;
2735 	if (explicit) {
2736 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2737 		devip->nr_exp_open++;
2738 	} else {
2739 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2740 		devip->nr_imp_open++;
2741 	}
2742 }
2743 
2744 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2745 		       unsigned long long lba, unsigned int num)
2746 {
2747 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2748 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2749 
2750 	if (zbc_zone_is_conv(zsp))
2751 		return;
2752 
2753 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2754 		zsp->z_wp += num;
2755 		if (zsp->z_wp >= zend)
2756 			zsp->z_cond = ZC5_FULL;
2757 		return;
2758 	}
2759 
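	/*
	 * Remaining zone types here are sequential write preferred (host
	 * aware): a write may cross zone boundaries, so advance each
	 * zone's WP in turn and flag writes that do not start at the WP
	 * as non-sequential resources.
	 */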
2760 	while (num) {
2761 		if (lba != zsp->z_wp)
2762 			zsp->z_non_seq_resource = true;
2763 
2764 		end = lba + num;
2765 		if (end >= zend) {
2766 			n = zend - lba;
2767 			zsp->z_wp = zend;
2768 		} else if (end > zsp->z_wp) {
2769 			n = num;
2770 			zsp->z_wp = end;
2771 		} else {
2772 			n = num;
2773 		}
2774 		if (zsp->z_wp >= zend)
2775 			zsp->z_cond = ZC5_FULL;
2776 
2777 		num -= n;
2778 		lba += n;
2779 		if (num) {
2780 			zsp++;
2781 			zend = zsp->z_start + zsp->z_size;
2782 		}
2783 	}
2784 }
2785 
2786 static int check_zbc_access_params(struct scsi_cmnd *scp,
2787 			unsigned long long lba, unsigned int num, bool write)
2788 {
2789 	struct scsi_device *sdp = scp->device;
2790 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2791 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2792 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2793 
2794 	if (!write) {
2795 		if (devip->zmodel == BLK_ZONED_HA)
2796 			return 0;
2797 		/* For host-managed, reads cannot cross zone type boundaries */
2798 		if (zsp_end != zsp &&
2799 		    zbc_zone_is_conv(zsp) &&
2800 		    !zbc_zone_is_conv(zsp_end)) {
2801 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2802 					LBA_OUT_OF_RANGE,
2803 					READ_INVDATA_ASCQ);
2804 			return check_condition_result;
2805 		}
2806 		return 0;
2807 	}
2808 
2809 	/* No restrictions for writes within conventional zones */
2810 	if (zbc_zone_is_conv(zsp)) {
2811 		if (!zbc_zone_is_conv(zsp_end)) {
2812 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2813 					LBA_OUT_OF_RANGE,
2814 					WRITE_BOUNDARY_ASCQ);
2815 			return check_condition_result;
2816 		}
2817 		return 0;
2818 	}
2819 
2820 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2821 		/* Writes cannot cross sequential zone boundaries */
2822 		if (zsp_end != zsp) {
2823 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2824 					LBA_OUT_OF_RANGE,
2825 					WRITE_BOUNDARY_ASCQ);
2826 			return check_condition_result;
2827 		}
2828 		/* Cannot write full zones */
2829 		if (zsp->z_cond == ZC5_FULL) {
2830 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2831 					INVALID_FIELD_IN_CDB, 0);
2832 			return check_condition_result;
2833 		}
2834 		/* Writes must be aligned to the zone WP */
2835 		if (lba != zsp->z_wp) {
2836 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 					LBA_OUT_OF_RANGE,
2838 					UNALIGNED_WRITE_ASCQ);
2839 			return check_condition_result;
2840 		}
2841 	}
2842 
2843 	/* Handle implicit open of closed and empty zones */
2844 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2845 		if (devip->max_open &&
2846 		    devip->nr_exp_open >= devip->max_open) {
2847 			mk_sense_buffer(scp, DATA_PROTECT,
2848 					INSUFF_RES_ASC,
2849 					INSUFF_ZONE_ASCQ);
2850 			return check_condition_result;
2851 		}
2852 		zbc_open_zone(devip, zsp, false);
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 static inline int check_device_access_params(struct scsi_cmnd *scp,
2859 			unsigned long long lba, unsigned int num,
2860 			bool write)
2861 {
2862 	struct scsi_device *sdp = scp->device;
2863 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2864 
2865 	if (lba + num > sdebug_capacity) {
2866 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2867 		return check_condition_result;
2868 	}
2869 	/* transfer length excessive (tie in to block limits VPD page) */
2870 	if (num > sdebug_store_sectors) {
2871 		/* needs work to find which cdb byte 'num' comes from */
2872 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2873 		return check_condition_result;
2874 	}
2875 	if (write && unlikely(sdebug_wp)) {
2876 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2877 		return check_condition_result;
2878 	}
2879 	if (sdebug_dev_is_zoned(devip))
2880 		return check_zbc_access_params(scp, lba, num, write);
2881 
2882 	return 0;
2883 }
2884 
2885 /*
2886  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2887  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2888  * that access any of the "stores" in struct sdeb_store_info should call this
2889  * function with bug_if_fake_rw set to true.
2890  */
2891 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2892 						bool bug_if_fake_rw)
2893 {
2894 	if (sdebug_fake_rw) {
2895 		BUG_ON(bug_if_fake_rw);	/* See note above */
2896 		return NULL;
2897 	}
2898 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2899 }
2900 
2901 /* Returns number of bytes copied or -1 if error. */
2902 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2903 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2904 {
2905 	int ret;
2906 	u64 block, rest = 0;
2907 	enum dma_data_direction dir;
2908 	struct scsi_data_buffer *sdb = &scp->sdb;
2909 	u8 *fsp;
2910 
2911 	if (do_write) {
2912 		dir = DMA_TO_DEVICE;
2913 		write_since_sync = true;
2914 	} else {
2915 		dir = DMA_FROM_DEVICE;
2916 	}
2917 
2918 	if (!sdb->length || !sip)
2919 		return 0;
2920 	if (scp->sc_data_direction != dir)
2921 		return -1;
2922 	fsp = sip->storep;
2923 
2924 	block = do_div(lba, sdebug_store_sectors);
2925 	if (block + num > sdebug_store_sectors)
2926 		rest = block + num - sdebug_store_sectors;
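	/*
	 * do_div() leaves the quotient in lba and returns the remainder,
	 * i.e. the starting block within the store. Wrap-around example:
	 * a 1000 sector store with block 998 and num 5 copies 2 sectors
	 * at the end of the store and rest = 3 from its start.
	 */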
2927 
2928 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2929 		   fsp + (block * sdebug_sector_size),
2930 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2931 	if (ret != (num - rest) * sdebug_sector_size)
2932 		return ret;
2933 
2934 	if (rest) {
2935 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2936 			    fsp, rest * sdebug_sector_size,
2937 			    sg_skip + ((num - rest) * sdebug_sector_size),
2938 			    do_write);
2939 	}
2940 
2941 	return ret;
2942 }
2943 
2944 /* Returns number of bytes copied or -1 if error. */
2945 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2946 {
2947 	struct scsi_data_buffer *sdb = &scp->sdb;
2948 
2949 	if (!sdb->length)
2950 		return 0;
2951 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2952 		return -1;
2953 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2954 			      num * sdebug_sector_size, 0, true);
2955 }
2956 
2957 /* If the store starting at lba compares equal to the first num blocks
2958  * of arr, copy the second num blocks of arr into the store at lba and
2959  * return true (no copy when compare_only). On miscompare return false. */
2960 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2961 			      const u8 *arr, bool compare_only)
2962 {
2963 	bool res;
2964 	u64 block, rest = 0;
2965 	u32 store_blks = sdebug_store_sectors;
2966 	u32 lb_size = sdebug_sector_size;
2967 	u8 *fsp = sip->storep;
2968 
2969 	block = do_div(lba, store_blks);
2970 	if (block + num > store_blks)
2971 		rest = block + num - store_blks;
2972 
2973 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2974 	if (!res)
2975 		return res;
2976 	if (rest)
2977 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2978 			     rest * lb_size);
2979 	if (!res)
2980 		return res;
2981 	if (compare_only)
2982 		return true;
2983 	arr += num * lb_size;
2984 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2985 	if (rest)
2986 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2987 	return res;
2988 }
2989 
2990 static __be16 dif_compute_csum(const void *buf, int len)
2991 {
2992 	__be16 csum;
2993 
2994 	if (sdebug_guard)
2995 		csum = (__force __be16)ip_compute_csum(buf, len);
2996 	else
2997 		csum = cpu_to_be16(crc_t10dif(buf, len));
2998 
2999 	return csum;
3000 }
3001 
3002 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3003 		      sector_t sector, u32 ei_lba)
3004 {
3005 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3006 
3007 	if (sdt->guard_tag != csum) {
3008 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3009 			(unsigned long)sector,
3010 			be16_to_cpu(sdt->guard_tag),
3011 			be16_to_cpu(csum));
3012 		return 0x01;
3013 	}
3014 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3015 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3016 		pr_err("REF check failed on sector %lu\n",
3017 			(unsigned long)sector);
3018 		return 0x03;
3019 	}
3020 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3021 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3022 		pr_err("REF check failed on sector %lu\n",
3023 			(unsigned long)sector);
3024 		return 0x03;
3025 	}
3026 	return 0;
3027 }
3028 
3029 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3030 			  unsigned int sectors, bool read)
3031 {
3032 	size_t resid;
3033 	void *paddr;
3034 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3035 						scp->device->hostdata, true);
3036 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3037 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3038 	struct sg_mapping_iter miter;
3039 
3040 	/* Bytes of protection data to copy into sgl */
3041 	resid = sectors * sizeof(*dif_storep);
3042 
3043 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3044 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3045 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3046 
3047 	while (sg_miter_next(&miter) && resid > 0) {
3048 		size_t len = min_t(size_t, miter.length, resid);
3049 		void *start = dif_store(sip, sector);
3050 		size_t rest = 0;
3051 
3052 		if (dif_store_end < start + len)
3053 			rest = start + len - dif_store_end;
3054 
3055 		paddr = miter.addr;
3056 
3057 		if (read)
3058 			memcpy(paddr, start, len - rest);
3059 		else
3060 			memcpy(start, paddr, len - rest);
3061 
3062 		if (rest) {
3063 			if (read)
3064 				memcpy(paddr + len - rest, dif_storep, rest);
3065 			else
3066 				memcpy(dif_storep, paddr + len - rest, rest);
3067 		}
3068 
3069 		sector += len / sizeof(*dif_storep);
3070 		resid -= len;
3071 	}
3072 	sg_miter_stop(&miter);
3073 }
3074 
3075 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3076 			    unsigned int sectors, u32 ei_lba)
3077 {
3078 	unsigned int i;
3079 	sector_t sector;
3080 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3081 						scp->device->hostdata, true);
3082 	struct t10_pi_tuple *sdt;
3083 
3084 	for (i = 0; i < sectors; i++, ei_lba++) {
3085 		int ret;
3086 
3087 		sector = start_sec + i;
3088 		sdt = dif_store(sip, sector);
3089 
3090 		if (sdt->app_tag == cpu_to_be16(0xffff))
3091 			continue;
3092 
3093 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3094 				 ei_lba);
3095 		if (ret) {
3096 			dif_errors++;
3097 			return ret;
3098 		}
3099 	}
3100 
3101 	dif_copy_prot(scp, start_sec, sectors, true);
3102 	dix_reads++;
3103 
3104 	return 0;
3105 }
3106 
3107 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3108 {
3109 	bool check_prot;
3110 	u32 num;
3111 	u32 ei_lba;
3112 	int ret;
3113 	u64 lba;
3114 	struct sdeb_store_info *sip = devip2sip(devip, true);
3115 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3116 	u8 *cmd = scp->cmnd;
3117 
3118 	switch (cmd[0]) {
3119 	case READ_16:
3120 		ei_lba = 0;
3121 		lba = get_unaligned_be64(cmd + 2);
3122 		num = get_unaligned_be32(cmd + 10);
3123 		check_prot = true;
3124 		break;
3125 	case READ_10:
3126 		ei_lba = 0;
3127 		lba = get_unaligned_be32(cmd + 2);
3128 		num = get_unaligned_be16(cmd + 7);
3129 		check_prot = true;
3130 		break;
3131 	case READ_6:
3132 		ei_lba = 0;
3133 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3134 		      (u32)(cmd[1] & 0x1f) << 16;
3135 		num = (0 == cmd[4]) ? 256 : cmd[4];
3136 		check_prot = true;
3137 		break;
3138 	case READ_12:
3139 		ei_lba = 0;
3140 		lba = get_unaligned_be32(cmd + 2);
3141 		num = get_unaligned_be32(cmd + 6);
3142 		check_prot = true;
3143 		break;
3144 	case XDWRITEREAD_10:
3145 		ei_lba = 0;
3146 		lba = get_unaligned_be32(cmd + 2);
3147 		num = get_unaligned_be16(cmd + 7);
3148 		check_prot = false;
3149 		break;
3150 	default:	/* assume READ(32) */
3151 		lba = get_unaligned_be64(cmd + 12);
3152 		ei_lba = get_unaligned_be32(cmd + 20);
3153 		num = get_unaligned_be32(cmd + 28);
3154 		check_prot = false;
3155 		break;
3156 	}
3157 	if (unlikely(have_dif_prot && check_prot)) {
3158 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3159 		    (cmd[1] & 0xe0)) {
3160 			mk_sense_invalid_opcode(scp);
3161 			return check_condition_result;
3162 		}
3163 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3164 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3165 		    (cmd[1] & 0xe0) == 0)
3166 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3167 				    "to DIF device\n");
3168 	}
3169 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3170 		     atomic_read(&sdeb_inject_pending))) {
3171 		num /= 2;
3172 		atomic_set(&sdeb_inject_pending, 0);
3173 	}
3174 
3175 	ret = check_device_access_params(scp, lba, num, false);
3176 	if (ret)
3177 		return ret;
3178 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3179 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3180 		     ((lba + num) > sdebug_medium_error_start))) {
3181 		/* claim unrecoverable read error */
3182 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3183 		/* set info field and valid bit for fixed descriptor */
3184 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3185 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3186 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3187 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3188 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3189 		}
3190 		scsi_set_resid(scp, scsi_bufflen(scp));
3191 		return check_condition_result;
3192 	}
3193 
3194 	read_lock(macc_lckp);
3195 
3196 	/* DIX + T10 DIF */
3197 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3198 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3199 
3200 		if (prot_ret) {
3201 			read_unlock(macc_lckp);
3202 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3203 			return illegal_condition_result;
3204 		}
3205 	}
3206 
3207 	ret = do_device_access(sip, scp, 0, lba, num, false);
3208 	read_unlock(macc_lckp);
3209 	if (unlikely(ret == -1))
3210 		return DID_ERROR << 16;
3211 
3212 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3213 
3214 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3215 		     atomic_read(&sdeb_inject_pending))) {
3216 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3217 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3218 			atomic_set(&sdeb_inject_pending, 0);
3219 			return check_condition_result;
3220 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3221 			/* Logical block guard check failed */
3222 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3223 			atomic_set(&sdeb_inject_pending, 0);
3224 			return illegal_condition_result;
3225 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3226 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3227 			atomic_set(&sdeb_inject_pending, 0);
3228 			return illegal_condition_result;
3229 		}
3230 	}
3231 	return 0;
3232 }
3233 
3234 static void dump_sector(unsigned char *buf, int len)
3235 {
3236 	int i, j, n;
3237 
3238 	pr_err(">>> Sector Dump <<<\n");
3239 	for (i = 0 ; i < len ; i += 16) {
3240 		char b[128];
3241 
3242 		for (j = 0, n = 0; j < 16; j++) {
3243 			unsigned char c = buf[i+j];
3244 
3245 			if (c >= 0x20 && c < 0x7e)
3246 				n += scnprintf(b + n, sizeof(b) - n,
3247 					       " %c ", buf[i+j]);
3248 			else
3249 				n += scnprintf(b + n, sizeof(b) - n,
3250 					       "%02x ", buf[i+j]);
3251 		}
3252 		pr_err("%04d: %s\n", i, b);
3253 	}
3254 }
3255 
3256 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3257 			     unsigned int sectors, u32 ei_lba)
3258 {
3259 	int ret;
3260 	struct t10_pi_tuple *sdt;
3261 	void *daddr;
3262 	sector_t sector = start_sec;
3263 	int ppage_offset;
3264 	int dpage_offset;
3265 	struct sg_mapping_iter diter;
3266 	struct sg_mapping_iter piter;
3267 
3268 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3269 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3270 
3271 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3272 			scsi_prot_sg_count(SCpnt),
3273 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3274 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3275 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3276 
3277 	/* For each protection page */
3278 	while (sg_miter_next(&piter)) {
3279 		dpage_offset = 0;
3280 		if (WARN_ON(!sg_miter_next(&diter))) {
3281 			ret = 0x01;
3282 			goto out;
3283 		}
3284 
3285 		for (ppage_offset = 0; ppage_offset < piter.length;
3286 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3287 			/* If we're at the end of the current
3288 			 * data page, advance to the next one
3289 			 */
3290 			if (dpage_offset >= diter.length) {
3291 				if (WARN_ON(!sg_miter_next(&diter))) {
3292 					ret = 0x01;
3293 					goto out;
3294 				}
3295 				dpage_offset = 0;
3296 			}
3297 
3298 			sdt = piter.addr + ppage_offset;
3299 			daddr = diter.addr + dpage_offset;
3300 
3301 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3302 			if (ret) {
3303 				dump_sector(daddr, sdebug_sector_size);
3304 				goto out;
3305 			}
3306 
3307 			sector++;
3308 			ei_lba++;
3309 			dpage_offset += sdebug_sector_size;
3310 		}
3311 		diter.consumed = dpage_offset;
3312 		sg_miter_stop(&diter);
3313 	}
3314 	sg_miter_stop(&piter);
3315 
3316 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3317 	dix_writes++;
3318 
3319 	return 0;
3320 
3321 out:
3322 	dif_errors++;
3323 	sg_miter_stop(&diter);
3324 	sg_miter_stop(&piter);
3325 	return ret;
3326 }
3327 
3328 static unsigned long lba_to_map_index(sector_t lba)
3329 {
3330 	if (sdebug_unmap_alignment)
3331 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3332 	sector_div(lba, sdebug_unmap_granularity);
3333 	return lba;
3334 }
3335 
3336 static sector_t map_index_to_lba(unsigned long index)
3337 {
3338 	sector_t lba = index * sdebug_unmap_granularity;
3339 
3340 	if (sdebug_unmap_alignment)
3341 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3342 	return lba;
3343 }
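
/*
 * Worked example (illustrative): with sdebug_unmap_granularity = 4 and
 * sdebug_unmap_alignment = 1, lba_to_map_index() sends LBA 0 to index 0
 * (the short leading chunk), LBAs 1-4 to index 1 and LBAs 5-8 to index 2;
 * map_index_to_lba(1) returns 1, the first LBA of its chunk.
 */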
3344 
3345 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3346 			      unsigned int *num)
3347 {
3348 	sector_t end;
3349 	unsigned int mapped;
3350 	unsigned long index;
3351 	unsigned long next;
3352 
3353 	index = lba_to_map_index(lba);
3354 	mapped = test_bit(index, sip->map_storep);
3355 
3356 	if (mapped)
3357 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3358 	else
3359 		next = find_next_bit(sip->map_storep, map_size, index);
3360 
3361 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3362 	*num = end - lba;
3363 	return mapped;
3364 }
3365 
3366 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3367 		       unsigned int len)
3368 {
3369 	sector_t end = lba + len;
3370 
3371 	while (lba < end) {
3372 		unsigned long index = lba_to_map_index(lba);
3373 
3374 		if (index < map_size)
3375 			set_bit(index, sip->map_storep);
3376 
3377 		lba = map_index_to_lba(index + 1);
3378 	}
3379 }
3380 
3381 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3382 			 unsigned int len)
3383 {
3384 	sector_t end = lba + len;
3385 	u8 *fsp = sip->storep;
3386 
3387 	while (lba < end) {
3388 		unsigned long index = lba_to_map_index(lba);
3389 
3390 		if (lba == map_index_to_lba(index) &&
3391 		    lba + sdebug_unmap_granularity <= end &&
3392 		    index < map_size) {
3393 			clear_bit(index, sip->map_storep);
3394 			if (sdebug_lbprz) {  /* LBPRZ=1: read back 0x00s; LBPRZ=2: 0xffs */
3395 				memset(fsp + lba * sdebug_sector_size,
3396 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3397 				       sdebug_sector_size *
3398 				       sdebug_unmap_granularity);
3399 			}
3400 			if (sip->dif_storep) {
3401 				memset(sip->dif_storep + lba, 0xff,
3402 				       sizeof(*sip->dif_storep) *
3403 				       sdebug_unmap_granularity);
3404 			}
3405 		}
3406 		lba = map_index_to_lba(index + 1);
3407 	}
3408 }
3409 
3410 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3411 {
3412 	bool check_prot;
3413 	u32 num;
3414 	u32 ei_lba;
3415 	int ret;
3416 	u64 lba;
3417 	struct sdeb_store_info *sip = devip2sip(devip, true);
3418 	rwlock_t *macc_lckp = &sip->macc_lck;
3419 	u8 *cmd = scp->cmnd;
3420 
3421 	switch (cmd[0]) {
3422 	case WRITE_16:
3423 		ei_lba = 0;
3424 		lba = get_unaligned_be64(cmd + 2);
3425 		num = get_unaligned_be32(cmd + 10);
3426 		check_prot = true;
3427 		break;
3428 	case WRITE_10:
3429 		ei_lba = 0;
3430 		lba = get_unaligned_be32(cmd + 2);
3431 		num = get_unaligned_be16(cmd + 7);
3432 		check_prot = true;
3433 		break;
3434 	case WRITE_6:
3435 		ei_lba = 0;
3436 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3437 		      (u32)(cmd[1] & 0x1f) << 16;
3438 		num = (0 == cmd[4]) ? 256 : cmd[4];
3439 		check_prot = true;
3440 		break;
3441 	case WRITE_12:
3442 		ei_lba = 0;
3443 		lba = get_unaligned_be32(cmd + 2);
3444 		num = get_unaligned_be32(cmd + 6);
3445 		check_prot = true;
3446 		break;
3447 	case 0x53:	/* XDWRITEREAD(10) */
3448 		ei_lba = 0;
3449 		lba = get_unaligned_be32(cmd + 2);
3450 		num = get_unaligned_be16(cmd + 7);
3451 		check_prot = false;
3452 		break;
3453 	default:	/* assume WRITE(32) */
3454 		lba = get_unaligned_be64(cmd + 12);
3455 		ei_lba = get_unaligned_be32(cmd + 20);
3456 		num = get_unaligned_be32(cmd + 28);
3457 		check_prot = false;
3458 		break;
3459 	}
3460 	if (unlikely(have_dif_prot && check_prot)) {
3461 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3462 		    (cmd[1] & 0xe0)) {
3463 			mk_sense_invalid_opcode(scp);
3464 			return check_condition_result;
3465 		}
3466 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3467 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3468 		    (cmd[1] & 0xe0) == 0)
3469 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3470 				    "to DIF device\n");
3471 	}
3472 
3473 	write_lock(macc_lckp);
3474 	ret = check_device_access_params(scp, lba, num, true);
3475 	if (ret) {
3476 		write_unlock(macc_lckp);
3477 		return ret;
3478 	}
3479 
3480 	/* DIX + T10 DIF */
3481 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3482 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3483 
3484 		if (prot_ret) {
3485 			write_unlock(macc_lckp);
3486 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3487 			return illegal_condition_result;
3488 		}
3489 	}
3490 
3491 	ret = do_device_access(sip, scp, 0, lba, num, true);
3492 	if (unlikely(scsi_debug_lbp()))
3493 		map_region(sip, lba, num);
3494 	/* If ZBC zone then bump its write pointer */
3495 	if (sdebug_dev_is_zoned(devip))
3496 		zbc_inc_wp(devip, lba, num);
3497 	write_unlock(macc_lckp);
3498 	if (unlikely(-1 == ret))
3499 		return DID_ERROR << 16;
3500 	else if (unlikely(sdebug_verbose &&
3501 			  (ret < (num * sdebug_sector_size))))
3502 		sdev_printk(KERN_INFO, scp->device,
3503 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3504 			    my_name, num * sdebug_sector_size, ret);
3505 
3506 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3507 		     atomic_read(&sdeb_inject_pending))) {
3508 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3509 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3510 			atomic_set(&sdeb_inject_pending, 0);
3511 			return check_condition_result;
3512 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3513 			/* Logical block guard check failed */
3514 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3515 			atomic_set(&sdeb_inject_pending, 0);
3516 			return illegal_condition_result;
3517 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3518 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3519 			atomic_set(&sdeb_inject_pending, 0);
3520 			return illegal_condition_result;
3521 		}
3522 	}
3523 	return 0;
3524 }
3525 
3526 /*
3527  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3528  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3529  */
3530 static int resp_write_scat(struct scsi_cmnd *scp,
3531 			   struct sdebug_dev_info *devip)
3532 {
3533 	u8 *cmd = scp->cmnd;
3534 	u8 *lrdp = NULL;
3535 	u8 *up;
3536 	struct sdeb_store_info *sip = devip2sip(devip, true);
3537 	rwlock_t *macc_lckp = &sip->macc_lck;
3538 	u8 wrprotect;
3539 	u16 lbdof, num_lrd, k;
3540 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3541 	u32 lb_size = sdebug_sector_size;
3542 	u32 ei_lba;
3543 	u64 lba;
3544 	int ret, res;
3545 	bool is_16;
3546 	static const u32 lrd_size = 32; /* also the size of the parameter list header */
3547 
3548 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3549 		is_16 = false;
3550 		wrprotect = (cmd[10] >> 5) & 0x7;
3551 		lbdof = get_unaligned_be16(cmd + 12);
3552 		num_lrd = get_unaligned_be16(cmd + 16);
3553 		bt_len = get_unaligned_be32(cmd + 28);
3554 	} else {        /* that leaves WRITE SCATTERED(16) */
3555 		is_16 = true;
3556 		wrprotect = (cmd[2] >> 5) & 0x7;
3557 		lbdof = get_unaligned_be16(cmd + 4);
3558 		num_lrd = get_unaligned_be16(cmd + 8);
3559 		bt_len = get_unaligned_be32(cmd + 10);
3560 		if (unlikely(have_dif_prot)) {
3561 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3562 			    wrprotect) {
3563 				mk_sense_invalid_opcode(scp);
3564 				return illegal_condition_result;
3565 			}
3566 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3567 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3568 			     wrprotect == 0)
3569 				sdev_printk(KERN_ERR, scp->device,
3570 					    "Unprotected WR to DIF device\n");
3571 		}
3572 	}
3573 	if ((num_lrd == 0) || (bt_len == 0))
3574 		return 0;       /* T10 says these do-nothings are not errors */
3575 	if (lbdof == 0) {
3576 		if (sdebug_verbose)
3577 			sdev_printk(KERN_INFO, scp->device,
3578 				"%s: %s: LB Data Offset field bad\n",
3579 				my_name, __func__);
3580 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3581 		return illegal_condition_result;
3582 	}
3583 	lbdof_blen = lbdof * lb_size;
3584 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3585 		if (sdebug_verbose)
3586 			sdev_printk(KERN_INFO, scp->device,
3587 				"%s: %s: LBA range descriptors don't fit\n",
3588 				my_name, __func__);
3589 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3590 		return illegal_condition_result;
3591 	}
3592 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3593 	if (lrdp == NULL)
3594 		return SCSI_MLQUEUE_HOST_BUSY;
3595 	if (sdebug_verbose)
3596 		sdev_printk(KERN_INFO, scp->device,
3597 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3598 			my_name, __func__, lbdof_blen);
3599 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3600 	if (res == -1) {
3601 		ret = DID_ERROR << 16;
3602 		goto err_out;
3603 	}
3604 
3605 	write_lock(macc_lckp);
3606 	sg_off = lbdof_blen;
3607 	/* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3608 	cum_lb = 0;
3609 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3610 		lba = get_unaligned_be64(up + 0);
3611 		num = get_unaligned_be32(up + 8);
3612 		if (sdebug_verbose)
3613 			sdev_printk(KERN_INFO, scp->device,
3614 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3615 				my_name, __func__, k, lba, num, sg_off);
3616 		if (num == 0)
3617 			continue;
3618 		ret = check_device_access_params(scp, lba, num, true);
3619 		if (ret)
3620 			goto err_out_unlock;
3621 		num_by = num * lb_size;
3622 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3623 
3624 		if ((cum_lb + num) > bt_len) {
3625 			if (sdebug_verbose)
3626 				sdev_printk(KERN_INFO, scp->device,
3627 				    "%s: %s: sum of blocks > data provided\n",
3628 				    my_name, __func__);
3629 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3630 					0);
3631 			ret = illegal_condition_result;
3632 			goto err_out_unlock;
3633 		}
3634 
3635 		/* DIX + T10 DIF */
3636 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3637 			int prot_ret = prot_verify_write(scp, lba, num,
3638 							 ei_lba);
3639 
3640 			if (prot_ret) {
3641 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3642 						prot_ret);
3643 				ret = illegal_condition_result;
3644 				goto err_out_unlock;
3645 			}
3646 		}
3647 
3648 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3649 		/* If ZBC zone then bump its write pointer */
3650 		if (sdebug_dev_is_zoned(devip))
3651 			zbc_inc_wp(devip, lba, num);
3652 		if (unlikely(scsi_debug_lbp()))
3653 			map_region(sip, lba, num);
3654 		if (unlikely(-1 == ret)) {
3655 			ret = DID_ERROR << 16;
3656 			goto err_out_unlock;
3657 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3658 			sdev_printk(KERN_INFO, scp->device,
3659 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3660 			    my_name, num_by, ret);
3661 
3662 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3663 			     atomic_read(&sdeb_inject_pending))) {
3664 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3665 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3666 				atomic_set(&sdeb_inject_pending, 0);
3667 				ret = check_condition_result;
3668 				goto err_out_unlock;
3669 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3670 				/* Logical block guard check failed */
3671 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3672 				atomic_set(&sdeb_inject_pending, 0);
3673 				ret = illegal_condition_result;
3674 				goto err_out_unlock;
3675 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3676 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3677 				atomic_set(&sdeb_inject_pending, 0);
3678 				ret = illegal_condition_result;
3679 				goto err_out_unlock;
3680 			}
3681 		}
3682 		sg_off += num_by;
3683 		cum_lb += num;
3684 	}
3685 	ret = 0;
3686 err_out_unlock:
3687 	write_unlock(macc_lckp);
3688 err_out:
3689 	kfree(lrdp);
3690 	return ret;
3691 }
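
/*
 * Illustrative sketch: placing the k-th LBA range descriptor in the
 * WRITE SCATTERED data-out buffer the way the loop in resp_write_scat()
 * above consumes it: descriptors start one 32-byte header past the buffer
 * start, and the block data itself begins at lbdof * lb_size. Hypothetical
 * helper name.
 */
static void __maybe_unused sdeb_example_fill_lrd(u8 *lrdp, u16 k, u64 lba,
						 u32 num)
{
	u8 *up = lrdp + 32 + (u32)k * 32;	/* skip header, k-th LRD */

	put_unaligned_be64(lba, up + 0);	/* LOGICAL BLOCK ADDRESS */
	put_unaligned_be32(num, up + 8);	/* NUMBER OF LOGICAL BLOCKS */
}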
3692 
3693 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3694 			   u32 ei_lba, bool unmap, bool ndob)
3695 {
3696 	struct scsi_device *sdp = scp->device;
3697 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3698 	unsigned long long i;
3699 	u64 block, lbaa;
3700 	u32 lb_size = sdebug_sector_size;
3701 	int ret;
3702 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3703 						scp->device->hostdata, true);
3704 	rwlock_t *macc_lckp = &sip->macc_lck;
3705 	u8 *fs1p;
3706 	u8 *fsp;
3707 
3708 	write_lock(macc_lckp);
3709 
3710 	ret = check_device_access_params(scp, lba, num, true);
3711 	if (ret) {
3712 		write_unlock(macc_lckp);
3713 		return ret;
3714 	}
3715 
3716 	if (unmap && scsi_debug_lbp()) {
3717 		unmap_region(sip, lba, num);
3718 		goto out;
3719 	}
3720 	lbaa = lba;
3721 	block = do_div(lbaa, sdebug_store_sectors);
3722 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3723 	fsp = sip->storep;
3724 	fs1p = fsp + (block * lb_size);
3725 	if (ndob) {
3726 		memset(fs1p, 0, lb_size);
3727 		ret = 0;
3728 	} else
3729 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3730 
3731 	if (-1 == ret) {
3732 		write_unlock(macc_lckp);
3733 		return DID_ERROR << 16;
3734 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3735 		sdev_printk(KERN_INFO, scp->device,
3736 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3737 			    my_name, "write same", lb_size, ret);
3738 
3739 	/* Copy first sector to remaining blocks */
3740 	for (i = 1 ; i < num ; i++) {
3741 		lbaa = lba + i;
3742 		block = do_div(lbaa, sdebug_store_sectors);
3743 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3744 	}
3745 	if (scsi_debug_lbp())
3746 		map_region(sip, lba, num);
3747 	/* If ZBC zone then bump its write pointer */
3748 	if (sdebug_dev_is_zoned(devip))
3749 		zbc_inc_wp(devip, lba, num);
3750 out:
3751 	write_unlock(macc_lckp);
3752 
3753 	return 0;
3754 }
3755 
3756 static int resp_write_same_10(struct scsi_cmnd *scp,
3757 			      struct sdebug_dev_info *devip)
3758 {
3759 	u8 *cmd = scp->cmnd;
3760 	u32 lba;
3761 	u16 num;
3762 	u32 ei_lba = 0;
3763 	bool unmap = false;
3764 
3765 	if (cmd[1] & 0x8) {
3766 		if (sdebug_lbpws10 == 0) {
3767 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3768 			return check_condition_result;
3769 		} else
3770 			unmap = true;
3771 	}
3772 	lba = get_unaligned_be32(cmd + 2);
3773 	num = get_unaligned_be16(cmd + 7);
3774 	if (num > sdebug_write_same_length) {
3775 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3776 		return check_condition_result;
3777 	}
3778 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3779 }
3780 
3781 static int resp_write_same_16(struct scsi_cmnd *scp,
3782 			      struct sdebug_dev_info *devip)
3783 {
3784 	u8 *cmd = scp->cmnd;
3785 	u64 lba;
3786 	u32 num;
3787 	u32 ei_lba = 0;
3788 	bool unmap = false;
3789 	bool ndob = false;
3790 
3791 	if (cmd[1] & 0x8) {	/* UNMAP */
3792 		if (sdebug_lbpws == 0) {
3793 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3794 			return check_condition_result;
3795 		} else
3796 			unmap = true;
3797 	}
3798 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3799 		ndob = true;
3800 	lba = get_unaligned_be64(cmd + 2);
3801 	num = get_unaligned_be32(cmd + 10);
3802 	if (num > sdebug_write_same_length) {
3803 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3804 		return check_condition_result;
3805 	}
3806 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3807 }
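
/*
 * Illustrative sketch: a WRITE SAME(16) CDB with UNMAP=1 and NDOB=1, as
 * parsed by resp_write_same_16() above; with both bits set the range is
 * deallocated and no data-out buffer is needed. Assumes the caller supplies
 * a 16-byte CDB buffer; the helper name is hypothetical.
 */
static void __maybe_unused sdeb_example_ws16_unmap_cdb(u8 *cdb, u64 lba,
						       u32 num)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x93;			/* WRITE SAME(16) */
	cdb[1] = 0x8 | 0x1;		/* UNMAP | NDOB */
	put_unaligned_be64(lba, cdb + 2);
	put_unaligned_be32(num, cdb + 10);
}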
3808 
3809 /* Note the mode field is in the same position as the (lower) service action
3810  * field. For the Report supported operation codes command, SPC-4 suggests
3811  * each mode of this command should be reported separately; that is left for the future. */
3812 static int resp_write_buffer(struct scsi_cmnd *scp,
3813 			     struct sdebug_dev_info *devip)
3814 {
3815 	u8 *cmd = scp->cmnd;
3816 	struct scsi_device *sdp = scp->device;
3817 	struct sdebug_dev_info *dp;
3818 	u8 mode;
3819 
3820 	mode = cmd[1] & 0x1f;
3821 	switch (mode) {
3822 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3823 		/* set UAs on this device only */
3824 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3825 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3826 		break;
3827 	case 0x5:	/* download MC, save and ACT */
3828 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3829 		break;
3830 	case 0x6:	/* download MC with offsets and ACT */
3831 		/* set UAs on most devices (LUs) in this target */
3832 		list_for_each_entry(dp,
3833 				    &devip->sdbg_host->dev_info_list,
3834 				    dev_list)
3835 			if (dp->target == sdp->id) {
3836 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3837 				if (devip != dp)
3838 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3839 						dp->uas_bm);
3840 			}
3841 		break;
3842 	case 0x7:	/* download MC with offsets, save, and ACT */
3843 		/* set UA on all devices (LUs) in this target */
3844 		list_for_each_entry(dp,
3845 				    &devip->sdbg_host->dev_info_list,
3846 				    dev_list)
3847 			if (dp->target == sdp->id)
3848 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3849 					dp->uas_bm);
3850 		break;
3851 	default:
3852 		/* do nothing for this command for other mode values */
3853 		break;
3854 	}
3855 	return 0;
3856 }
3857 
3858 static int resp_comp_write(struct scsi_cmnd *scp,
3859 			   struct sdebug_dev_info *devip)
3860 {
3861 	u8 *cmd = scp->cmnd;
3862 	u8 *arr;
3863 	struct sdeb_store_info *sip = devip2sip(devip, true);
3864 	rwlock_t *macc_lckp = &sip->macc_lck;
3865 	u64 lba;
3866 	u32 dnum;
3867 	u32 lb_size = sdebug_sector_size;
3868 	u8 num;
3869 	int ret;
3870 	int retval = 0;
3871 
3872 	lba = get_unaligned_be64(cmd + 2);
3873 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3874 	if (0 == num)
3875 		return 0;	/* degenerate case, not an error */
3876 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3877 	    (cmd[1] & 0xe0)) {
3878 		mk_sense_invalid_opcode(scp);
3879 		return check_condition_result;
3880 	}
3881 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3882 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3883 	    (cmd[1] & 0xe0) == 0)
3884 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3885 			    "to DIF device\n");
3886 	ret = check_device_access_params(scp, lba, num, false);
3887 	if (ret)
3888 		return ret;
3889 	dnum = 2 * num;
3890 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3891 	if (NULL == arr) {
3892 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3893 				INSUFF_RES_ASCQ);
3894 		return check_condition_result;
3895 	}
3896 
3897 	write_lock(macc_lckp);
3898 
3899 	ret = do_dout_fetch(scp, dnum, arr);
3900 	if (ret == -1) {
3901 		retval = DID_ERROR << 16;
3902 		goto cleanup;
3903 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3904 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3905 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3906 			    dnum * lb_size, ret);
3907 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3908 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3909 		retval = check_condition_result;
3910 		goto cleanup;
3911 	}
3912 	if (scsi_debug_lbp())
3913 		map_region(sip, lba, num);
3914 cleanup:
3915 	write_unlock(macc_lckp);
3916 	kfree(arr);
3917 	return retval;
3918 }
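
/*
 * Illustrative sketch: a COMPARE AND WRITE CDB as resp_comp_write() above
 * parses it (opcode 0x89); the data-out buffer must carry 2 * num blocks,
 * the compare data followed by the write data. Assumes a 16-byte CDB
 * buffer; the helper name is hypothetical.
 */
static void __maybe_unused sdeb_example_caw_cdb(u8 *cdb, u64 lba, u8 num)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x89;			/* COMPARE AND WRITE */
	put_unaligned_be64(lba, cdb + 2);
	cdb[13] = num;			/* NUMBER OF LOGICAL BLOCKS, max 255 */
}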
3919 
3920 struct unmap_block_desc {
3921 	__be64	lba;
3922 	__be32	blocks;
3923 	__be32	__reserved;
3924 };
3925 
3926 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3927 {
3928 	unsigned char *buf;
3929 	struct unmap_block_desc *desc;
3930 	struct sdeb_store_info *sip = devip2sip(devip, true);
3931 	rwlock_t *macc_lckp = &sip->macc_lck;
3932 	unsigned int i, payload_len, descriptors;
3933 	int ret;
3934 
3935 	if (!scsi_debug_lbp())
3936 		return 0;	/* fib and say it's done */
3937 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3938 	BUG_ON(scsi_bufflen(scp) != payload_len);
3939 
3940 	descriptors = (payload_len - 8) / 16;
3941 	if (descriptors > sdebug_unmap_max_desc) {
3942 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3943 		return check_condition_result;
3944 	}
3945 
3946 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3947 	if (!buf) {
3948 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3949 				INSUFF_RES_ASCQ);
3950 		return check_condition_result;
3951 	}
3952 
3953 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3954 
3955 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3956 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3957 
3958 	desc = (void *)&buf[8];
3959 
3960 	write_lock(macc_lckp);
3961 
3962 	for (i = 0 ; i < descriptors ; i++) {
3963 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3964 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3965 
3966 		ret = check_device_access_params(scp, lba, num, true);
3967 		if (ret)
3968 			goto out;
3969 
3970 		unmap_region(sip, lba, num);
3971 	}
3972 
3973 	ret = 0;
3974 
3975 out:
3976 	write_unlock(macc_lckp);
3977 	kfree(buf);
3978 
3979 	return ret;
3980 }
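
/*
 * Illustrative sketch: a minimal 24-byte UNMAP parameter list with one
 * block descriptor, laid out as resp_unmap() above parses it (the CDB's
 * PARAMETER LIST LENGTH would be 24). The two header lengths count the
 * bytes that follow them. Hypothetical helper name.
 */
static void __maybe_unused sdeb_example_unmap_payload(u8 *buf, u64 lba,
						      u32 num)
{
	memset(buf, 0, 8 + 16);
	put_unaligned_be16(16 + 6, buf);	/* UNMAP data length */
	put_unaligned_be16(16, buf + 2);	/* block descriptor data length */
	put_unaligned_be64(lba, buf + 8);	/* descriptor: starting LBA */
	put_unaligned_be32(num, buf + 16);	/* descriptor: block count */
}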
3981 
3982 #define SDEBUG_GET_LBA_STATUS_LEN 32
3983 
3984 static int resp_get_lba_status(struct scsi_cmnd *scp,
3985 			       struct sdebug_dev_info *devip)
3986 {
3987 	u8 *cmd = scp->cmnd;
3988 	u64 lba;
3989 	u32 alloc_len, mapped, num;
3990 	int ret;
3991 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3992 
3993 	lba = get_unaligned_be64(cmd + 2);
3994 	alloc_len = get_unaligned_be32(cmd + 10);
3995 
3996 	if (alloc_len < 24)
3997 		return 0;
3998 
3999 	ret = check_device_access_params(scp, lba, 1, false);
4000 	if (ret)
4001 		return ret;
4002 
4003 	if (scsi_debug_lbp()) {
4004 		struct sdeb_store_info *sip = devip2sip(devip, true);
4005 
4006 		mapped = map_state(sip, lba, &num);
4007 	} else {
4008 		mapped = 1;
4009 		/* following just in case virtual_gb changed */
4010 		sdebug_capacity = get_sdebug_capacity();
4011 		if (sdebug_capacity - lba <= 0xffffffff)
4012 			num = sdebug_capacity - lba;
4013 		else
4014 			num = 0xffffffff;
4015 	}
4016 
4017 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4018 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4019 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4020 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4021 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4022 
4023 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4024 }
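
/*
 * Worked example (illustrative): for a mapped extent of 8 blocks starting
 * at LBA 0x1000, the response built above carries 20 in bytes 0-3
 * (parameter data length), 0x1000 in bytes 8-15, 8 in bytes 16-19 and a
 * provisioning status of 0 (mapped) in byte 20.
 */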
4025 
4026 static int resp_sync_cache(struct scsi_cmnd *scp,
4027 			   struct sdebug_dev_info *devip)
4028 {
4029 	int res = 0;
4030 	u64 lba;
4031 	u32 num_blocks;
4032 	u8 *cmd = scp->cmnd;
4033 
4034 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4035 		lba = get_unaligned_be32(cmd + 2);
4036 		num_blocks = get_unaligned_be16(cmd + 7);
4037 	} else {				/* SYNCHRONIZE_CACHE(16) */
4038 		lba = get_unaligned_be64(cmd + 2);
4039 		num_blocks = get_unaligned_be32(cmd + 10);
4040 	}
4041 	if (lba + num_blocks > sdebug_capacity) {
4042 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4043 		return check_condition_result;
4044 	}
4045 	if (!write_since_sync || (cmd[1] & 0x2))
4046 		res = SDEG_RES_IMMED_MASK;
4047 	else		/* delay if write_since_sync and IMMED clear */
4048 		write_since_sync = false;
4049 	return res;
4050 }
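
/*
 * Illustrative sketch: a SYNCHRONIZE CACHE(16) CDB as resp_sync_cache()
 * above parses it; setting IMMED asks for status before the flush
 * completes. Assumes opcode 0x91 and a 16-byte CDB buffer; the helper name
 * is hypothetical.
 */
static void __maybe_unused sdeb_example_sync16_cdb(u8 *cdb, u64 lba, u32 num)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x91;			/* SYNCHRONIZE CACHE(16) */
	cdb[1] = 0x2;			/* IMMED */
	put_unaligned_be64(lba, cdb + 2);
	put_unaligned_be32(num, cdb + 10);
}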
4051 
4052 /*
4053  * Assuming the LBA+num_blocks is not out-of-range, this function returns
4054  * CONDITION MET if the specified blocks will fit (or have already been
4055  * brought) into the cache, and a GOOD status otherwise. Model a disk with
4056  * a big cache, so always yield CONDITION MET. As a side effect, try to
4057  * bring the backing-store range into the cache associated with the CPU(s).
4058  */
4059 static int resp_pre_fetch(struct scsi_cmnd *scp,
4060 			  struct sdebug_dev_info *devip)
4061 {
4062 	int res = 0;
4063 	u64 lba;
4064 	u64 block, rest = 0;
4065 	u32 nblks;
4066 	u8 *cmd = scp->cmnd;
4067 	struct sdeb_store_info *sip = devip2sip(devip, true);
4068 	rwlock_t *macc_lckp = &sip->macc_lck;
4069 	u8 *fsp = sip->storep;
4070 
4071 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4072 		lba = get_unaligned_be32(cmd + 2);
4073 		nblks = get_unaligned_be16(cmd + 7);
4074 	} else {			/* PRE-FETCH(16) */
4075 		lba = get_unaligned_be64(cmd + 2);
4076 		nblks = get_unaligned_be32(cmd + 10);
4077 	}
4078 	if (lba + nblks > sdebug_capacity) {
4079 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4080 		return check_condition_result;
4081 	}
4082 	if (!fsp)
4083 		goto fini;
4084 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4085 	block = do_div(lba, sdebug_store_sectors);
4086 	if (block + nblks > sdebug_store_sectors)
4087 		rest = block + nblks - sdebug_store_sectors;
4088 
4089 	/* Try to bring the PRE-FETCH range into CPU's cache */
4090 	read_lock(macc_lckp);
4091 	prefetch_range(fsp + (sdebug_sector_size * block),
4092 		       (nblks - rest) * sdebug_sector_size);
4093 	if (rest)
4094 		prefetch_range(fsp, rest * sdebug_sector_size);
4095 	read_unlock(macc_lckp);
4096 fini:
4097 	if (cmd[1] & 0x2)
4098 		res = SDEG_RES_IMMED_MASK;
4099 	return res | condition_met_result;
4100 }
4101 
4102 #define RL_BUCKET_ELEMS 8
4103 
4104 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4105  * (W-LUN), the normal Linux scanning logic does not associate it with a
4106  * device (e.g. /dev/sg7). The following magic will make that association:
4107  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4108  * where <n> is a host number. If there are multiple targets in a host then
4109  * the above will associate a W-LUN to each target. To only get a W-LUN
4110  * for target 2, then use "echo '- 2 49409' > scan" .
4111  */
4112 static int resp_report_luns(struct scsi_cmnd *scp,
4113 			    struct sdebug_dev_info *devip)
4114 {
4115 	unsigned char *cmd = scp->cmnd;
4116 	unsigned int alloc_len;
4117 	unsigned char select_report;
4118 	u64 lun;
4119 	struct scsi_lun *lun_p;
4120 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4121 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4122 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4123 	unsigned int tlun_cnt;	/* total LUN count */
4124 	unsigned int rlen;	/* response length (in bytes) */
4125 	int k, j, n, res;
4126 	unsigned int off_rsp = 0;
4127 	const int sz_lun = sizeof(struct scsi_lun);
4128 
4129 	clear_luns_changed_on_target(devip);
4130 
4131 	select_report = cmd[2];
4132 	alloc_len = get_unaligned_be32(cmd + 6);
4133 
4134 	if (alloc_len < 4) {
4135 		pr_err("alloc len too small %d\n", alloc_len);
4136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4137 		return check_condition_result;
4138 	}
4139 
4140 	switch (select_report) {
4141 	case 0:		/* all LUNs apart from W-LUNs */
4142 		lun_cnt = sdebug_max_luns;
4143 		wlun_cnt = 0;
4144 		break;
4145 	case 1:		/* only W-LUNs */
4146 		lun_cnt = 0;
4147 		wlun_cnt = 1;
4148 		break;
4149 	case 2:		/* all LUNs */
4150 		lun_cnt = sdebug_max_luns;
4151 		wlun_cnt = 1;
4152 		break;
4153 	case 0x10:	/* only administrative LUs */
4154 	case 0x11:	/* see SPC-5 */
4155 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4156 	default:
4157 		pr_debug("select report invalid %d\n", select_report);
4158 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4159 		return check_condition_result;
4160 	}
4161 
4162 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4163 		--lun_cnt;
4164 
4165 	tlun_cnt = lun_cnt + wlun_cnt;
4166 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4167 	scsi_set_resid(scp, scsi_bufflen(scp));
4168 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4169 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4170 
4171 	/* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
4172 	lun = sdebug_no_lun_0 ? 1 : 0;
4173 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4174 		memset(arr, 0, sizeof(arr));
4175 		lun_p = (struct scsi_lun *)&arr[0];
4176 		if (k == 0) {
4177 			put_unaligned_be32(rlen, &arr[0]);
4178 			++lun_p;
4179 			j = 1;
4180 		}
4181 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4182 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4183 				break;
4184 			int_to_scsilun(lun++, lun_p);
4185 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4186 				lun_p->scsi_lun[0] |= 0x40;
4187 		}
4188 		if (j < RL_BUCKET_ELEMS)
4189 			break;
4190 		n = j * sz_lun;
4191 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4192 		if (res)
4193 			return res;
4194 		off_rsp += n;
4195 	}
4196 	if (wlun_cnt) {
4197 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4198 		++j;
4199 	}
4200 	if (j > 0)
4201 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4202 	return res;
4203 }
4204 
4205 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4206 {
4207 	bool is_bytchk3 = false;
4208 	u8 bytchk;
4209 	int ret, j;
4210 	u32 vnum, a_num, off;
4211 	const u32 lb_size = sdebug_sector_size;
4212 	u64 lba;
4213 	u8 *arr;
4214 	u8 *cmd = scp->cmnd;
4215 	struct sdeb_store_info *sip = devip2sip(devip, true);
4216 	rwlock_t *macc_lckp = &sip->macc_lck;
4217 
4218 	bytchk = (cmd[1] >> 1) & 0x3;
4219 	if (bytchk == 0) {
4220 		return 0;	/* always claim internal verify okay */
4221 	} else if (bytchk == 2) {
4222 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4223 		return check_condition_result;
4224 	} else if (bytchk == 3) {
4225 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4226 	}
4227 	switch (cmd[0]) {
4228 	case VERIFY_16:
4229 		lba = get_unaligned_be64(cmd + 2);
4230 		vnum = get_unaligned_be32(cmd + 10);
4231 		break;
4232 	case VERIFY:		/* is VERIFY(10) */
4233 		lba = get_unaligned_be32(cmd + 2);
4234 		vnum = get_unaligned_be16(cmd + 7);
4235 		break;
4236 	default:
4237 		mk_sense_invalid_opcode(scp);
4238 		return check_condition_result;
4239 	}
4240 	a_num = is_bytchk3 ? 1 : vnum;
4241 	/* Treat following check like one for read (i.e. no write) access */
4242 	ret = check_device_access_params(scp, lba, a_num, false);
4243 	if (ret)
4244 		return ret;
4245 
4246 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4247 	if (!arr) {
4248 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4249 				INSUFF_RES_ASCQ);
4250 		return check_condition_result;
4251 	}
4252 	/* Not changing store, so only need read access */
4253 	read_lock(macc_lckp);
4254 
4255 	ret = do_dout_fetch(scp, a_num, arr);
4256 	if (ret == -1) {
4257 		ret = DID_ERROR << 16;
4258 		goto cleanup;
4259 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4260 		sdev_printk(KERN_INFO, scp->device,
4261 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4262 			    my_name, __func__, a_num * lb_size, ret);
4263 	}
4264 	if (is_bytchk3) {
4265 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4266 			memcpy(arr + off, arr, lb_size);
4267 	}
4268 	ret = 0;
4269 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4270 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4271 		ret = check_condition_result;
4272 		goto cleanup;
4273 	}
4274 cleanup:
4275 	read_unlock(macc_lckp);
4276 	kfree(arr);
4277 	return ret;
4278 }
4279 
4280 #define RZONES_DESC_HD 64
4281 
4282 /* Report zones depending on start LBA and reporting options */
4283 static int resp_report_zones(struct scsi_cmnd *scp,
4284 			     struct sdebug_dev_info *devip)
4285 {
4286 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4287 	int ret = 0;
4288 	u32 alloc_len, rep_opts, rep_len;
4289 	bool partial;
4290 	u64 lba, zs_lba;
4291 	u8 *arr = NULL, *desc;
4292 	u8 *cmd = scp->cmnd;
4293 	struct sdeb_zone_state *zsp;
4294 	struct sdeb_store_info *sip = devip2sip(devip, false);
4295 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4296 
4297 	if (!sdebug_dev_is_zoned(devip)) {
4298 		mk_sense_invalid_opcode(scp);
4299 		return check_condition_result;
4300 	}
4301 	zs_lba = get_unaligned_be64(cmd + 2);
4302 	alloc_len = get_unaligned_be32(cmd + 10);
4303 	rep_opts = cmd[14] & 0x3f;
4304 	partial = cmd[14] & 0x80;
4305 
4306 	if (zs_lba >= sdebug_capacity) {
4307 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4308 		return check_condition_result;
4309 	}
4310 
4311 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4312 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4313 			    max_zones);
4314 
4315 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4316 	if (!arr) {
4317 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4318 				INSUFF_RES_ASCQ);
4319 		return check_condition_result;
4320 	}
4321 
4322 	read_lock(macc_lckp);
4323 
4324 	desc = arr + 64;
4325 	for (i = 0; i < max_zones; i++) {
4326 		lba = zs_lba + devip->zsize * i;
4327 		if (lba > sdebug_capacity)
4328 			break;
4329 		zsp = zbc_zone(devip, lba);
4330 		switch (rep_opts) {
4331 		case 0x00:
4332 			/* All zones */
4333 			break;
4334 		case 0x01:
4335 			/* Empty zones */
4336 			if (zsp->z_cond != ZC1_EMPTY)
4337 				continue;
4338 			break;
4339 		case 0x02:
4340 			/* Implicit open zones */
4341 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4342 				continue;
4343 			break;
4344 		case 0x03:
4345 			/* Explicit open zones */
4346 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4347 				continue;
4348 			break;
4349 		case 0x04:
4350 			/* Closed zones */
4351 			if (zsp->z_cond != ZC4_CLOSED)
4352 				continue;
4353 			break;
4354 		case 0x05:
4355 			/* Full zones */
4356 			if (zsp->z_cond != ZC5_FULL)
4357 				continue;
4358 			break;
4359 		case 0x06:
4360 		case 0x07:
4361 		case 0x10:
4362 			/*
4363 			 * Read-only, offline, and reset WP recommended
4364 			 * zones are not emulated, so none to report.
4365 			 */
4366 			continue;
4367 		case 0x11:
4368 			/* non-seq-resource set */
4369 			if (!zsp->z_non_seq_resource)
4370 				continue;
4371 			break;
4372 		case 0x3f:
4373 			/* Not write pointer (conventional) zones */
4374 			if (!zbc_zone_is_conv(zsp))
4375 				continue;
4376 			break;
4377 		default:
4378 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4379 					INVALID_FIELD_IN_CDB, 0);
4380 			ret = check_condition_result;
4381 			goto fini;
4382 		}
4383 
4384 		if (nrz < rep_max_zones) {
4385 			/* Fill zone descriptor */
4386 			desc[0] = zsp->z_type;
4387 			desc[1] = zsp->z_cond << 4;
4388 			if (zsp->z_non_seq_resource)
4389 				desc[1] |= 1 << 1;
4390 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4391 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4392 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4393 			desc += 64;
4394 		}
4395 
4396 		if (partial && nrz >= rep_max_zones)
4397 			break;
4398 
4399 		nrz++;
4400 	}
4401 
4402 	/* Report header */
4403 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4404 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4405 
4406 	rep_len = (unsigned long)desc - (unsigned long)arr;
4407 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4408 
4409 fini:
4410 	read_unlock(macc_lckp);
4411 	kfree(arr);
4412 	return ret;
4413 }
4414 
4415 /* Logic transplanted from tcmu-runner, file_zbc.c */
4416 static void zbc_open_all(struct sdebug_dev_info *devip)
4417 {
4418 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4419 	unsigned int i;
4420 
4421 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4422 		if (zsp->z_cond == ZC4_CLOSED)
4423 			zbc_open_zone(devip, &devip->zstate[i], true);
4424 	}
4425 }
4426 
4427 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4428 {
4429 	int res = 0;
4430 	u64 z_id;
4431 	enum sdebug_z_cond zc;
4432 	u8 *cmd = scp->cmnd;
4433 	struct sdeb_zone_state *zsp;
4434 	bool all = cmd[14] & 0x01;
4435 	struct sdeb_store_info *sip = devip2sip(devip, false);
4436 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4437 
4438 	if (!sdebug_dev_is_zoned(devip)) {
4439 		mk_sense_invalid_opcode(scp);
4440 		return check_condition_result;
4441 	}
4442 
4443 	write_lock(macc_lckp);
4444 
4445 	if (all) {
4446 		/* Check if all closed zones can be opened */
4447 		if (devip->max_open &&
4448 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4449 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4450 					INSUFF_ZONE_ASCQ);
4451 			res = check_condition_result;
4452 			goto fini;
4453 		}
4454 		/* Open all closed zones */
4455 		zbc_open_all(devip);
4456 		goto fini;
4457 	}
4458 
4459 	/* Open the specified zone */
4460 	z_id = get_unaligned_be64(cmd + 2);
4461 	if (z_id >= sdebug_capacity) {
4462 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4463 		res = check_condition_result;
4464 		goto fini;
4465 	}
4466 
4467 	zsp = zbc_zone(devip, z_id);
4468 	if (z_id != zsp->z_start) {
4469 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4470 		res = check_condition_result;
4471 		goto fini;
4472 	}
4473 	if (zbc_zone_is_conv(zsp)) {
4474 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4475 		res = check_condition_result;
4476 		goto fini;
4477 	}
4478 
4479 	zc = zsp->z_cond;
4480 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4481 		goto fini;
4482 
4483 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4484 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4485 				INSUFF_ZONE_ASCQ);
4486 		res = check_condition_result;
4487 		goto fini;
4488 	}
4489 
4490 	if (zc == ZC2_IMPLICIT_OPEN)
4491 		zbc_close_zone(devip, zsp);
4492 	zbc_open_zone(devip, zsp, true);
4493 fini:
4494 	write_unlock(macc_lckp);
4495 	return res;
4496 }
4497 
4498 static void zbc_close_all(struct sdebug_dev_info *devip)
4499 {
4500 	unsigned int i;
4501 
4502 	for (i = 0; i < devip->nr_zones; i++)
4503 		zbc_close_zone(devip, &devip->zstate[i]);
4504 }
4505 
4506 static int resp_close_zone(struct scsi_cmnd *scp,
4507 			   struct sdebug_dev_info *devip)
4508 {
4509 	int res = 0;
4510 	u64 z_id;
4511 	u8 *cmd = scp->cmnd;
4512 	struct sdeb_zone_state *zsp;
4513 	bool all = cmd[14] & 0x01;
4514 	struct sdeb_store_info *sip = devip2sip(devip, false);
4515 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4516 
4517 	if (!sdebug_dev_is_zoned(devip)) {
4518 		mk_sense_invalid_opcode(scp);
4519 		return check_condition_result;
4520 	}
4521 
4522 	write_lock(macc_lckp);
4523 
4524 	if (all) {
4525 		zbc_close_all(devip);
4526 		goto fini;
4527 	}
4528 
4529 	/* Close specified zone */
4530 	z_id = get_unaligned_be64(cmd + 2);
4531 	if (z_id >= sdebug_capacity) {
4532 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4533 		res = check_condition_result;
4534 		goto fini;
4535 	}
4536 
4537 	zsp = zbc_zone(devip, z_id);
4538 	if (z_id != zsp->z_start) {
4539 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4540 		res = check_condition_result;
4541 		goto fini;
4542 	}
4543 	if (zbc_zone_is_conv(zsp)) {
4544 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4545 		res = check_condition_result;
4546 		goto fini;
4547 	}
4548 
4549 	zbc_close_zone(devip, zsp);
4550 fini:
4551 	write_unlock(macc_lckp);
4552 	return res;
4553 }
4554 
4555 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4556 			    struct sdeb_zone_state *zsp, bool empty)
4557 {
4558 	enum sdebug_z_cond zc = zsp->z_cond;
4559 
4560 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4561 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4562 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4563 			zbc_close_zone(devip, zsp);
4564 		if (zsp->z_cond == ZC4_CLOSED)
4565 			devip->nr_closed--;
4566 		zsp->z_wp = zsp->z_start + zsp->z_size;
4567 		zsp->z_cond = ZC5_FULL;
4568 	}
4569 }
4570 
4571 static void zbc_finish_all(struct sdebug_dev_info *devip)
4572 {
4573 	unsigned int i;
4574 
4575 	for (i = 0; i < devip->nr_zones; i++)
4576 		zbc_finish_zone(devip, &devip->zstate[i], false);
4577 }
4578 
4579 static int resp_finish_zone(struct scsi_cmnd *scp,
4580 			    struct sdebug_dev_info *devip)
4581 {
4582 	struct sdeb_zone_state *zsp;
4583 	int res = 0;
4584 	u64 z_id;
4585 	u8 *cmd = scp->cmnd;
4586 	bool all = cmd[14] & 0x01;
4587 	struct sdeb_store_info *sip = devip2sip(devip, false);
4588 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4589 
4590 	if (!sdebug_dev_is_zoned(devip)) {
4591 		mk_sense_invalid_opcode(scp);
4592 		return check_condition_result;
4593 	}
4594 
4595 	write_lock(macc_lckp);
4596 
4597 	if (all) {
4598 		zbc_finish_all(devip);
4599 		goto fini;
4600 	}
4601 
4602 	/* Finish the specified zone */
4603 	z_id = get_unaligned_be64(cmd + 2);
4604 	if (z_id >= sdebug_capacity) {
4605 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4606 		res = check_condition_result;
4607 		goto fini;
4608 	}
4609 
4610 	zsp = zbc_zone(devip, z_id);
4611 	if (z_id != zsp->z_start) {
4612 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4613 		res = check_condition_result;
4614 		goto fini;
4615 	}
4616 	if (zbc_zone_is_conv(zsp)) {
4617 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4618 		res = check_condition_result;
4619 		goto fini;
4620 	}
4621 
4622 	zbc_finish_zone(devip, zsp, true);
4623 fini:
4624 	write_unlock(macc_lckp);
4625 	return res;
4626 }
4627 
4628 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4629 			 struct sdeb_zone_state *zsp)
4630 {
4631 	enum sdebug_z_cond zc;
4632 
4633 	if (zbc_zone_is_conv(zsp))
4634 		return;
4635 
4636 	zc = zsp->z_cond;
4637 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4638 		zbc_close_zone(devip, zsp);
4639 
4640 	if (zsp->z_cond == ZC4_CLOSED)
4641 		devip->nr_closed--;
4642 
4643 	zsp->z_non_seq_resource = false;
4644 	zsp->z_wp = zsp->z_start;
4645 	zsp->z_cond = ZC1_EMPTY;
4646 }
4647 
4648 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4649 {
4650 	unsigned int i;
4651 
4652 	for (i = 0; i < devip->nr_zones; i++)
4653 		zbc_rwp_zone(devip, &devip->zstate[i]);
4654 }
4655 
4656 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4657 {
4658 	struct sdeb_zone_state *zsp;
4659 	int res = 0;
4660 	u64 z_id;
4661 	u8 *cmd = scp->cmnd;
4662 	bool all = cmd[14] & 0x01;
4663 	struct sdeb_store_info *sip = devip2sip(devip, false);
4664 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4665 
4666 	if (!sdebug_dev_is_zoned(devip)) {
4667 		mk_sense_invalid_opcode(scp);
4668 		return check_condition_result;
4669 	}
4670 
4671 	write_lock(macc_lckp);
4672 
4673 	if (all) {
4674 		zbc_rwp_all(devip);
4675 		goto fini;
4676 	}
4677 
4678 	z_id = get_unaligned_be64(cmd + 2);
4679 	if (z_id >= sdebug_capacity) {
4680 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4681 		res = check_condition_result;
4682 		goto fini;
4683 	}
4684 
4685 	zsp = zbc_zone(devip, z_id);
4686 	if (z_id != zsp->z_start) {
4687 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4688 		res = check_condition_result;
4689 		goto fini;
4690 	}
4691 	if (zbc_zone_is_conv(zsp)) {
4692 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4693 		res = check_condition_result;
4694 		goto fini;
4695 	}
4696 
4697 	zbc_rwp_zone(devip, zsp);
4698 fini:
4699 	write_unlock(macc_lckp);
4700 	return res;
4701 }
4702 
4703 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4704 {
4705 	u16 hwq;
4706 
4707 	if (sdebug_host_max_queue) {
4708 		/* Provide a simple method to choose the hwq */
4709 		hwq = smp_processor_id() % submit_queues;
4710 	} else {
4711 		u32 tag = blk_mq_unique_tag(cmnd->request);
4712 
4713 		hwq = blk_mq_unique_tag_to_hwq(tag);
4714 
4715 		pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4716 		if (WARN_ON_ONCE(hwq >= submit_queues))
4717 			hwq = 0;
4718 	}
4719 	return sdebug_q_arr + hwq;
4720 }
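
/*
 * Illustrative note: blk_mq_unique_tag() encodes the hardware queue index
 * in the upper 16 bits of the returned value, so a unique tag of
 * 0x00030007 decodes to hwq 3 and per-queue tag 7;
 * blk_mq_unique_tag_to_hwq() performs the shift seen above.
 */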
4721 
4722 static u32 get_tag(struct scsi_cmnd *cmnd)
4723 {
4724 	return blk_mq_unique_tag(cmnd->request);
4725 }
4726 
4727 /* Queued (deferred) command completions converge here. */
4728 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4729 {
4730 	bool aborted = sd_dp->aborted;
4731 	int qc_idx;
4732 	int retiring = 0;
4733 	unsigned long iflags;
4734 	struct sdebug_queue *sqp;
4735 	struct sdebug_queued_cmd *sqcp;
4736 	struct scsi_cmnd *scp;
4737 	struct sdebug_dev_info *devip;
4738 
4739 	sd_dp->defer_t = SDEB_DEFER_NONE;
4740 	if (unlikely(aborted))
4741 		sd_dp->aborted = false;
4742 	qc_idx = sd_dp->qc_idx;
4743 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4744 	if (sdebug_statistics) {
4745 		atomic_inc(&sdebug_completions);
4746 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4747 			atomic_inc(&sdebug_miss_cpus);
4748 	}
4749 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4750 		pr_err("wild qc_idx=%d\n", qc_idx);
4751 		return;
4752 	}
4753 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4754 	sqcp = &sqp->qc_arr[qc_idx];
4755 	scp = sqcp->a_cmnd;
4756 	if (unlikely(scp == NULL)) {
4757 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4758 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4759 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4760 		return;
4761 	}
4762 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4763 	if (likely(devip))
4764 		atomic_dec(&devip->num_in_q);
4765 	else
4766 		pr_err("devip=NULL\n");
4767 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4768 		retiring = 1;
4769 
4770 	sqcp->a_cmnd = NULL;
4771 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4772 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4773 		pr_err("Unexpected completion\n");
4774 		return;
4775 	}
4776 
4777 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4778 		int k, retval;
4779 
4780 		retval = atomic_read(&retired_max_queue);
4781 		if (qc_idx >= retval) {
4782 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4783 			pr_err("index %d too large\n", retval);
4784 			return;
4785 		}
4786 		k = find_last_bit(sqp->in_use_bm, retval);
4787 		if ((k < sdebug_max_queue) || (k == retval))
4788 			atomic_set(&retired_max_queue, 0);
4789 		else
4790 			atomic_set(&retired_max_queue, k + 1);
4791 	}
4792 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4793 	if (unlikely(aborted)) {
4794 		if (sdebug_verbose)
4795 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4796 		return;
4797 	}
4798 	scp->scsi_done(scp); /* callback to mid level */
4799 }
4800 
4801 /* When high resolution timer goes off this function is called. */
4802 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4803 {
4804 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4805 						  hrt);
4806 	sdebug_q_cmd_complete(sd_dp);
4807 	return HRTIMER_NORESTART;
4808 }
4809 
4810 /* When work queue schedules work, it calls this function. */
4811 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4812 {
4813 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4814 						  ew.work);
4815 	sdebug_q_cmd_complete(sd_dp);
4816 }
4817 
4818 static bool got_shared_uuid;
4819 static uuid_t shared_uuid;
4820 
4821 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4822 {
4823 	struct sdeb_zone_state *zsp;
4824 	sector_t capacity = get_sdebug_capacity();
4825 	sector_t zstart = 0;
4826 	unsigned int i;
4827 
4828 	/*
4829 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4830 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4831 	 * use the specified zone size, checking that at least 2 zones can be
4832 	 * created for the device.
4833 	 */
4834 	if (!sdeb_zbc_zone_size_mb) {
4835 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4836 			>> ilog2(sdebug_sector_size);
4837 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4838 			devip->zsize >>= 1;
4839 		if (devip->zsize < 2) {
4840 			pr_err("Device capacity too small\n");
4841 			return -EINVAL;
4842 		}
4843 	} else {
4844 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4845 			pr_err("Zone size is not a power of 2\n");
4846 			return -EINVAL;
4847 		}
4848 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4849 			>> ilog2(sdebug_sector_size);
4850 		if (devip->zsize >= capacity) {
4851 			pr_err("Zone size too large for device capacity\n");
4852 			return -EINVAL;
4853 		}
4854 	}
4855 
4856 	devip->zsize_shift = ilog2(devip->zsize);
4857 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4858 
4859 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4860 		pr_err("Number of conventional zones too large\n");
4861 		return -EINVAL;
4862 	}
4863 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4864 
4865 	if (devip->zmodel == BLK_ZONED_HM) {
4866 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4867 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4868 			devip->max_open = (devip->nr_zones - 1) / 2;
4869 		else
4870 			devip->max_open = sdeb_zbc_max_open;
4871 	}
4872 
4873 	devip->zstate = kcalloc(devip->nr_zones,
4874 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4875 	if (!devip->zstate)
4876 		return -ENOMEM;
4877 
4878 	for (i = 0; i < devip->nr_zones; i++) {
4879 		zsp = &devip->zstate[i];
4880 
4881 		zsp->z_start = zstart;
4882 
4883 		if (i < devip->nr_conv_zones) {
4884 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4885 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4886 			zsp->z_wp = (sector_t)-1;
4887 		} else {
4888 			if (devip->zmodel == BLK_ZONED_HM)
4889 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4890 			else
4891 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4892 			zsp->z_cond = ZC1_EMPTY;
4893 			zsp->z_wp = zsp->z_start;
4894 		}
4895 
4896 		if (zsp->z_start + devip->zsize < capacity)
4897 			zsp->z_size = devip->zsize;
4898 		else
4899 			zsp->z_size = capacity - zsp->z_start;
4900 
4901 		zstart += zsp->z_size;
4902 	}
4903 
4904 	return 0;
4905 }
4906 
4907 static struct sdebug_dev_info *sdebug_device_create(
4908 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4909 {
4910 	struct sdebug_dev_info *devip;
4911 
4912 	devip = kzalloc(sizeof(*devip), flags);
4913 	if (devip) {
4914 		if (sdebug_uuid_ctl == 1)
4915 			uuid_gen(&devip->lu_name);
4916 		else if (sdebug_uuid_ctl == 2) {
4917 			if (got_shared_uuid)
4918 				devip->lu_name = shared_uuid;
4919 			else {
4920 				uuid_gen(&shared_uuid);
4921 				got_shared_uuid = true;
4922 				devip->lu_name = shared_uuid;
4923 			}
4924 		}
4925 		devip->sdbg_host = sdbg_host;
4926 		if (sdeb_zbc_in_use) {
4927 			devip->zmodel = sdeb_zbc_model;
4928 			if (sdebug_device_create_zones(devip)) {
4929 				kfree(devip);
4930 				return NULL;
4931 			}
4932 		} else {
4933 			devip->zmodel = BLK_ZONED_NONE;
4934 		}
4936 		devip->create_ts = ktime_get_boottime();
4937 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4938 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4939 	}
4940 	return devip;
4941 }
4942 
4943 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4944 {
4945 	struct sdebug_host_info *sdbg_host;
4946 	struct sdebug_dev_info *open_devip = NULL;
4947 	struct sdebug_dev_info *devip;
4948 
4949 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4950 	if (!sdbg_host) {
4951 		pr_err("Host info NULL\n");
4952 		return NULL;
4953 	}
4954 
4955 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4956 		if ((devip->used) && (devip->channel == sdev->channel) &&
4957 		    (devip->target == sdev->id) &&
4958 		    (devip->lun == sdev->lun))
4959 			return devip;
4960 		else {
4961 			if ((!devip->used) && (!open_devip))
4962 				open_devip = devip;
4963 		}
4964 	}
4965 	if (!open_devip) { /* try and make a new one */
4966 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4967 		if (!open_devip) {
4968 			pr_err("out of memory at line %d\n", __LINE__);
4969 			return NULL;
4970 		}
4971 	}
4972 
4973 	open_devip->channel = sdev->channel;
4974 	open_devip->target = sdev->id;
4975 	open_devip->lun = sdev->lun;
4976 	open_devip->sdbg_host = sdbg_host;
4977 	atomic_set(&open_devip->num_in_q, 0);
4978 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4979 	open_devip->used = true;
4980 	return open_devip;
4981 }
4982 
4983 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4984 {
4985 	if (sdebug_verbose)
4986 		pr_info("slave_alloc <%u %u %u %llu>\n",
4987 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4988 	return 0;
4989 }
4990 
4991 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4992 {
4993 	struct sdebug_dev_info *devip =
4994 			(struct sdebug_dev_info *)sdp->hostdata;
4995 
4996 	if (sdebug_verbose)
4997 		pr_info("slave_configure <%u %u %u %llu>\n",
4998 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4999 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5000 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5001 	if (devip == NULL) {
5002 		devip = find_build_dev_info(sdp);
5003 		if (devip == NULL)
5004 			return 1;  /* no resources, will be marked offline */
5005 	}
5006 	sdp->hostdata = devip;
5007 	if (sdebug_no_uld)
5008 		sdp->no_uld_attach = 1;
5009 	config_cdb_len(sdp);
5010 	return 0;
5011 }
5012 
5013 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5014 {
5015 	struct sdebug_dev_info *devip =
5016 		(struct sdebug_dev_info *)sdp->hostdata;
5017 
5018 	if (sdebug_verbose)
5019 		pr_info("slave_destroy <%u %u %u %llu>\n",
5020 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5021 	if (devip) {
5022 		/* make this slot available for re-use */
5023 		devip->used = false;
5024 		sdp->hostdata = NULL;
5025 	}
5026 }
5027 
5028 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5029 			   enum sdeb_defer_type defer_t)
5030 {
5031 	if (!sd_dp)
5032 		return;
5033 	if (defer_t == SDEB_DEFER_HRT)
5034 		hrtimer_cancel(&sd_dp->hrt);
5035 	else if (defer_t == SDEB_DEFER_WQ)
5036 		cancel_work_sync(&sd_dp->ew.work);
5037 }
5038 
5039 /* If @cmnd is found, deletes its timer or work queue and returns true;
5040    else returns false. */
5041 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5042 {
5043 	unsigned long iflags;
5044 	int j, k, qmax, r_qmax;
5045 	enum sdeb_defer_type l_defer_t;
5046 	struct sdebug_queue *sqp;
5047 	struct sdebug_queued_cmd *sqcp;
5048 	struct sdebug_dev_info *devip;
5049 	struct sdebug_defer *sd_dp;
5050 
5051 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5052 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5053 		qmax = sdebug_max_queue;
5054 		r_qmax = atomic_read(&retired_max_queue);
5055 		if (r_qmax > qmax)
5056 			qmax = r_qmax;
5057 		for (k = 0; k < qmax; ++k) {
5058 			if (test_bit(k, sqp->in_use_bm)) {
5059 				sqcp = &sqp->qc_arr[k];
5060 				if (cmnd != sqcp->a_cmnd)
5061 					continue;
5062 				/* found */
5063 				devip = (struct sdebug_dev_info *)
5064 						cmnd->device->hostdata;
5065 				if (devip)
5066 					atomic_dec(&devip->num_in_q);
5067 				sqcp->a_cmnd = NULL;
5068 				sd_dp = sqcp->sd_dp;
5069 				if (sd_dp) {
5070 					l_defer_t = sd_dp->defer_t;
5071 					sd_dp->defer_t = SDEB_DEFER_NONE;
5072 				} else
5073 					l_defer_t = SDEB_DEFER_NONE;
5074 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5075 				stop_qc_helper(sd_dp, l_defer_t);
5076 				clear_bit(k, sqp->in_use_bm);
5077 				return true;
5078 			}
5079 		}
5080 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5081 	}
5082 	return false;
5083 }
5084 
5085 /* Deletes (stops) timers or work queues of all queued commands */
5086 static void stop_all_queued(void)
5087 {
5088 	unsigned long iflags;
5089 	int j, k;
5090 	enum sdeb_defer_type l_defer_t;
5091 	struct sdebug_queue *sqp;
5092 	struct sdebug_queued_cmd *sqcp;
5093 	struct sdebug_dev_info *devip;
5094 	struct sdebug_defer *sd_dp;
5095 
5096 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5097 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5098 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5099 			if (test_bit(k, sqp->in_use_bm)) {
5100 				sqcp = &sqp->qc_arr[k];
5101 				if (sqcp->a_cmnd == NULL)
5102 					continue;
5103 				devip = (struct sdebug_dev_info *)
5104 					sqcp->a_cmnd->device->hostdata;
5105 				if (devip)
5106 					atomic_dec(&devip->num_in_q);
5107 				sqcp->a_cmnd = NULL;
5108 				sd_dp = sqcp->sd_dp;
5109 				if (sd_dp) {
5110 					l_defer_t = sd_dp->defer_t;
5111 					sd_dp->defer_t = SDEB_DEFER_NONE;
5112 				} else
5113 					l_defer_t = SDEB_DEFER_NONE;
5114 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5115 				stop_qc_helper(sd_dp, l_defer_t);
5116 				clear_bit(k, sqp->in_use_bm);
5117 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5118 			}
5119 		}
5120 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5121 	}
5122 }
5123 
5124 /* Free queued command memory on heap */
5125 static void free_all_queued(void)
5126 {
5127 	int j, k;
5128 	struct sdebug_queue *sqp;
5129 	struct sdebug_queued_cmd *sqcp;
5130 
5131 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5132 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5133 			sqcp = &sqp->qc_arr[k];
5134 			kfree(sqcp->sd_dp);
5135 			sqcp->sd_dp = NULL;
5136 		}
5137 	}
5138 }
5139 
5140 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5141 {
5142 	bool ok;
5143 
5144 	++num_aborts;
5145 	if (SCpnt) {
5146 		ok = stop_queued_cmnd(SCpnt);
5147 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5148 			sdev_printk(KERN_INFO, SCpnt->device,
5149 				    "%s: command%s found\n", __func__,
5150 				    ok ? "" : " not");
5151 	}
5152 	return SUCCESS;
5153 }
5154 
5155 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5156 {
5157 	++num_dev_resets;
5158 	if (SCpnt && SCpnt->device) {
5159 		struct scsi_device *sdp = SCpnt->device;
5160 		struct sdebug_dev_info *devip =
5161 				(struct sdebug_dev_info *)sdp->hostdata;
5162 
5163 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5164 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5165 		if (devip)
5166 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5167 	}
5168 	return SUCCESS;
5169 }
5170 
5171 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5172 {
5173 	struct sdebug_host_info *sdbg_host;
5174 	struct sdebug_dev_info *devip;
5175 	struct scsi_device *sdp;
5176 	struct Scsi_Host *hp;
5177 	int k = 0;
5178 
5179 	++num_target_resets;
5180 	if (!SCpnt)
5181 		goto lie;
5182 	sdp = SCpnt->device;
5183 	if (!sdp)
5184 		goto lie;
5185 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5186 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5187 	hp = sdp->host;
5188 	if (!hp)
5189 		goto lie;
5190 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5191 	if (sdbg_host) {
5192 		list_for_each_entry(devip,
5193 				    &sdbg_host->dev_info_list,
5194 				    dev_list)
5195 			if (devip->target == sdp->id) {
5196 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5197 				++k;
5198 			}
5199 	}
5200 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5201 		sdev_printk(KERN_INFO, sdp,
5202 			    "%s: %d device(s) found in target\n", __func__, k);
5203 lie:
5204 	return SUCCESS;
5205 }
5206 
5207 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5208 {
5209 	struct sdebug_host_info *sdbg_host;
5210 	struct sdebug_dev_info *devip;
5211 	struct scsi_device *sdp;
5212 	struct Scsi_Host *hp;
5213 	int k = 0;
5214 
5215 	++num_bus_resets;
5216 	if (!(SCpnt && SCpnt->device))
5217 		goto lie;
5218 	sdp = SCpnt->device;
5219 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5220 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5221 	hp = sdp->host;
5222 	if (hp) {
5223 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5224 		if (sdbg_host) {
5225 			list_for_each_entry(devip,
5226 					    &sdbg_host->dev_info_list,
5227 					    dev_list) {
5228 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5229 				++k;
5230 			}
5231 		}
5232 	}
5233 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5234 		sdev_printk(KERN_INFO, sdp,
5235 			    "%s: %d device(s) found in host\n", __func__, k);
5236 lie:
5237 	return SUCCESS;
5238 }
5239 
5240 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5241 {
5242 	struct sdebug_host_info *sdbg_host;
5243 	struct sdebug_dev_info *devip;
5244 	int k = 0;
5245 
5246 	++num_host_resets;
5247 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5248 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5249 	spin_lock(&sdebug_host_list_lock);
5250 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5251 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5252 				    dev_list) {
5253 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5254 			++k;
5255 		}
5256 	}
5257 	spin_unlock(&sdebug_host_list_lock);
5258 	stop_all_queued();
5259 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5260 		sdev_printk(KERN_INFO, SCpnt->device,
5261 			    "%s: %d device(s) found\n", __func__, k);
5262 	return SUCCESS;
5263 }
5264 
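/*
 * Builds an MS-DOS style partition table in the first sector of the ram
 * store. Hypothetical geometry example: with sdebug_heads=16 and
 * sdebug_sectors_per=32 (so heads_by_sects=512), linear sector 1000 maps
 * to cyl = 1000/512 = 1, head = (1000 - 512)/32 = 15 and
 * sector = (1000 % 32) + 1 = 9, matching the CHS arithmetic below.
 */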
5265 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5266 {
5267 	struct msdos_partition *pp;
5268 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5269 	int sectors_per_part, num_sectors, k;
5270 	int heads_by_sects, start_sec, end_sec;
5271 
5272 	/* assume partition table already zeroed */
5273 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5274 		return;
5275 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5276 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5277 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5278 	}
5279 	num_sectors = (int)get_sdebug_capacity();
5280 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5281 			   / sdebug_num_parts;
5282 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5283 	starts[0] = sdebug_sectors_per;
5284 	max_part_secs = sectors_per_part;
5285 	for (k = 1; k < sdebug_num_parts; ++k) {
5286 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5287 			    * heads_by_sects;
5288 		if (starts[k] - starts[k - 1] < max_part_secs)
5289 			max_part_secs = starts[k] - starts[k - 1];
5290 	}
5291 	starts[sdebug_num_parts] = num_sectors;
5292 	starts[sdebug_num_parts + 1] = 0;
5293 
5294 	ramp[510] = 0x55;	/* magic partition markings */
5295 	ramp[511] = 0xAA;
5296 	pp = (struct msdos_partition *)(ramp + 0x1be);
5297 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5298 		start_sec = starts[k];
5299 		end_sec = starts[k] + max_part_secs - 1;
5300 		pp->boot_ind = 0;
5301 
5302 		pp->cyl = start_sec / heads_by_sects;
5303 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5304 			   / sdebug_sectors_per;
5305 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5306 
5307 		pp->end_cyl = end_sec / heads_by_sects;
5308 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5309 			       / sdebug_sectors_per;
5310 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5311 
5312 		pp->start_sect = cpu_to_le32(start_sec);
5313 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5314 		pp->sys_ind = 0x83;	/* plain Linux partition */
5315 	}
5316 }
5317 
5318 static void block_unblock_all_queues(bool block)
5319 {
5320 	int j;
5321 	struct sdebug_queue *sqp;
5322 
5323 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5324 		atomic_set(&sqp->blocked, (int)block);
5325 }
5326 
5327 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5328  * commands will be processed normally before triggers occur.
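 * Hypothetical example: with every_nth=100 and sdebug_cmnd_count=250,
 * the count is rounded down to 200 so that 99 more commands complete
 * normally before the next trigger.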
5329  */
5330 static void tweak_cmnd_count(void)
5331 {
5332 	int count, modulo;
5333 
5334 	modulo = abs(sdebug_every_nth);
5335 	if (modulo < 2)
5336 		return;
5337 	block_unblock_all_queues(true);
5338 	count = atomic_read(&sdebug_cmnd_count);
5339 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5340 	block_unblock_all_queues(false);
5341 }
5342 
5343 static void clear_queue_stats(void)
5344 {
5345 	atomic_set(&sdebug_cmnd_count, 0);
5346 	atomic_set(&sdebug_completions, 0);
5347 	atomic_set(&sdebug_miss_cpus, 0);
5348 	atomic_set(&sdebug_a_tsf, 0);
5349 }
5350 
5351 static bool inject_on_this_cmd(void)
5352 {
5353 	if (sdebug_every_nth == 0)
5354 		return false;
5355 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5356 }
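/* e.g. with every_nth=3, injection fires whenever sdebug_cmnd_count is a
 * multiple of 3 (counts 0, 3, 6, ...). */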
5357 
5358 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5359 
5360 /* Complete the processing of the thread that queued a SCSI command to this
5361  * driver. It either completes the command by calling scsi_done() or
5362  * schedules an hrtimer or work queue item, then returns 0. Returns
5363  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5364  */
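/* Delay selection, summarizing the body below: delta_jiff == 0 responds in
 * the submitting thread; otherwise delta_jiff > 0 or ndelay > 0 arms an
 * hrtimer, and the remaining case (negative delta_jiff, ndelay == 0)
 * defers completion to a work queue.
 */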
5365 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5366 			 int scsi_result,
5367 			 int (*pfp)(struct scsi_cmnd *,
5368 				    struct sdebug_dev_info *),
5369 			 int delta_jiff, int ndelay)
5370 {
5371 	bool new_sd_dp;
5372 	bool inject = false;
5373 	int k, num_in_q, qdepth;
5374 	unsigned long iflags;
5375 	u64 ns_from_boot = 0;
5376 	struct sdebug_queue *sqp;
5377 	struct sdebug_queued_cmd *sqcp;
5378 	struct scsi_device *sdp;
5379 	struct sdebug_defer *sd_dp;
5380 
5381 	if (unlikely(devip == NULL)) {
5382 		if (scsi_result == 0)
5383 			scsi_result = DID_NO_CONNECT << 16;
5384 		goto respond_in_thread;
5385 	}
5386 	sdp = cmnd->device;
5387 
5388 	if (delta_jiff == 0)
5389 		goto respond_in_thread;
5390 
5391 	sqp = get_queue(cmnd);
5392 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5393 	if (unlikely(atomic_read(&sqp->blocked))) {
5394 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5395 		return SCSI_MLQUEUE_HOST_BUSY;
5396 	}
5397 	num_in_q = atomic_read(&devip->num_in_q);
5398 	qdepth = cmnd->device->queue_depth;
5399 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5400 		if (scsi_result) {
5401 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5402 			goto respond_in_thread;
5403 		} else
5404 			scsi_result = device_qfull_result;
5405 	} else if (unlikely(sdebug_every_nth &&
5406 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5407 			    (scsi_result == 0))) {
5408 		if ((num_in_q == (qdepth - 1)) &&
5409 		    (atomic_inc_return(&sdebug_a_tsf) >=
5410 		     abs(sdebug_every_nth))) {
5411 			atomic_set(&sdebug_a_tsf, 0);
5412 			inject = true;
5413 			scsi_result = device_qfull_result;
5414 		}
5415 	}
5416 
5417 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5418 	if (unlikely(k >= sdebug_max_queue)) {
5419 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5420 		if (scsi_result)
5421 			goto respond_in_thread;
5422 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5423 			scsi_result = device_qfull_result;
5424 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5425 			sdev_printk(KERN_INFO, sdp,
5426 				    "%s: max_queue=%d exceeded, %s\n",
5427 				    __func__, sdebug_max_queue,
5428 				    (scsi_result ?  "status: TASK SET FULL" :
5429 						    "report: host busy"));
5430 		if (scsi_result)
5431 			goto respond_in_thread;
5432 		else
5433 			return SCSI_MLQUEUE_HOST_BUSY;
5434 	}
5435 	set_bit(k, sqp->in_use_bm);
5436 	atomic_inc(&devip->num_in_q);
5437 	sqcp = &sqp->qc_arr[k];
5438 	sqcp->a_cmnd = cmnd;
5439 	cmnd->host_scribble = (unsigned char *)sqcp;
5440 	sd_dp = sqcp->sd_dp;
5441 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5442 	if (!sd_dp) {
5443 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5444 		if (!sd_dp) {
5445 			atomic_dec(&devip->num_in_q);
5446 			clear_bit(k, sqp->in_use_bm);
5447 			return SCSI_MLQUEUE_HOST_BUSY;
5448 		}
5449 		new_sd_dp = true;
5450 	} else {
5451 		new_sd_dp = false;
5452 	}
5453 
5454 	/* Set the hostwide tag */
5455 	if (sdebug_host_max_queue)
5456 		sd_dp->hc_idx = get_tag(cmnd);
5457 
5458 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5459 		ns_from_boot = ktime_get_boottime_ns();
5460 
5461 	/* one of the resp_*() response functions is called here */
5462 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5463 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5464 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5465 		delta_jiff = ndelay = 0;
5466 	}
5467 	if (cmnd->result == 0 && scsi_result != 0)
5468 		cmnd->result = scsi_result;
5469 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5470 		if (atomic_read(&sdeb_inject_pending)) {
5471 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5472 			atomic_set(&sdeb_inject_pending, 0);
5473 			cmnd->result = check_condition_result;
5474 		}
5475 	}
5476 
5477 	if (unlikely(sdebug_verbose && cmnd->result))
5478 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5479 			    __func__, cmnd->result);
5480 
5481 	if (delta_jiff > 0 || ndelay > 0) {
5482 		ktime_t kt;
5483 
5484 		if (delta_jiff > 0) {
5485 			u64 ns = jiffies_to_nsecs(delta_jiff);
5486 
5487 			if (sdebug_random && ns < U32_MAX) {
5488 				ns = prandom_u32_max((u32)ns);
5489 			} else if (sdebug_random) {
5490 				ns >>= 12;	/* scale to 4 usec precision */
5491 				if (ns < U32_MAX)	/* over 4 hours max */
5492 					ns = prandom_u32_max((u32)ns);
5493 				ns <<= 12;
5494 			}
5495 			kt = ns_to_ktime(ns);
5496 		} else {	/* ndelay has a 4.2 second max */
5497 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5498 					     (u32)ndelay;
5499 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5500 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5501 
5502 				if (kt <= d) {	/* elapsed duration >= kt */
5503 					sqcp->a_cmnd = NULL;
5504 					atomic_dec(&devip->num_in_q);
5505 					clear_bit(k, sqp->in_use_bm);
5506 					if (new_sd_dp)
5507 						kfree(sd_dp);
5508 					/* call scsi_done() from this thread */
5509 					cmnd->scsi_done(cmnd);
5510 					return 0;
5511 				}
5512 				/* otherwise reduce kt by elapsed time */
5513 				kt -= d;
5514 			}
5515 		}
5516 		if (!sd_dp->init_hrt) {
5517 			sd_dp->init_hrt = true;
5518 			sqcp->sd_dp = sd_dp;
5519 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5520 				     HRTIMER_MODE_REL_PINNED);
5521 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5522 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5523 			sd_dp->qc_idx = k;
5524 		}
5525 		if (sdebug_statistics)
5526 			sd_dp->issuing_cpu = raw_smp_processor_id();
5527 		sd_dp->defer_t = SDEB_DEFER_HRT;
5528 		/* schedule the invocation of scsi_done() for a later time */
5529 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5530 	} else {	/* jdelay < 0, use work queue */
5531 		if (!sd_dp->init_wq) {
5532 			sd_dp->init_wq = true;
5533 			sqcp->sd_dp = sd_dp;
5534 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5535 			sd_dp->qc_idx = k;
5536 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5537 		}
5538 		if (sdebug_statistics)
5539 			sd_dp->issuing_cpu = raw_smp_processor_id();
5540 		sd_dp->defer_t = SDEB_DEFER_WQ;
5541 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5542 			     atomic_read(&sdeb_inject_pending)))
5543 			sd_dp->aborted = true;
5544 		schedule_work(&sd_dp->ew.work);
5545 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5546 			     atomic_read(&sdeb_inject_pending))) {
5547 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
5548 			blk_abort_request(cmnd->request);
5549 			atomic_set(&sdeb_inject_pending, 0);
5550 		}
5551 	}
5552 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5553 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5554 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5555 	return 0;
5556 
5557 respond_in_thread:	/* call back to mid-layer using invocation thread */
5558 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5559 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5560 	if (cmnd->result == 0 && scsi_result != 0)
5561 		cmnd->result = scsi_result;
5562 	cmnd->scsi_done(cmnd);
5563 	return 0;
5564 }
5565 
5566 /* Note: The following macros create attribute files in the
5567    /sys/module/scsi_debug/parameters directory. Unfortunately this
5568    driver is not notified when one of them changes, so it cannot trigger
5569    auxiliary actions as it can when the corresponding attribute in the
5570    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5571  */
5572 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5573 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5574 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5575 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5576 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5577 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5578 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5579 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5580 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5581 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5582 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5583 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5584 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5585 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5586 module_param_string(inq_product, sdebug_inq_product_id,
5587 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5588 module_param_string(inq_rev, sdebug_inq_product_rev,
5589 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5590 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5591 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5592 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5593 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5594 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5595 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5596 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5597 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5598 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5599 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5600 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5601 		   S_IRUGO | S_IWUSR);
5602 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5603 		   S_IRUGO | S_IWUSR);
5604 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5605 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5606 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5607 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5608 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5609 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5610 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5611 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5612 module_param_named(per_host_store, sdebug_per_host_store, bool,
5613 		   S_IRUGO | S_IWUSR);
5614 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5615 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5616 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5617 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5618 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5619 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5620 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5621 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5622 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5623 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5624 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5625 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5626 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5627 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5628 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5629 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5630 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5631 		   S_IRUGO | S_IWUSR);
5632 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5633 module_param_named(write_same_length, sdebug_write_same_length, int,
5634 		   S_IRUGO | S_IWUSR);
5635 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5636 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5637 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5638 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5639 
5640 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5641 MODULE_DESCRIPTION("SCSI debug adapter driver");
5642 MODULE_LICENSE("GPL");
5643 MODULE_VERSION(SDEBUG_VERSION);
5644 
5645 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5646 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5647 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5648 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5649 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5650 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5651 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5652 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5653 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5654 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5655 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5656 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5657 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5658 MODULE_PARM_DESC(host_max_queue,
5659 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5660 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5661 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5662 		 SDEBUG_VERSION "\")");
5663 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5664 MODULE_PARM_DESC(lbprz,
5665 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5666 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5667 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5668 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5669 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5670 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5671 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5672 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5673 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5674 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5675 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5676 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5677 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5678 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5679 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5680 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5681 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5682 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5683 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5684 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5685 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5686 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5687 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5688 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5689 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5690 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5691 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5692 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5693 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5694 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5695 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5696 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5697 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5698 MODULE_PARM_DESC(uuid_ctl,
5699 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5700 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5701 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5702 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5703 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5704 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5705 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5706 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5707 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
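/*
 * Example invocation (hypothetical values): simulate a 256 MiB ram-backed
 * disk with 2 targets of 4 LUNs each:
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * The same parameters can be read back (and the writable ones changed)
 * under /sys/module/scsi_debug/parameters.
 */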
5708 
5709 #define SDEBUG_INFO_LEN 256
5710 static char sdebug_info[SDEBUG_INFO_LEN];
5711 
5712 static const char *scsi_debug_info(struct Scsi_Host *shp)
5713 {
5714 	int k;
5715 
5716 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5717 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5718 	if (k >= (SDEBUG_INFO_LEN - 1))
5719 		return sdebug_info;
5720 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5721 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5722 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5723 		  "statistics", (int)sdebug_statistics);
5724 	return sdebug_info;
5725 }
5726 
5727 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5728 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5729 				 int length)
5730 {
5731 	char arr[16];
5732 	int opts;
5733 	int minLen = length > 15 ? 15 : length;
5734 
5735 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5736 		return -EACCES;
5737 	memcpy(arr, buffer, minLen);
5738 	arr[minLen] = '\0';
5739 	if (1 != sscanf(arr, "%d", &opts))
5740 		return -EINVAL;
5741 	sdebug_opts = opts;
5742 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5743 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5744 	if (sdebug_every_nth != 0)
5745 		tweak_cmnd_count();
5746 	return length;
5747 }
5748 
5749 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5750  * same for each scsi_debug host (if more than one). Some of the counters
5751  * output are not atomic, so they may be inaccurate on a busy system. */
5752 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5753 {
5754 	int f, j, l;
5755 	struct sdebug_queue *sqp;
5756 	struct sdebug_host_info *sdhp;
5757 
5758 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5759 		   SDEBUG_VERSION, sdebug_version_date);
5760 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5761 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5762 		   sdebug_opts, sdebug_every_nth);
5763 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5764 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5765 		   sdebug_sector_size, "bytes");
5766 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5767 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5768 		   num_aborts);
5769 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5770 		   num_dev_resets, num_target_resets, num_bus_resets,
5771 		   num_host_resets);
5772 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5773 		   dix_reads, dix_writes, dif_errors);
5774 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5775 		   sdebug_statistics);
5776 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5777 		   atomic_read(&sdebug_cmnd_count),
5778 		   atomic_read(&sdebug_completions),
5779 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5780 		   atomic_read(&sdebug_a_tsf));
5781 
5782 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5783 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5784 		seq_printf(m, "  queue %d:\n", j);
5785 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5786 		if (f != sdebug_max_queue) {
5787 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5788 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5789 				   "first,last bits", f, l);
5790 		}
5791 	}
5792 
5793 	seq_printf(m, "this host_no=%d\n", host->host_no);
5794 	if (!xa_empty(per_store_ap)) {
5795 		bool niu;
5796 		int idx;
5797 		unsigned long l_idx;
5798 		struct sdeb_store_info *sip;
5799 
5800 		seq_puts(m, "\nhost list:\n");
5801 		j = 0;
5802 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5803 			idx = sdhp->si_idx;
5804 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5805 				   sdhp->shost->host_no, idx);
5806 			++j;
5807 		}
5808 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5809 			   sdeb_most_recent_idx);
5810 		j = 0;
5811 		xa_for_each(per_store_ap, l_idx, sip) {
5812 			niu = xa_get_mark(per_store_ap, l_idx,
5813 					  SDEB_XA_NOT_IN_USE);
5814 			idx = (int)l_idx;
5815 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5816 				   (niu ? "  not_in_use" : ""));
5817 			++j;
5818 		}
5819 	}
5820 	return 0;
5821 }
5822 
5823 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5824 {
5825 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5826 }
5827 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5828  * of delay is jiffies.
5829  */
5830 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5831 			   size_t count)
5832 {
5833 	int jdelay, res;
5834 
5835 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5836 		res = count;
5837 		if (sdebug_jdelay != jdelay) {
5838 			int j, k;
5839 			struct sdebug_queue *sqp;
5840 
5841 			block_unblock_all_queues(true);
5842 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5843 			     ++j, ++sqp) {
5844 				k = find_first_bit(sqp->in_use_bm,
5845 						   sdebug_max_queue);
5846 				if (k != sdebug_max_queue) {
5847 					res = -EBUSY;   /* queued commands */
5848 					break;
5849 				}
5850 			}
5851 			if (res > 0) {
5852 				sdebug_jdelay = jdelay;
5853 				sdebug_ndelay = 0;
5854 			}
5855 			block_unblock_all_queues(false);
5856 		}
5857 		return res;
5858 	}
5859 	return -EINVAL;
5860 }
5861 static DRIVER_ATTR_RW(delay);
5862 
5863 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5864 {
5865 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5866 }
5867 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5868 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5869 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5870 			    size_t count)
5871 {
5872 	int ndelay, res;
5873 
5874 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5875 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5876 		res = count;
5877 		if (sdebug_ndelay != ndelay) {
5878 			int j, k;
5879 			struct sdebug_queue *sqp;
5880 
5881 			block_unblock_all_queues(true);
5882 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5883 			     ++j, ++sqp) {
5884 				k = find_first_bit(sqp->in_use_bm,
5885 						   sdebug_max_queue);
5886 				if (k != sdebug_max_queue) {
5887 					res = -EBUSY;   /* queued commands */
5888 					break;
5889 				}
5890 			}
5891 			if (res > 0) {
5892 				sdebug_ndelay = ndelay;
5893 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5894 							: DEF_JDELAY;
5895 			}
5896 			block_unblock_all_queues(false);
5897 		}
5898 		return res;
5899 	}
5900 	return -EINVAL;
5901 }
5902 static DRIVER_ATTR_RW(ndelay);
5903 
5904 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5905 {
5906 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5907 }
5908 
5909 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5910 			  size_t count)
5911 {
5912 	int opts;
5913 	char work[20];
5914 
5915 	if (sscanf(buf, "%10s", work) == 1) {
5916 		if (strncasecmp(work, "0x", 2) == 0) {
5917 			if (kstrtoint(work + 2, 16, &opts) == 0)
5918 				goto opts_done;
5919 		} else {
5920 			if (kstrtoint(work, 10, &opts) == 0)
5921 				goto opts_done;
5922 		}
5923 	}
5924 	return -EINVAL;
5925 opts_done:
5926 	sdebug_opts = opts;
5927 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5928 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5929 	tweak_cmnd_count();
5930 	return count;
5931 }
5932 static DRIVER_ATTR_RW(opts);
5933 
5934 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5935 {
5936 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5937 }
5938 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5939 			   size_t count)
5940 {
5941 	int n;
5942 
5943 	/* Cannot change from or to TYPE_ZBC with sysfs */
5944 	if (sdebug_ptype == TYPE_ZBC)
5945 		return -EINVAL;
5946 
5947 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5948 		if (n == TYPE_ZBC)
5949 			return -EINVAL;
5950 		sdebug_ptype = n;
5951 		return count;
5952 	}
5953 	return -EINVAL;
5954 }
5955 static DRIVER_ATTR_RW(ptype);
5956 
5957 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5958 {
5959 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5960 }
5961 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5962 			    size_t count)
5963 {
5964 	int n;
5965 
5966 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5967 		sdebug_dsense = n;
5968 		return count;
5969 	}
5970 	return -EINVAL;
5971 }
5972 static DRIVER_ATTR_RW(dsense);
5973 
5974 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5975 {
5976 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5977 }
5978 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5979 			     size_t count)
5980 {
5981 	int n, idx;
5982 
5983 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5984 		bool want_store = (n == 0);
5985 		struct sdebug_host_info *sdhp;
5986 
5987 		n = (n > 0);
5988 		sdebug_fake_rw = (sdebug_fake_rw > 0);
5989 		if (sdebug_fake_rw == n)
5990 			return count;	/* not transitioning so do nothing */
5991 
5992 		if (want_store) {	/* 1 --> 0 transition, set up store */
5993 			if (sdeb_first_idx < 0) {
5994 				idx = sdebug_add_store();
5995 				if (idx < 0)
5996 					return idx;
5997 			} else {
5998 				idx = sdeb_first_idx;
5999 				xa_clear_mark(per_store_ap, idx,
6000 					      SDEB_XA_NOT_IN_USE);
6001 			}
6002 			/* make all hosts use same store */
6003 			list_for_each_entry(sdhp, &sdebug_host_list,
6004 					    host_list) {
6005 				if (sdhp->si_idx != idx) {
6006 					xa_set_mark(per_store_ap, sdhp->si_idx,
6007 						    SDEB_XA_NOT_IN_USE);
6008 					sdhp->si_idx = idx;
6009 				}
6010 			}
6011 			sdeb_most_recent_idx = idx;
6012 		} else {	/* 0 --> 1 transition is trigger for shrink */
6013 			sdebug_erase_all_stores(true /* apart from first */);
6014 		}
6015 		sdebug_fake_rw = n;
6016 		return count;
6017 	}
6018 	return -EINVAL;
6019 }
6020 static DRIVER_ATTR_RW(fake_rw);
6021 
6022 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6023 {
6024 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6025 }
6026 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6027 			      size_t count)
6028 {
6029 	int n;
6030 
6031 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6032 		sdebug_no_lun_0 = n;
6033 		return count;
6034 	}
6035 	return -EINVAL;
6036 }
6037 static DRIVER_ATTR_RW(no_lun_0);
6038 
6039 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6040 {
6041 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6042 }
6043 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6044 			      size_t count)
6045 {
6046 	int n;
6047 
6048 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6049 		sdebug_num_tgts = n;
6050 		sdebug_max_tgts_luns();
6051 		return count;
6052 	}
6053 	return -EINVAL;
6054 }
6055 static DRIVER_ATTR_RW(num_tgts);
6056 
6057 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6058 {
6059 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6060 }
6061 static DRIVER_ATTR_RO(dev_size_mb);
6062 
6063 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6064 {
6065 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6066 }
6067 
6068 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6069 				    size_t count)
6070 {
6071 	bool v;
6072 
6073 	if (kstrtobool(buf, &v))
6074 		return -EINVAL;
6075 
6076 	sdebug_per_host_store = v;
6077 	return count;
6078 }
6079 static DRIVER_ATTR_RW(per_host_store);
6080 
6081 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6082 {
6083 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6084 }
6085 static DRIVER_ATTR_RO(num_parts);
6086 
6087 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6088 {
6089 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6090 }
6091 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6092 			       size_t count)
6093 {
6094 	int nth;
6095 	char work[20];
6096 
6097 	if (sscanf(buf, "%10s", work) == 1) {
6098 		if (strncasecmp(work, "0x", 2) == 0) {
6099 			if (kstrtoint(work + 2, 16, &nth) == 0)
6100 				goto every_nth_done;
6101 		} else {
6102 			if (kstrtoint(work, 10, &nth) == 0)
6103 				goto every_nth_done;
6104 		}
6105 	}
6106 	return -EINVAL;
6107 
6108 every_nth_done:
6109 	sdebug_every_nth = nth;
6110 	if (nth && !sdebug_statistics) {
6111 		pr_info("every_nth needs statistics=1, set it\n");
6112 		sdebug_statistics = true;
6113 	}
6114 	tweak_cmnd_count();
6115 	return count;
6116 }
6117 static DRIVER_ATTR_RW(every_nth);
6118 
6119 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6120 {
6121 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6122 }
6123 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6124 				size_t count)
6125 {
6126 	int n;
6127 	bool changed;
6128 
6129 	if (kstrtoint(buf, 0, &n))
6130 		return -EINVAL;
6131 	if (n >= 0) {
6132 		if (n > (int)SAM_LUN_AM_FLAT) {
6133 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6134 			return -EINVAL;
6135 		}
6136 		changed = ((int)sdebug_lun_am != n);
6137 		sdebug_lun_am = n;
6138 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6139 			struct sdebug_host_info *sdhp;
6140 			struct sdebug_dev_info *dp;
6141 
6142 			spin_lock(&sdebug_host_list_lock);
6143 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6144 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6145 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6146 				}
6147 			}
6148 			spin_unlock(&sdebug_host_list_lock);
6149 		}
6150 		return count;
6151 	}
6152 	return -EINVAL;
6153 }
6154 static DRIVER_ATTR_RW(lun_format);
6155 
6156 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6157 {
6158 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6159 }
6160 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6161 			      size_t count)
6162 {
6163 	int n;
6164 	bool changed;
6165 
6166 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6167 		if (n > 256) {
6168 			pr_warn("max_luns can be no more than 256\n");
6169 			return -EINVAL;
6170 		}
6171 		changed = (sdebug_max_luns != n);
6172 		sdebug_max_luns = n;
6173 		sdebug_max_tgts_luns();
6174 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6175 			struct sdebug_host_info *sdhp;
6176 			struct sdebug_dev_info *dp;
6177 
6178 			spin_lock(&sdebug_host_list_lock);
6179 			list_for_each_entry(sdhp, &sdebug_host_list,
6180 					    host_list) {
6181 				list_for_each_entry(dp, &sdhp->dev_info_list,
6182 						    dev_list) {
6183 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6184 						dp->uas_bm);
6185 				}
6186 			}
6187 			spin_unlock(&sdebug_host_list_lock);
6188 		}
6189 		return count;
6190 	}
6191 	return -EINVAL;
6192 }
6193 static DRIVER_ATTR_RW(max_luns);
6194 
6195 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6196 {
6197 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6198 }
6199 /* N.B. max_queue can be changed while there are queued commands. In-flight
6200  * commands beyond the new max_queue will be completed. */
6201 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6202 			       size_t count)
6203 {
6204 	int j, n, k, a;
6205 	struct sdebug_queue *sqp;
6206 
6207 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6208 	    (n <= SDEBUG_CANQUEUE) &&
6209 	    (sdebug_host_max_queue == 0)) {
6210 		block_unblock_all_queues(true);
6211 		k = 0;
6212 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6213 		     ++j, ++sqp) {
6214 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6215 			if (a > k)
6216 				k = a;
6217 		}
6218 		sdebug_max_queue = n;
6219 		if (k == SDEBUG_CANQUEUE)
6220 			atomic_set(&retired_max_queue, 0);
6221 		else if (k >= n)
6222 			atomic_set(&retired_max_queue, k + 1);
6223 		else
6224 			atomic_set(&retired_max_queue, 0);
6225 		block_unblock_all_queues(false);
6226 		return count;
6227 	}
6228 	return -EINVAL;
6229 }
6230 static DRIVER_ATTR_RW(max_queue);
6231 
6232 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6233 {
6234 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6235 }
6236 
6237 /*
6238  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6239  * in range [0, sdebug_host_max_queue), we can't change it.
6240  */
6241 static DRIVER_ATTR_RO(host_max_queue);
6242 
6243 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6244 {
6245 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6246 }
6247 static DRIVER_ATTR_RO(no_uld);
6248 
6249 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6250 {
6251 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6252 }
6253 static DRIVER_ATTR_RO(scsi_level);
6254 
6255 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6256 {
6257 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6258 }
6259 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6260 				size_t count)
6261 {
6262 	int n;
6263 	bool changed;
6264 
6265 	/* Ignore capacity change for ZBC drives for now */
6266 	if (sdeb_zbc_in_use)
6267 		return -ENOTSUPP;
6268 
6269 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6270 		changed = (sdebug_virtual_gb != n);
6271 		sdebug_virtual_gb = n;
6272 		sdebug_capacity = get_sdebug_capacity();
6273 		if (changed) {
6274 			struct sdebug_host_info *sdhp;
6275 			struct sdebug_dev_info *dp;
6276 
6277 			spin_lock(&sdebug_host_list_lock);
6278 			list_for_each_entry(sdhp, &sdebug_host_list,
6279 					    host_list) {
6280 				list_for_each_entry(dp, &sdhp->dev_info_list,
6281 						    dev_list) {
6282 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6283 						dp->uas_bm);
6284 				}
6285 			}
6286 			spin_unlock(&sdebug_host_list_lock);
6287 		}
6288 		return count;
6289 	}
6290 	return -EINVAL;
6291 }
6292 static DRIVER_ATTR_RW(virtual_gb);
6293 
6294 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6295 {
6296 	/* the absolute number of currently active hosts is shown */
6297 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6298 }
6299 
6300 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6301 			      size_t count)
6302 {
6303 	bool found;
6304 	unsigned long idx;
6305 	struct sdeb_store_info *sip;
6306 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6307 	int delta_hosts;
6308 
6309 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6310 		return -EINVAL;
6311 	if (delta_hosts > 0) {
6312 		do {
6313 			found = false;
6314 			if (want_phs) {
6315 				xa_for_each_marked(per_store_ap, idx, sip,
6316 						   SDEB_XA_NOT_IN_USE) {
6317 					sdeb_most_recent_idx = (int)idx;
6318 					found = true;
6319 					break;
6320 				}
6321 				if (found)	/* re-use case */
6322 					sdebug_add_host_helper((int)idx);
6323 				else
6324 					sdebug_do_add_host(true);
6325 			} else {
6326 				sdebug_do_add_host(false);
6327 			}
6328 		} while (--delta_hosts);
6329 	} else if (delta_hosts < 0) {
6330 		do {
6331 			sdebug_do_remove_host(false);
6332 		} while (++delta_hosts);
6333 	}
6334 	return count;
6335 }
6336 static DRIVER_ATTR_RW(add_host);
6337 
6338 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6339 {
6340 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6341 }
6342 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6343 				    size_t count)
6344 {
6345 	int n;
6346 
6347 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6348 		sdebug_vpd_use_hostno = n;
6349 		return count;
6350 	}
6351 	return -EINVAL;
6352 }
6353 static DRIVER_ATTR_RW(vpd_use_hostno);
6354 
6355 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6356 {
6357 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6358 }
6359 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6360 				size_t count)
6361 {
6362 	int n;
6363 
6364 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6365 		if (n > 0)
6366 			sdebug_statistics = true;
6367 		else {
6368 			clear_queue_stats();
6369 			sdebug_statistics = false;
6370 		}
6371 		return count;
6372 	}
6373 	return -EINVAL;
6374 }
6375 static DRIVER_ATTR_RW(statistics);
6376 
6377 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6378 {
6379 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6380 }
6381 static DRIVER_ATTR_RO(sector_size);
6382 
6383 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6384 {
6385 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6386 }
6387 static DRIVER_ATTR_RO(submit_queues);
6388 
6389 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6390 {
6391 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6392 }
6393 static DRIVER_ATTR_RO(dix);
6394 
6395 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6396 {
6397 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6398 }
6399 static DRIVER_ATTR_RO(dif);
6400 
6401 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6402 {
6403 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6404 }
6405 static DRIVER_ATTR_RO(guard);
6406 
6407 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6408 {
6409 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6410 }
6411 static DRIVER_ATTR_RO(ato);
6412 
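/* Show the thin-provisioning map. The "%*pbl" kernel format prints a bitmap
 * as a range list, e.g. blocks 0-3 and 7 mapped would read "0-3,7". */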
6413 static ssize_t map_show(struct device_driver *ddp, char *buf)
6414 {
6415 	ssize_t count = 0;
6416 
6417 	if (!scsi_debug_lbp())
6418 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6419 				 sdebug_store_sectors);
6420 
6421 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6422 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6423 
6424 		if (sip)
6425 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6426 					  (int)map_size, sip->map_storep);
6427 	}
6428 	buf[count++] = '\n';
6429 	buf[count] = '\0';
6430 
6431 	return count;
6432 }
6433 static DRIVER_ATTR_RO(map);
6434 
6435 static ssize_t random_show(struct device_driver *ddp, char *buf)
6436 {
6437 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6438 }
6439 
6440 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6441 			    size_t count)
6442 {
6443 	bool v;
6444 
6445 	if (kstrtobool(buf, &v))
6446 		return -EINVAL;
6447 
6448 	sdebug_random = v;
6449 	return count;
6450 }
6451 static DRIVER_ATTR_RW(random);
6452 
6453 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6454 {
6455 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6456 }
6457 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6458 			       size_t count)
6459 {
6460 	int n;
6461 
6462 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6463 		sdebug_removable = (n > 0);
6464 		return count;
6465 	}
6466 	return -EINVAL;
6467 }
6468 static DRIVER_ATTR_RW(removable);
6469 
6470 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6471 {
6472 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6473 }
6474 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6475 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6476 			       size_t count)
6477 {
6478 	int n;
6479 
6480 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6481 		sdebug_host_lock = (n > 0);
6482 		return count;
6483 	}
6484 	return -EINVAL;
6485 }
6486 static DRIVER_ATTR_RW(host_lock);
6487 
6488 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6489 {
6490 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6491 }
6492 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6493 			    size_t count)
6494 {
6495 	int n;
6496 
6497 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6498 		sdebug_strict = (n > 0);
6499 		return count;
6500 	}
6501 	return -EINVAL;
6502 }
6503 static DRIVER_ATTR_RW(strict);
6504 
6505 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6506 {
6507 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6508 }
6509 static DRIVER_ATTR_RO(uuid_ctl);
6510 
6511 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6512 {
6513 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6514 }
6515 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6516 			     size_t count)
6517 {
6518 	int ret, n;
6519 
6520 	ret = kstrtoint(buf, 0, &n);
6521 	if (ret)
6522 		return ret;
6523 	sdebug_cdb_len = n;
6524 	all_config_cdb_len();
6525 	return count;
6526 }
6527 static DRIVER_ATTR_RW(cdb_len);
6528 
6529 static const char * const zbc_model_strs_a[] = {
6530 	[BLK_ZONED_NONE] = "none",
6531 	[BLK_ZONED_HA]   = "host-aware",
6532 	[BLK_ZONED_HM]   = "host-managed",
6533 };
6534 
6535 static const char * const zbc_model_strs_b[] = {
6536 	[BLK_ZONED_NONE] = "no",
6537 	[BLK_ZONED_HA]   = "aware",
6538 	[BLK_ZONED_HM]   = "managed",
6539 };
6540 
6541 static const char * const zbc_model_strs_c[] = {
6542 	[BLK_ZONED_NONE] = "0",
6543 	[BLK_ZONED_HA]   = "1",
6544 	[BLK_ZONED_HM]   = "2",
6545 };
6546 
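/*
 * Maps a zbc= parameter string onto a BLK_ZONED_* model, accepting any
 * of the three spellings above (e.g. "host-managed", "managed" or "2").
 * Returns -EINVAL when none of them match.
 */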
6547 static int sdeb_zbc_model_str(const char *cp)
6548 {
6549 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6550 
6551 	if (res < 0) {
6552 		res = sysfs_match_string(zbc_model_strs_b, cp);
6553 		if (res < 0) {
6554 			res = sysfs_match_string(zbc_model_strs_c, cp);
6555 			if (res < 0)
6556 				return -EINVAL;
6557 		}
6558 	}
6559 	return res;
6560 }
6561 
6562 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6563 {
6564 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6565 			 zbc_model_strs_a[sdeb_zbc_model]);
6566 }
6567 static DRIVER_ATTR_RO(zbc);
6568 
6569 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6570 {
6571 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6572 }
6573 static DRIVER_ATTR_RO(tur_ms_to_ready);
6574 
6575 /* Note: The following array creates attribute files in the
6576  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6577  * files (over those found in the /sys/module/scsi_debug/parameters
6578  * directory) is that auxiliary actions can be triggered when an attribute
6579  * is changed. For example see: add_host_store() above.
6580  */
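/* For example, another simulated host can be added at runtime with:
 *	echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * which triggers add_host_store() rather than merely setting a variable.
 */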
6581 
6582 static struct attribute *sdebug_drv_attrs[] = {
6583 	&driver_attr_delay.attr,
6584 	&driver_attr_opts.attr,
6585 	&driver_attr_ptype.attr,
6586 	&driver_attr_dsense.attr,
6587 	&driver_attr_fake_rw.attr,
6588 	&driver_attr_host_max_queue.attr,
6589 	&driver_attr_no_lun_0.attr,
6590 	&driver_attr_num_tgts.attr,
6591 	&driver_attr_dev_size_mb.attr,
6592 	&driver_attr_num_parts.attr,
6593 	&driver_attr_every_nth.attr,
6594 	&driver_attr_lun_format.attr,
6595 	&driver_attr_max_luns.attr,
6596 	&driver_attr_max_queue.attr,
6597 	&driver_attr_no_uld.attr,
6598 	&driver_attr_scsi_level.attr,
6599 	&driver_attr_virtual_gb.attr,
6600 	&driver_attr_add_host.attr,
6601 	&driver_attr_per_host_store.attr,
6602 	&driver_attr_vpd_use_hostno.attr,
6603 	&driver_attr_sector_size.attr,
6604 	&driver_attr_statistics.attr,
6605 	&driver_attr_submit_queues.attr,
6606 	&driver_attr_dix.attr,
6607 	&driver_attr_dif.attr,
6608 	&driver_attr_guard.attr,
6609 	&driver_attr_ato.attr,
6610 	&driver_attr_map.attr,
6611 	&driver_attr_random.attr,
6612 	&driver_attr_removable.attr,
6613 	&driver_attr_host_lock.attr,
6614 	&driver_attr_ndelay.attr,
6615 	&driver_attr_strict.attr,
6616 	&driver_attr_uuid_ctl.attr,
6617 	&driver_attr_cdb_len.attr,
6618 	&driver_attr_tur_ms_to_ready.attr,
6619 	&driver_attr_zbc.attr,
6620 	NULL,
6621 };
6622 ATTRIBUTE_GROUPS(sdebug_drv);
6623 
6624 static struct device *pseudo_primary;
6625 
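/*
 * Module initialization: validates the module parameters, allocates the
 * submit queue array, optionally creates the first backing store,
 * registers the pseudo root device, bus and driver, and finally adds
 * add_host simulated host(s).
 */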
6626 static int __init scsi_debug_init(void)
6627 {
6628 	bool want_store = (sdebug_fake_rw == 0);
6629 	unsigned long sz;
6630 	int k, ret, hosts_to_add;
6631 	int idx = -1;
6632 
6633 	ramdisk_lck_a[0] = &atomic_rw;
6634 	ramdisk_lck_a[1] = &atomic_rw2;
6635 	atomic_set(&retired_max_queue, 0);
6636 
6637 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6638 		pr_warn("ndelay must be less than 1 second, ignored\n");
6639 		sdebug_ndelay = 0;
6640 	} else if (sdebug_ndelay > 0)
6641 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6642 
6643 	switch (sdebug_sector_size) {
6644 	case  512:
6645 	case 1024:
6646 	case 2048:
6647 	case 4096:
6648 		break;
6649 	default:
6650 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6651 		return -EINVAL;
6652 	}
6653 
6654 	switch (sdebug_dif) {
6655 	case T10_PI_TYPE0_PROTECTION:
6656 		break;
6657 	case T10_PI_TYPE1_PROTECTION:
6658 	case T10_PI_TYPE2_PROTECTION:
6659 	case T10_PI_TYPE3_PROTECTION:
6660 		have_dif_prot = true;
6661 		break;
6662 
6663 	default:
6664 		pr_err("dif must be 0, 1, 2 or 3\n");
6665 		return -EINVAL;
6666 	}
6667 
6668 	if (sdebug_num_tgts < 0) {
6669 		pr_err("num_tgts must be >= 0\n");
6670 		return -EINVAL;
6671 	}
6672 
6673 	if (sdebug_guard > 1) {
6674 		pr_err("guard must be 0 or 1\n");
6675 		return -EINVAL;
6676 	}
6677 
6678 	if (sdebug_ato > 1) {
6679 		pr_err("ato must be 0 or 1\n");
6680 		return -EINVAL;
6681 	}
6682 
6683 	if (sdebug_physblk_exp > 15) {
6684 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6685 		return -EINVAL;
6686 	}
6687 
6688 	sdebug_lun_am = sdebug_lun_am_i;
6689 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6690 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6691 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6692 	}
6693 
6694 	if (sdebug_max_luns > 256) {
6695 		if (sdebug_max_luns > 16384) {
6696 			pr_warn("max_luns can be no more than 16384, using default\n");
6697 			sdebug_max_luns = DEF_MAX_LUNS;
6698 		}
6699 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6700 	}
6701 
6702 	if (sdebug_lowest_aligned > 0x3fff) {
6703 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6704 		return -EINVAL;
6705 	}
6706 
6707 	if (submit_queues < 1) {
6708 		pr_err("submit_queues must be 1 or more\n");
6709 		return -EINVAL;
6710 	}
6711 
6712 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6713 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6714 		return -EINVAL;
6715 	}
6716 
6717 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6718 	    (sdebug_host_max_queue < 0)) {
6719 		pr_err("host_max_queue must be in range [0, %d]\n",
6720 		       SDEBUG_CANQUEUE);
6721 		return -EINVAL;
6722 	}
6723 
6724 	if (sdebug_host_max_queue &&
6725 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6726 		sdebug_max_queue = sdebug_host_max_queue;
6727 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6728 			sdebug_max_queue);
6729 	}
6730 
6731 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6732 			       GFP_KERNEL);
6733 	if (sdebug_q_arr == NULL)
6734 		return -ENOMEM;
6735 	for (k = 0; k < submit_queues; ++k)
6736 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6737 
6738 	/*
6739 	 * check for host managed zoned block device specified with
6740 	 * ptype=0x14 or zbc=XXX.
6741 	 */
6742 	if (sdebug_ptype == TYPE_ZBC) {
6743 		sdeb_zbc_model = BLK_ZONED_HM;
6744 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6745 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6746 		if (k < 0) {
6747 			ret = k;
6748 			goto free_vm;
6749 		}
6750 		sdeb_zbc_model = k;
6751 		switch (sdeb_zbc_model) {
6752 		case BLK_ZONED_NONE:
6753 		case BLK_ZONED_HA:
6754 			sdebug_ptype = TYPE_DISK;
6755 			break;
6756 		case BLK_ZONED_HM:
6757 			sdebug_ptype = TYPE_ZBC;
6758 			break;
6759 		default:
6760 			pr_err("Invalid ZBC model\n");
6761 			ret = -EINVAL;
			goto free_q_arr;	/* don't leak sdebug_q_arr */
6762 		}
6763 	}
6764 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6765 		sdeb_zbc_in_use = true;
6766 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6767 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6768 	}
6769 
6770 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6771 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6772 	if (sdebug_dev_size_mb < 1)
6773 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6774 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6775 	sdebug_store_sectors = sz / sdebug_sector_size;
6776 	sdebug_capacity = get_sdebug_capacity();
6777 
6778 	/* play around with geometry, don't waste too much on track 0 */
6779 	sdebug_heads = 8;
6780 	sdebug_sectors_per = 32;
6781 	if (sdebug_dev_size_mb >= 256)
6782 		sdebug_heads = 64;
6783 	else if (sdebug_dev_size_mb >= 16)
6784 		sdebug_heads = 32;
6785 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6786 			       (sdebug_sectors_per * sdebug_heads);
6787 	if (sdebug_cylinders_per >= 1024) {
6788 		/* other LLDs do this; implies >= 1GB ram disk ... */
6789 		sdebug_heads = 255;
6790 		sdebug_sectors_per = 63;
6791 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6792 			       (sdebug_sectors_per * sdebug_heads);
6793 	}
6794 	if (scsi_debug_lbp()) {
6795 		sdebug_unmap_max_blocks =
6796 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6797 
6798 		sdebug_unmap_max_desc =
6799 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6800 
6801 		sdebug_unmap_granularity =
6802 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6803 
6804 		if (sdebug_unmap_alignment &&
6805 		    sdebug_unmap_granularity <=
6806 		    sdebug_unmap_alignment) {
6807 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6808 			ret = -EINVAL;
6809 			goto free_q_arr;
6810 		}
6811 	}
6812 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6813 	if (want_store) {
6814 		idx = sdebug_add_store();
6815 		if (idx < 0) {
6816 			ret = idx;
6817 			goto free_q_arr;
6818 		}
6819 	}
6820 
6821 	pseudo_primary = root_device_register("pseudo_0");
6822 	if (IS_ERR(pseudo_primary)) {
6823 		pr_warn("root_device_register() error\n");
6824 		ret = PTR_ERR(pseudo_primary);
6825 		goto free_vm;
6826 	}
6827 	ret = bus_register(&pseudo_lld_bus);
6828 	if (ret < 0) {
6829 		pr_warn("bus_register error: %d\n", ret);
6830 		goto dev_unreg;
6831 	}
6832 	ret = driver_register(&sdebug_driverfs_driver);
6833 	if (ret < 0) {
6834 		pr_warn("driver_register error: %d\n", ret);
6835 		goto bus_unreg;
6836 	}
6837 
6838 	hosts_to_add = sdebug_add_host;
6839 	sdebug_add_host = 0;
6840 
6841 	for (k = 0; k < hosts_to_add; k++) {
6842 		if (want_store && k == 0) {
6843 			ret = sdebug_add_host_helper(idx);
6844 			if (ret < 0) {
6845 				pr_err("add_host_helper k=%d, error=%d\n",
6846 				       k, -ret);
6847 				break;
6848 			}
6849 		} else {
6850 			ret = sdebug_do_add_host(want_store &&
6851 						 sdebug_per_host_store);
6852 			if (ret < 0) {
6853 				pr_err("add_host k=%d error=%d\n", k, -ret);
6854 				break;
6855 			}
6856 		}
6857 	}
6858 	if (sdebug_verbose)
6859 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6860 
6861 	return 0;
6862 
6863 bus_unreg:
6864 	bus_unregister(&pseudo_lld_bus);
6865 dev_unreg:
6866 	root_device_unregister(pseudo_primary);
6867 free_vm:
6868 	sdebug_erase_store(idx, NULL);
6869 free_q_arr:
6870 	kfree(sdebug_q_arr);
6871 	return ret;
6872 }
6873 
6874 static void __exit scsi_debug_exit(void)
6875 {
6876 	int k = sdebug_num_hosts;
6877 
6878 	stop_all_queued();
6879 	for (; k; k--)
6880 		sdebug_do_remove_host(true);
6881 	free_all_queued();
6882 	driver_unregister(&sdebug_driverfs_driver);
6883 	bus_unregister(&pseudo_lld_bus);
6884 	root_device_unregister(pseudo_primary);
6885 
6886 	sdebug_erase_all_stores(false);
6887 	xa_destroy(per_store_ap);
6888 }
6889 
6890 device_initcall(scsi_debug_init);
6891 module_exit(scsi_debug_exit);
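/* A typical invocation (all parameters are optional), for example:
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * simulates a host with 2 targets of 4 LUNs each, backed by a 256 MB
 * ramdisk store.
 */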
6892 
6893 static void sdebug_release_adapter(struct device *dev)
6894 {
6895 	struct sdebug_host_info *sdbg_host;
6896 
6897 	sdbg_host = to_sdebug_host(dev);
6898 	kfree(sdbg_host);
6899 }
6900 
6901 /* idx must be valid; if sip is NULL then it will be obtained using idx */
6902 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6903 {
6904 	if (idx < 0)
6905 		return;
6906 	if (!sip) {
6907 		if (xa_empty(per_store_ap))
6908 			return;
6909 		sip = xa_load(per_store_ap, idx);
6910 		if (!sip)
6911 			return;
6912 	}
6913 	vfree(sip->map_storep);
6914 	vfree(sip->dif_storep);
6915 	vfree(sip->storep);
6916 	xa_erase(per_store_ap, idx);
6917 	kfree(sip);
6918 }
6919 
6920 /* Assume apart_from_first==false only in shutdown case. */
6921 static void sdebug_erase_all_stores(bool apart_from_first)
6922 {
6923 	unsigned long idx;
6924 	struct sdeb_store_info *sip = NULL;
6925 
6926 	xa_for_each(per_store_ap, idx, sip) {
6927 		if (apart_from_first)
6928 			apart_from_first = false;
6929 		else
6930 			sdebug_erase_store(idx, sip);
6931 	}
6932 	if (apart_from_first)
6933 		sdeb_most_recent_idx = sdeb_first_idx;
6934 }
6935 
6936 /*
6937  * Returns store xarray new element index (idx) if >=0 else negated errno.
6938  * Limit the number of stores to 65536.
6939  */
6940 static int sdebug_add_store(void)
6941 {
6942 	int res;
6943 	u32 n_idx;
6944 	unsigned long iflags;
6945 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6946 	struct sdeb_store_info *sip = NULL;
6947 	struct xa_limit xal = { .max = (1 << 16) - 1, .min = 0 };	/* max is inclusive */
6948 
6949 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6950 	if (!sip)
6951 		return -ENOMEM;
6952 
6953 	xa_lock_irqsave(per_store_ap, iflags);
6954 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6955 	if (unlikely(res < 0)) {
6956 		xa_unlock_irqrestore(per_store_ap, iflags);
6957 		kfree(sip);
6958 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6959 		return res;
6960 	}
6961 	sdeb_most_recent_idx = n_idx;
6962 	if (sdeb_first_idx < 0)
6963 		sdeb_first_idx = n_idx;
6964 	xa_unlock_irqrestore(per_store_ap, iflags);
6965 
6966 	res = -ENOMEM;
6967 	sip->storep = vzalloc(sz);
6968 	if (!sip->storep) {
6969 		pr_err("user data oom\n");
6970 		goto err;
6971 	}
6972 	if (sdebug_num_parts > 0)
6973 		sdebug_build_parts(sip->storep, sz);
6974 
6975 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6976 	if (sdebug_dix) {
6977 		int dif_size;
6978 
6979 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6980 		sip->dif_storep = vmalloc(dif_size);
6981 
6982 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6983 			sip->dif_storep);
6984 
6985 		if (!sip->dif_storep) {
6986 			pr_err("DIX oom\n");
6987 			goto err;
6988 		}
6989 		memset(sip->dif_storep, 0xff, dif_size);
6990 	}
6991 	/* Logical Block Provisioning */
6992 	if (scsi_debug_lbp()) {
6993 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6994 		sip->map_storep = vmalloc(array_size(sizeof(long),
6995 						     BITS_TO_LONGS(map_size)));
6996 
6997 		pr_info("%lu provisioning blocks\n", map_size);
6998 
6999 		if (!sip->map_storep) {
7000 			pr_err("LBP map oom\n");
7001 			goto err;
7002 		}
7003 
7004 		bitmap_zero(sip->map_storep, map_size);
7005 
7006 		/* Map first 1KB for partition table */
7007 		if (sdebug_num_parts)
7008 			map_region(sip, 0, 2);
7009 	}
7010 
7011 	rwlock_init(&sip->macc_lck);
7012 	return (int)n_idx;
7013 err:
7014 	sdebug_erase_store((int)n_idx, sip);
7015 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7016 	return res;
7017 }
7018 
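/*
 * Adds one simulated host bound to store @per_host_idx (the first store
 * when negative), pre-allocates its num_tgts * max_luns device info
 * entries and registers the host's device on the pseudo bus.
 */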
7019 static int sdebug_add_host_helper(int per_host_idx)
7020 {
7021 	int k, devs_per_host, idx;
7022 	int error = -ENOMEM;
7023 	struct sdebug_host_info *sdbg_host;
7024 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7025 
7026 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7027 	if (!sdbg_host)
7028 		return -ENOMEM;
7029 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7030 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7031 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7032 	sdbg_host->si_idx = idx;
7033 
7034 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7035 
7036 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7037 	for (k = 0; k < devs_per_host; k++) {
7038 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7039 		if (!sdbg_devinfo)
7040 			goto clean;
7041 	}
7042 
7043 	spin_lock(&sdebug_host_list_lock);
7044 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7045 	spin_unlock(&sdebug_host_list_lock);
7046 
7047 	sdbg_host->dev.bus = &pseudo_lld_bus;
7048 	sdbg_host->dev.parent = pseudo_primary;
7049 	sdbg_host->dev.release = &sdebug_release_adapter;
7050 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7051 
7052 	error = device_register(&sdbg_host->dev);
7053 	if (error)
7054 		goto clean;
7055 
7056 	++sdebug_num_hosts;
7057 	return 0;
7058 
7059 clean:
7060 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7061 				 dev_list) {
7062 		list_del(&sdbg_devinfo->dev_list);
7063 		kfree(sdbg_devinfo->zstate);
7064 		kfree(sdbg_devinfo);
7065 	}
7066 	kfree(sdbg_host);
7067 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7068 	return error;
7069 }
7070 
7071 static int sdebug_do_add_host(bool mk_new_store)
7072 {
7073 	int ph_idx = sdeb_most_recent_idx;
7074 
7075 	if (mk_new_store) {
7076 		ph_idx = sdebug_add_store();
7077 		if (ph_idx < 0)
7078 			return ph_idx;
7079 	}
7080 	return sdebug_add_host_helper(ph_idx);
7081 }
7082 
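/*
 * Removes the most recently added host. Unless this is the final
 * removal (the_end), the host's backing store is marked "not in use"
 * if no remaining host still references it.
 */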
7083 static void sdebug_do_remove_host(bool the_end)
7084 {
7085 	int idx = -1;
7086 	struct sdebug_host_info *sdbg_host = NULL;
7087 	struct sdebug_host_info *sdbg_host2;
7088 
7089 	spin_lock(&sdebug_host_list_lock);
7090 	if (!list_empty(&sdebug_host_list)) {
7091 		sdbg_host = list_entry(sdebug_host_list.prev,
7092 				       struct sdebug_host_info, host_list);
7093 		idx = sdbg_host->si_idx;
7094 	}
7095 	if (!the_end && idx >= 0) {
7096 		bool unique = true;
7097 
7098 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7099 			if (sdbg_host2 == sdbg_host)
7100 				continue;
7101 			if (idx == sdbg_host2->si_idx) {
7102 				unique = false;
7103 				break;
7104 			}
7105 		}
7106 		if (unique) {
7107 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7108 			if (idx == sdeb_most_recent_idx)
7109 				--sdeb_most_recent_idx;
7110 		}
7111 	}
7112 	if (sdbg_host)
7113 		list_del(&sdbg_host->host_list);
7114 	spin_unlock(&sdebug_host_list_lock);
7115 
7116 	if (!sdbg_host)
7117 		return;
7118 
7119 	device_unregister(&sdbg_host->dev);
7120 	--sdebug_num_hosts;
7121 }
7122 
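/*
 * The change_queue_depth host template callback: clamps the requested
 * depth to [1, SDEBUG_CANQUEUE + 10] and returns the depth actually set
 * on the scsi_device.
 */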
7123 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7124 {
7125 	int num_in_q = 0;
7126 	struct sdebug_dev_info *devip;
7127 
7128 	block_unblock_all_queues(true);
7129 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7130 	if (NULL == devip) {
7131 		block_unblock_all_queues(false);
7132 		return	-ENODEV;
7133 	}
7134 	num_in_q = atomic_read(&devip->num_in_q);
7135 
7136 	if (qdepth < 1)
7137 		qdepth = 1;
7138 	/* allow qdepth to exceed the host's qc_arr element count, for testing */
7139 	if (qdepth > SDEBUG_CANQUEUE + 10)
7140 		qdepth = SDEBUG_CANQUEUE + 10;
7141 	scsi_change_queue_depth(sdev, qdepth);
7142 
7143 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7144 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7145 			    __func__, qdepth, num_in_q);
7146 	}
7147 	block_unblock_all_queues(false);
7148 	return sdev->queue_depth;
7149 }
7150 
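/*
 * Returns true when the current command should be ignored (no response
 * sent) so that the mid-layer sees a timeout; selection is driven by the
 * every_nth counter and the SDEBUG_OPT_TIMEOUT/MAC_TIMEOUT options.
 */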
7151 static bool fake_timeout(struct scsi_cmnd *scp)
7152 {
7153 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7154 		if (sdebug_every_nth < -1)
7155 			sdebug_every_nth = -1;
7156 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7157 			return true; /* ignore command causing timeout */
7158 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7159 			 scsi_medium_access_command(scp))
7160 			return true; /* time out reads and writes */
7161 	}
7162 	return false;
7163 }
7164 
7165 /* Response to TUR or media access command when device stopped */
7166 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7167 {
7168 	int stopped_state;
7169 	u64 diff_ns = 0;
7170 	ktime_t now_ts = ktime_get_boottime();
7171 	struct scsi_device *sdp = scp->device;
7172 
7173 	stopped_state = atomic_read(&devip->stopped);
7174 	if (stopped_state == 2) {
7175 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7176 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7177 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7178 				/* tur_ms_to_ready timer expired */
7179 				atomic_set(&devip->stopped, 0);
7180 				return 0;
7181 			}
7182 		}
7183 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7184 		if (sdebug_verbose)
7185 			sdev_printk(KERN_INFO, sdp,
7186 				    "%s: Not ready: in process of becoming ready\n", my_name);
7187 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7188 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7189 
7190 			if (diff_ns <= tur_nanosecs_to_ready)
7191 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7192 			else
7193 				diff_ns = tur_nanosecs_to_ready;
7194 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7195 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7196 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7197 						   diff_ns);
7198 			return check_condition_result;
7199 		}
7200 	}
7201 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7202 	if (sdebug_verbose)
7203 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7204 			    my_name);
7205 	return check_condition_result;
7206 }
7207 
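/*
 * The queuecommand entry point: maps the CDB opcode (plus service
 * action, for opcodes with variants) onto an opcode_info_t entry, runs
 * the generic checks (LUN range, unit attentions, strict CDB-mask
 * checking, stopped/not-ready state), then hands the selected resp_*
 * handler to schedule_resp() together with the configured delay.
 */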
7208 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7209 				   struct scsi_cmnd *scp)
7210 {
7211 	u8 sdeb_i;
7212 	struct scsi_device *sdp = scp->device;
7213 	const struct opcode_info_t *oip;
7214 	const struct opcode_info_t *r_oip;
7215 	struct sdebug_dev_info *devip;
7216 	u8 *cmd = scp->cmnd;
7217 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7218 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7219 	int k, na;
7220 	int errsts = 0;
7221 	u64 lun_index = sdp->lun & 0x3FFF;
7222 	u32 flags;
7223 	u16 sa;
7224 	u8 opcode = cmd[0];
7225 	bool has_wlun_rl;
7226 	bool inject_now;
7227 
7228 	scsi_set_resid(scp, 0);
7229 	if (sdebug_statistics) {
7230 		atomic_inc(&sdebug_cmnd_count);
7231 		inject_now = inject_on_this_cmd();
7232 	} else {
7233 		inject_now = false;
7234 	}
7235 	if (unlikely(sdebug_verbose &&
7236 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7237 		char b[120];
7238 		int n, len, sb;
7239 
7240 		len = scp->cmd_len;
7241 		sb = (int)sizeof(b);
7242 		if (len > 32)
7243 			strcpy(b, "too long, over 32 bytes");
7244 		else {
7245 			for (k = 0, n = 0; k < len && n < sb; ++k)
7246 				n += scnprintf(b + n, sb - n, "%02x ",
7247 					       (u32)cmd[k]);
7248 		}
7249 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7250 			    blk_mq_unique_tag(scp->request), b);
7251 	}
7252 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7253 		return SCSI_MLQUEUE_HOST_BUSY;
7254 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7255 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7256 		goto err_out;
7257 
7258 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7259 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7260 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7261 	if (unlikely(!devip)) {
7262 		devip = find_build_dev_info(sdp);
7263 		if (NULL == devip)
7264 			goto err_out;
7265 	}
7266 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7267 		atomic_set(&sdeb_inject_pending, 1);
7268 
7269 	na = oip->num_attached;
7270 	r_pfp = oip->pfp;
7271 	if (na) {	/* multiple commands with this opcode */
7272 		r_oip = oip;
7273 		if (FF_SA & r_oip->flags) {
7274 			if (F_SA_LOW & oip->flags)
7275 				sa = 0x1f & cmd[1];
7276 			else
7277 				sa = get_unaligned_be16(cmd + 8);
7278 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7279 				if (opcode == oip->opcode && sa == oip->sa)
7280 					break;
7281 			}
7282 		} else {   /* since no service action only check opcode */
7283 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7284 				if (opcode == oip->opcode)
7285 					break;
7286 			}
7287 		}
7288 		if (k > na) {
7289 			if (F_SA_LOW & r_oip->flags)
7290 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7291 			else if (F_SA_HIGH & r_oip->flags)
7292 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7293 			else
7294 				mk_sense_invalid_opcode(scp);
7295 			goto check_cond;
7296 		}
7297 	}	/* else (when na==0) we assume the oip is a match */
7298 	flags = oip->flags;
7299 	if (unlikely(F_INV_OP & flags)) {
7300 		mk_sense_invalid_opcode(scp);
7301 		goto check_cond;
7302 	}
7303 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7304 		if (sdebug_verbose)
7305 			sdev_printk(KERN_INFO, sdp,
7306 				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
7307 		mk_sense_invalid_opcode(scp);
7308 		goto check_cond;
7309 	}
7310 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7311 		u8 rem;
7312 		int j;
7313 
7314 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7315 			rem = ~oip->len_mask[k] & cmd[k];
7316 			if (rem) {
7317 				for (j = 7; j >= 0; --j, rem <<= 1) {
7318 					if (0x80 & rem)
7319 						break;
7320 				}
7321 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7322 				goto check_cond;
7323 			}
7324 		}
7325 	}
7326 	if (unlikely(!(F_SKIP_UA & flags) &&
7327 		     find_first_bit(devip->uas_bm,
7328 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7329 		errsts = make_ua(scp, devip);
7330 		if (errsts)
7331 			goto check_cond;
7332 	}
7333 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7334 		     atomic_read(&devip->stopped))) {
7335 		errsts = resp_not_ready(scp, devip);
7336 		if (errsts)
7337 			goto fini;
7338 	}
7339 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7340 		goto fini;
7341 	if (unlikely(sdebug_every_nth)) {
7342 		if (fake_timeout(scp))
7343 			return 0;	/* ignore command: make trouble */
7344 	}
7345 	if (likely(oip->pfp))
7346 		pfp = oip->pfp;	/* calls a resp_* function */
7347 	else
7348 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7349 
7350 fini:
7351 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7352 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7353 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7354 					    sdebug_ndelay > 10000)) {
7355 		/*
7356 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7357 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7358 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7359 		 * For Synchronize Cache want 1/20 of SSU's delay.
7360 		 */
7361 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7362 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7363 
7364 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7365 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7366 	} else
7367 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7368 				     sdebug_ndelay);
7369 check_cond:
7370 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7371 err_out:
7372 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7373 }
7374 
7375 static struct scsi_host_template sdebug_driver_template = {
7376 	.show_info =		scsi_debug_show_info,
7377 	.write_info =		scsi_debug_write_info,
7378 	.proc_name =		sdebug_proc_name,
7379 	.name =			"SCSI DEBUG",
7380 	.info =			scsi_debug_info,
7381 	.slave_alloc =		scsi_debug_slave_alloc,
7382 	.slave_configure =	scsi_debug_slave_configure,
7383 	.slave_destroy =	scsi_debug_slave_destroy,
7384 	.ioctl =		scsi_debug_ioctl,
7385 	.queuecommand =		scsi_debug_queuecommand,
7386 	.change_queue_depth =	sdebug_change_qdepth,
7387 	.eh_abort_handler =	scsi_debug_abort,
7388 	.eh_device_reset_handler = scsi_debug_device_reset,
7389 	.eh_target_reset_handler = scsi_debug_target_reset,
7390 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7391 	.eh_host_reset_handler = scsi_debug_host_reset,
7392 	.can_queue =		SDEBUG_CANQUEUE,
7393 	.this_id =		7,
7394 	.sg_tablesize =		SG_MAX_SEGMENTS,
7395 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7396 	.max_sectors =		-1U,
7397 	.max_segment_size =	-1U,
7398 	.module =		THIS_MODULE,
7399 	.track_queue_depth =	1,
7400 };
7401 
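/*
 * Bus probe callback for the pseudo bus: allocates a Scsi_Host for one
 * simulated adapter, sizes its queues, advertises DIF/DIX protection
 * capabilities and the guard type, then adds and scans the host.
 */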
7402 static int sdebug_driver_probe(struct device *dev)
7403 {
7404 	int error = 0;
7405 	struct sdebug_host_info *sdbg_host;
7406 	struct Scsi_Host *hpnt;
7407 	int hprot;
7408 
7409 	sdbg_host = to_sdebug_host(dev);
7410 
7411 	if (sdebug_host_max_queue)
7412 		sdebug_driver_template.can_queue = sdebug_host_max_queue;
7413 	else
7414 		sdebug_driver_template.can_queue = sdebug_max_queue;
7415 	if (!sdebug_clustering)
7416 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7417 
7418 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7419 	if (NULL == hpnt) {
7420 		pr_err("scsi_host_alloc failed\n");
7421 		error = -ENODEV;
7422 		return error;
7423 	}
7424 	if (submit_queues > nr_cpu_ids) {
7425 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7426 			my_name, submit_queues, nr_cpu_ids);
7427 		submit_queues = nr_cpu_ids;
7428 	}
7429 	/*
7430 	 * Decide whether to tell scsi subsystem that we want mq. The
7431 	 * following should give the same answer for each host. If the host
7432 	 * has a limit of hostwide max commands, then do not set.
7433 	 */
7434 	if (!sdebug_host_max_queue)
7435 		hpnt->nr_hw_queues = submit_queues;
7436 
7437 	sdbg_host->shost = hpnt;
7438 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7439 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7440 		hpnt->max_id = sdebug_num_tgts + 1;
7441 	else
7442 		hpnt->max_id = sdebug_num_tgts;
7443 	/* = sdebug_max_luns; */
7444 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7445 
7446 	hprot = 0;
7447 
7448 	switch (sdebug_dif) {
7449 
7450 	case T10_PI_TYPE1_PROTECTION:
7451 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7452 		if (sdebug_dix)
7453 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7454 		break;
7455 
7456 	case T10_PI_TYPE2_PROTECTION:
7457 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7458 		if (sdebug_dix)
7459 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7460 		break;
7461 
7462 	case T10_PI_TYPE3_PROTECTION:
7463 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7464 		if (sdebug_dix)
7465 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7466 		break;
7467 
7468 	default:
7469 		if (sdebug_dix)
7470 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7471 		break;
7472 	}
7473 
7474 	scsi_host_set_prot(hpnt, hprot);
7475 
7476 	if (have_dif_prot || sdebug_dix)
7477 		pr_info("host protection%s%s%s%s%s%s%s\n",
7478 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7479 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7480 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7481 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7482 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7483 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7484 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7485 
7486 	if (sdebug_guard == 1)
7487 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7488 	else
7489 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7490 
7491 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7492 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7493 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7494 		sdebug_statistics = true;
7495 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7496 	if (error) {
7497 		pr_err("scsi_add_host failed\n");
7498 		error = -ENODEV;
7499 		scsi_host_put(hpnt);
7500 	} else {
7501 		scsi_scan_host(hpnt);
7502 	}
7503 
7504 	return error;
7505 }
7506 
7507 static int sdebug_driver_remove(struct device *dev)
7508 {
7509 	struct sdebug_host_info *sdbg_host;
7510 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7511 
7512 	sdbg_host = to_sdebug_host(dev);
7513 
7514 	if (!sdbg_host) {
7515 		pr_err("Unable to locate host info\n");
7516 		return -ENODEV;
7517 	}
7518 
7519 	scsi_remove_host(sdbg_host->shost);
7520 
7521 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7522 				 dev_list) {
7523 		list_del(&sdbg_devinfo->dev_list);
7524 		kfree(sdbg_devinfo->zstate);
7525 		kfree(sdbg_devinfo);
7526 	}
7527 
7528 	scsi_host_put(sdbg_host->shost);
7529 	return 0;
7530 }
7531 
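/* Every device on the pseudo bus matches this driver. */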
7532 static int pseudo_lld_bus_match(struct device *dev,
7533 				struct device_driver *dev_driver)
7534 {
7535 	return 1;
7536 }
7537 
7538 static struct bus_type pseudo_lld_bus = {
7539 	.name = "pseudo",
7540 	.match = pseudo_lld_bus_match,
7541 	.probe = sdebug_driver_probe,
7542 	.remove = sdebug_driver_remove,
7543 	.drv_groups = sdebug_drv_groups,
7544 };
7545