xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision c10fa55f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 /* When "every_nth" > 0 then modulo "every_nth" commands:
191  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
192  *   - a RECOVERED_ERROR is simulated on successful read and write
193  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
194  *   - a TRANSPORT_ERROR is simulated on successful read and write
195  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
196  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
197  *     CMD_ABORT
198  *
199  * When "every_nth" < 0 then after "- every_nth" commands the selected
200  * error will be injected. The error will be injected on every subsequent
201  * command until some other action occurs; for example, the user writing
202  * a new value (other than -1 or 1) to every_nth:
203  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
204  */
205 
206 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
207  * priority order. In the subset implemented here lower numbers have higher
208  * priority. The UA numbers should be a sequence starting from 0 with
209  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
210 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
211 #define SDEBUG_UA_BUS_RESET 1
212 #define SDEBUG_UA_MODE_CHANGED 2
213 #define SDEBUG_UA_CAPACITY_CHANGED 3
214 #define SDEBUG_UA_LUNS_CHANGED 4
215 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
216 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
217 #define SDEBUG_NUM_UAS 7
218 
219 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
220  * sector on read commands: */
221 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
222 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
223 
224 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
225  * or "peripheral device" addressing (value 0) */
226 #define SAM2_LUN_ADDRESS_METHOD 0
227 
228 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
229  * (for response) per submit queue at one time. Can be reduced by max_queue
230  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
231  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
232  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
233  * but cannot exceed SDEBUG_CANQUEUE .
234  */
235 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
236 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
237 #define DEF_CMD_PER_LUN  255
238 
239 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
240 #define F_D_IN			1	/* Data-in command (e.g. READ) */
241 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
242 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
243 #define F_D_UNKN		8
244 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
245 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
246 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
247 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
248 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
249 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
250 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
251 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
252 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
253 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
254 
255 /* Useful combinations of the above flags */
256 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
257 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
258 #define FF_SA (F_SA_HIGH | F_SA_LOW)
259 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
260 
261 #define SDEBUG_MAX_PARTS 4
262 
263 #define SDEBUG_MAX_CMD_LEN 32
264 
265 #define SDEB_XA_NOT_IN_USE XA_MARK_1
266 
267 /* Zone types (zbcr05 table 25) */
268 enum sdebug_z_type {
269 	ZBC_ZONE_TYPE_CNV	= 0x1,
270 	ZBC_ZONE_TYPE_SWR	= 0x2,
271 	ZBC_ZONE_TYPE_SWP	= 0x3,
272 };
273 
274 /* enumeration names taken from table 26, zbcr05 */
275 enum sdebug_z_cond {
276 	ZBC_NOT_WRITE_POINTER	= 0x0,
277 	ZC1_EMPTY		= 0x1,
278 	ZC2_IMPLICIT_OPEN	= 0x2,
279 	ZC3_EXPLICIT_OPEN	= 0x3,
280 	ZC4_CLOSED		= 0x4,
281 	ZC6_READ_ONLY		= 0xd,
282 	ZC5_FULL		= 0xe,
283 	ZC7_OFFLINE		= 0xf,
284 };
285 
286 struct sdeb_zone_state {	/* ZBC: per zone state */
287 	enum sdebug_z_type z_type;
288 	enum sdebug_z_cond z_cond;
289 	bool z_non_seq_resource;
290 	unsigned int z_size;
291 	sector_t z_start;
292 	sector_t z_wp;
293 };
294 
295 struct sdebug_dev_info {
296 	struct list_head dev_list;
297 	unsigned int channel;
298 	unsigned int target;
299 	u64 lun;
300 	uuid_t lu_name;
301 	struct sdebug_host_info *sdbg_host;
302 	unsigned long uas_bm[1];
303 	atomic_t num_in_q;
304 	atomic_t stopped;
305 	bool used;
306 
307 	/* For ZBC devices */
308 	enum blk_zoned_model zmodel;
309 	unsigned int zsize;
310 	unsigned int zsize_shift;
311 	unsigned int nr_zones;
312 	unsigned int nr_conv_zones;
313 	unsigned int nr_imp_open;
314 	unsigned int nr_exp_open;
315 	unsigned int nr_closed;
316 	unsigned int max_open;
317 	struct sdeb_zone_state *zstate;
318 };
319 
320 struct sdebug_host_info {
321 	struct list_head host_list;
322 	int si_idx;	/* sdeb_store_info (per host) xarray index */
323 	struct Scsi_Host *shost;
324 	struct device dev;
325 	struct list_head dev_info_list;
326 };
327 
328 /* There is an xarray of pointers to this struct's objects, one per host */
329 struct sdeb_store_info {
330 	rwlock_t macc_lck;	/* for atomic media access on this store */
331 	u8 *storep;		/* user data storage (ram) */
332 	struct t10_pi_tuple *dif_storep; /* protection info */
333 	void *map_storep;	/* provisioning map */
334 };
335 
336 #define to_sdebug_host(d)	\
337 	container_of(d, struct sdebug_host_info, dev)
338 
339 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
340 		      SDEB_DEFER_WQ = 2};
341 
342 struct sdebug_defer {
343 	struct hrtimer hrt;
344 	struct execute_work ew;
345 	int sqa_idx;	/* index of sdebug_queue array */
346 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
347 	int hc_idx;	/* hostwide tag index */
348 	int issuing_cpu;
349 	bool init_hrt;
350 	bool init_wq;
351 	bool aborted;	/* true when blk_abort_request() already called */
352 	enum sdeb_defer_type defer_t;
353 };
354 
355 struct sdebug_queued_cmd {
356 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
357 	 * instance indicates this slot is in use.
358 	 */
359 	struct sdebug_defer *sd_dp;
360 	struct scsi_cmnd *a_cmnd;
361 	unsigned int inj_recovered:1;
362 	unsigned int inj_transport:1;
363 	unsigned int inj_dif:1;
364 	unsigned int inj_dix:1;
365 	unsigned int inj_short:1;
366 	unsigned int inj_host_busy:1;
367 	unsigned int inj_cmd_abort:1;
368 };
369 
370 struct sdebug_queue {
371 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
372 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
373 	spinlock_t qc_lock;
374 	atomic_t blocked;	/* to temporarily stop more being queued */
375 };
376 
377 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
378 static atomic_t sdebug_completions;  /* count of deferred completions */
379 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
380 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
381 
382 struct opcode_info_t {
383 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
384 				/* for terminating element */
385 	u8 opcode;		/* if num_attached > 0, preferred */
386 	u16 sa;			/* service action */
387 	u32 flags;		/* OR-ed set of SDEB_F_* */
388 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
389 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
390 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
391 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
392 };
393 
394 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
395 enum sdeb_opcode_index {
396 	SDEB_I_INVALID_OPCODE =	0,
397 	SDEB_I_INQUIRY = 1,
398 	SDEB_I_REPORT_LUNS = 2,
399 	SDEB_I_REQUEST_SENSE = 3,
400 	SDEB_I_TEST_UNIT_READY = 4,
401 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
402 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
403 	SDEB_I_LOG_SENSE = 7,
404 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
405 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
406 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
407 	SDEB_I_START_STOP = 11,
408 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
409 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
410 	SDEB_I_MAINT_IN = 14,
411 	SDEB_I_MAINT_OUT = 15,
412 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
413 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
414 	SDEB_I_RESERVE = 18,		/* 6, 10 */
415 	SDEB_I_RELEASE = 19,		/* 6, 10 */
416 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
417 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
418 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
419 	SDEB_I_SEND_DIAG = 23,
420 	SDEB_I_UNMAP = 24,
421 	SDEB_I_WRITE_BUFFER = 25,
422 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
423 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
424 	SDEB_I_COMP_WRITE = 28,
425 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
426 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
427 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
428 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
429 };
430 
431 
432 static const unsigned char opcode_ind_arr[256] = {
433 /* 0x0; 0x0->0x1f: 6 byte cdbs */
434 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
435 	    0, 0, 0, 0,
436 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
437 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
438 	    SDEB_I_RELEASE,
439 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
440 	    SDEB_I_ALLOW_REMOVAL, 0,
441 /* 0x20; 0x20->0x3f: 10 byte cdbs */
442 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
443 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
444 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
445 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
446 /* 0x40; 0x40->0x5f: 10 byte cdbs */
447 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
448 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
449 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
450 	    SDEB_I_RELEASE,
451 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
452 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, SDEB_I_VARIABLE_LEN,
456 /* 0x80; 0x80->0x9f: 16 byte cdbs */
457 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
458 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
459 	0, 0, 0, SDEB_I_VERIFY,
460 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
461 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
462 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
463 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
464 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
465 	     SDEB_I_MAINT_OUT, 0, 0, 0,
466 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
467 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
468 	0, 0, 0, 0, 0, 0, 0, 0,
469 	0, 0, 0, 0, 0, 0, 0, 0,
470 /* 0xc0; 0xc0->0xff: vendor specific */
471 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
472 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
473 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
474 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
475 };
476 
477 /*
478  * The following "response" functions return the SCSI mid-level's 4 byte
479  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
480  * command completion, they can mask their return value with
481  * SDEG_RES_IMMED_MASK .
482  */
483 #define SDEG_RES_IMMED_MASK 0x40000000
484 
485 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
505 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
506 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
507 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
508 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
509 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
510 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
511 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
512 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
513 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
514 
515 static int sdebug_do_add_host(bool mk_new_store);
516 static int sdebug_add_host_helper(int per_host_idx);
517 static void sdebug_do_remove_host(bool the_end);
518 static int sdebug_add_store(void);
519 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
520 static void sdebug_erase_all_stores(bool apart_from_first);
521 
522 /*
523  * The following are overflow arrays for cdbs that "hit" the same index in
524  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
525  * should be placed in opcode_info_arr[], the others should be placed here.
526  */
527 static const struct opcode_info_t msense_iarr[] = {
528 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
529 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
530 };
531 
532 static const struct opcode_info_t mselect_iarr[] = {
533 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
534 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
535 };
536 
537 static const struct opcode_info_t read_iarr[] = {
538 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
539 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
540 	     0, 0, 0, 0} },
541 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
542 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
544 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
545 	     0xc7, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t write_iarr[] = {
549 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
550 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
553 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
554 		   0, 0, 0} },
555 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
556 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 		   0xbf, 0xc7, 0, 0, 0, 0} },
558 };
559 
560 static const struct opcode_info_t verify_iarr[] = {
561 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
562 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
563 		   0, 0, 0, 0, 0, 0} },
564 };
565 
566 static const struct opcode_info_t sa_in_16_iarr[] = {
567 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
568 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
569 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
570 };
571 
572 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
573 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
574 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
575 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
576 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
577 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
578 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
579 };
580 
581 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
582 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
583 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
584 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
585 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
586 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
587 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
588 };
589 
590 static const struct opcode_info_t write_same_iarr[] = {
591 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
592 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
593 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
594 };
595 
596 static const struct opcode_info_t reserve_iarr[] = {
597 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
598 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
599 };
600 
601 static const struct opcode_info_t release_iarr[] = {
602 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
603 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
604 };
605 
606 static const struct opcode_info_t sync_cache_iarr[] = {
607 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
608 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
610 };
611 
612 static const struct opcode_info_t pre_fetch_iarr[] = {
613 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
614 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
616 };
617 
618 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
619 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
620 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
622 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
623 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
624 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
625 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
626 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
627 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
628 };
629 
630 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
631 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
632 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
633 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
634 };
635 
636 
637 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
638  * plus the terminating elements for logic that scans this table such as
639  * REPORT SUPPORTED OPERATION CODES. */
640 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
641 /* 0 */
642 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
643 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
644 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
645 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
646 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
647 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
648 	     0, 0} },					/* REPORT LUNS */
649 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
650 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
651 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
652 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
653 /* 5 */
654 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
655 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
656 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
657 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
658 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
659 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
660 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
661 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
662 	     0, 0, 0} },
663 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
664 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
665 	     0, 0} },
666 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
667 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
668 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
669 /* 10 */
670 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
671 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
672 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
673 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
674 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
675 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
676 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
677 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
678 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
679 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
680 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
681 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
682 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
683 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
684 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
685 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
686 				0xff, 0, 0xc7, 0, 0, 0, 0} },
687 /* 15 */
688 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
689 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
691 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
692 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
693 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
694 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
695 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
696 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
697 	     0xff, 0xff} },
698 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
699 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
700 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
701 	     0} },
702 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
703 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
704 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
705 	     0} },
706 /* 20 */
707 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
708 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
709 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
710 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
711 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
712 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
713 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
714 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
715 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
716 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
717 /* 25 */
718 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
719 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
720 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
721 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
722 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
723 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
724 		 0, 0, 0, 0, 0} },
725 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
726 	    resp_sync_cache, sync_cache_iarr,
727 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
728 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
729 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
730 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
731 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
732 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
733 	    resp_pre_fetch, pre_fetch_iarr,
734 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
735 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
736 
737 /* 30 */
738 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
739 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
740 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
741 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
742 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
743 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
744 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
745 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
746 /* sentinel */
747 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
748 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
749 };
750 
751 static int sdebug_num_hosts;
752 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
753 static int sdebug_ato = DEF_ATO;
754 static int sdebug_cdb_len = DEF_CDB_LEN;
755 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
756 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
757 static int sdebug_dif = DEF_DIF;
758 static int sdebug_dix = DEF_DIX;
759 static int sdebug_dsense = DEF_D_SENSE;
760 static int sdebug_every_nth = DEF_EVERY_NTH;
761 static int sdebug_fake_rw = DEF_FAKE_RW;
762 static unsigned int sdebug_guard = DEF_GUARD;
763 static int sdebug_host_max_queue;	/* per host */
764 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
765 static int sdebug_max_luns = DEF_MAX_LUNS;
766 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
767 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
768 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
769 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
770 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
771 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
772 static int sdebug_no_uld;
773 static int sdebug_num_parts = DEF_NUM_PARTS;
774 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
775 static int sdebug_opt_blks = DEF_OPT_BLKS;
776 static int sdebug_opts = DEF_OPTS;
777 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
778 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
779 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
780 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
781 static int sdebug_sector_size = DEF_SECTOR_SIZE;
782 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
783 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
784 static unsigned int sdebug_lbpu = DEF_LBPU;
785 static unsigned int sdebug_lbpws = DEF_LBPWS;
786 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
787 static unsigned int sdebug_lbprz = DEF_LBPRZ;
788 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
789 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
790 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
791 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
792 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
793 static int sdebug_uuid_ctl = DEF_UUID_CTL;
794 static bool sdebug_random = DEF_RANDOM;
795 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
796 static bool sdebug_removable = DEF_REMOVABLE;
797 static bool sdebug_clustering;
798 static bool sdebug_host_lock = DEF_HOST_LOCK;
799 static bool sdebug_strict = DEF_STRICT;
800 static bool sdebug_any_injecting_opt;
801 static bool sdebug_verbose;
802 static bool have_dif_prot;
803 static bool write_since_sync;
804 static bool sdebug_statistics = DEF_STATISTICS;
805 static bool sdebug_wp;
806 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
807 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
808 static char *sdeb_zbc_model_s;
809 
810 static unsigned int sdebug_store_sectors;
811 static sector_t sdebug_capacity;	/* in sectors */
812 
813 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
814    may still need them */
815 static int sdebug_heads;		/* heads per disk */
816 static int sdebug_cylinders_per;	/* cylinders per surface */
817 static int sdebug_sectors_per;		/* sectors per cylinder */
818 
819 static LIST_HEAD(sdebug_host_list);
820 static DEFINE_SPINLOCK(sdebug_host_list_lock);
821 
822 static struct xarray per_store_arr;
823 static struct xarray *per_store_ap = &per_store_arr;
824 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
825 static int sdeb_most_recent_idx = -1;
826 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
827 
828 static unsigned long map_size;
829 static int num_aborts;
830 static int num_dev_resets;
831 static int num_target_resets;
832 static int num_bus_resets;
833 static int num_host_resets;
834 static int dix_writes;
835 static int dix_reads;
836 static int dif_errors;
837 
838 /* ZBC global data */
839 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
840 static int sdeb_zbc_zone_size_mb;
841 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
842 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
843 
844 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
845 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
846 
847 static DEFINE_RWLOCK(atomic_rw);
848 static DEFINE_RWLOCK(atomic_rw2);
849 
850 static rwlock_t *ramdisk_lck_a[2];
851 
852 static char sdebug_proc_name[] = MY_NAME;
853 static const char *my_name = MY_NAME;
854 
855 static struct bus_type pseudo_lld_bus;
856 
857 static struct device_driver sdebug_driverfs_driver = {
858 	.name 		= sdebug_proc_name,
859 	.bus		= &pseudo_lld_bus,
860 };
861 
862 static const int check_condition_result =
863 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
864 
865 static const int illegal_condition_result =
866 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
867 
868 static const int device_qfull_result =
869 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
870 
871 static const int condition_met_result = SAM_STAT_CONDITION_MET;
872 
873 
874 /* Only do the extra work involved in logical block provisioning if one or
875  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
876  * real reads and writes (i.e. not skipping them for speed).
877  */
878 static inline bool scsi_debug_lbp(void)
879 {
880 	return 0 == sdebug_fake_rw &&
881 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
882 }
883 
884 static void *lba2fake_store(struct sdeb_store_info *sip,
885 			    unsigned long long lba)
886 {
887 	struct sdeb_store_info *lsip = sip;
888 
889 	lba = do_div(lba, sdebug_store_sectors);
890 	if (!sip || !sip->storep) {
891 		WARN_ON_ONCE(true);
892 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
893 	}
894 	return lsip->storep + lba * sdebug_sector_size;
895 }
896 
897 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
898 				      sector_t sector)
899 {
900 	sector = sector_div(sector, sdebug_store_sectors);
901 
902 	return sip->dif_storep + sector;
903 }
904 
905 static void sdebug_max_tgts_luns(void)
906 {
907 	struct sdebug_host_info *sdbg_host;
908 	struct Scsi_Host *hpnt;
909 
910 	spin_lock(&sdebug_host_list_lock);
911 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
912 		hpnt = sdbg_host->shost;
913 		if ((hpnt->this_id >= 0) &&
914 		    (sdebug_num_tgts > hpnt->this_id))
915 			hpnt->max_id = sdebug_num_tgts + 1;
916 		else
917 			hpnt->max_id = sdebug_num_tgts;
918 		/* sdebug_max_luns; */
919 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
920 	}
921 	spin_unlock(&sdebug_host_list_lock);
922 }
923 
924 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
925 
926 /* Set in_bit to -1 to indicate no bit position of invalid field */
927 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
928 				 enum sdeb_cmd_data c_d,
929 				 int in_byte, int in_bit)
930 {
931 	unsigned char *sbuff;
932 	u8 sks[4];
933 	int sl, asc;
934 
935 	sbuff = scp->sense_buffer;
936 	if (!sbuff) {
937 		sdev_printk(KERN_ERR, scp->device,
938 			    "%s: sense_buffer is NULL\n", __func__);
939 		return;
940 	}
941 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
942 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
943 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
944 	memset(sks, 0, sizeof(sks));
945 	sks[0] = 0x80;
946 	if (c_d)
947 		sks[0] |= 0x40;
948 	if (in_bit >= 0) {
949 		sks[0] |= 0x8;
950 		sks[0] |= 0x7 & in_bit;
951 	}
952 	put_unaligned_be16(in_byte, sks + 1);
953 	if (sdebug_dsense) {
954 		sl = sbuff[7] + 8;
955 		sbuff[7] = sl;
956 		sbuff[sl] = 0x2;
957 		sbuff[sl + 1] = 0x6;
958 		memcpy(sbuff + sl + 4, sks, 3);
959 	} else
960 		memcpy(sbuff + 15, sks, 3);
961 	if (sdebug_verbose)
962 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
963 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
964 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
965 }
966 
967 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
968 {
969 	unsigned char *sbuff;
970 
971 	sbuff = scp->sense_buffer;
972 	if (!sbuff) {
973 		sdev_printk(KERN_ERR, scp->device,
974 			    "%s: sense_buffer is NULL\n", __func__);
975 		return;
976 	}
977 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
978 
979 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
980 
981 	if (sdebug_verbose)
982 		sdev_printk(KERN_INFO, scp->device,
983 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
984 			    my_name, key, asc, asq);
985 }
986 
987 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
988 {
989 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
990 }
991 
992 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
993 			    void __user *arg)
994 {
995 	if (sdebug_verbose) {
996 		if (0x1261 == cmd)
997 			sdev_printk(KERN_INFO, dev,
998 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
999 		else if (0x5331 == cmd)
1000 			sdev_printk(KERN_INFO, dev,
1001 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1002 				    __func__);
1003 		else
1004 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1005 				    __func__, cmd);
1006 	}
1007 	return -EINVAL;
1008 	/* return -ENOTTY; // correct return but upsets fdisk */
1009 }
1010 
1011 static void config_cdb_len(struct scsi_device *sdev)
1012 {
1013 	switch (sdebug_cdb_len) {
1014 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1015 		sdev->use_10_for_rw = false;
1016 		sdev->use_16_for_rw = false;
1017 		sdev->use_10_for_ms = false;
1018 		break;
1019 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1020 		sdev->use_10_for_rw = true;
1021 		sdev->use_16_for_rw = false;
1022 		sdev->use_10_for_ms = false;
1023 		break;
1024 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1025 		sdev->use_10_for_rw = true;
1026 		sdev->use_16_for_rw = false;
1027 		sdev->use_10_for_ms = true;
1028 		break;
1029 	case 16:
1030 		sdev->use_10_for_rw = false;
1031 		sdev->use_16_for_rw = true;
1032 		sdev->use_10_for_ms = true;
1033 		break;
1034 	case 32: /* No knobs to suggest this so same as 16 for now */
1035 		sdev->use_10_for_rw = false;
1036 		sdev->use_16_for_rw = true;
1037 		sdev->use_10_for_ms = true;
1038 		break;
1039 	default:
1040 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1041 			sdebug_cdb_len);
1042 		sdev->use_10_for_rw = true;
1043 		sdev->use_16_for_rw = false;
1044 		sdev->use_10_for_ms = false;
1045 		sdebug_cdb_len = 10;
1046 		break;
1047 	}
1048 }
1049 
1050 static void all_config_cdb_len(void)
1051 {
1052 	struct sdebug_host_info *sdbg_host;
1053 	struct Scsi_Host *shost;
1054 	struct scsi_device *sdev;
1055 
1056 	spin_lock(&sdebug_host_list_lock);
1057 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1058 		shost = sdbg_host->shost;
1059 		shost_for_each_device(sdev, shost) {
1060 			config_cdb_len(sdev);
1061 		}
1062 	}
1063 	spin_unlock(&sdebug_host_list_lock);
1064 }
1065 
1066 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1067 {
1068 	struct sdebug_host_info *sdhp;
1069 	struct sdebug_dev_info *dp;
1070 
1071 	spin_lock(&sdebug_host_list_lock);
1072 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1073 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1074 			if ((devip->sdbg_host == dp->sdbg_host) &&
1075 			    (devip->target == dp->target))
1076 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1077 		}
1078 	}
1079 	spin_unlock(&sdebug_host_list_lock);
1080 }
1081 
1082 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1083 {
1084 	int k;
1085 
1086 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1087 	if (k != SDEBUG_NUM_UAS) {
1088 		const char *cp = NULL;
1089 
1090 		switch (k) {
1091 		case SDEBUG_UA_POR:
1092 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1093 					POWER_ON_RESET_ASCQ);
1094 			if (sdebug_verbose)
1095 				cp = "power on reset";
1096 			break;
1097 		case SDEBUG_UA_BUS_RESET:
1098 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1099 					BUS_RESET_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "bus reset";
1102 			break;
1103 		case SDEBUG_UA_MODE_CHANGED:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1105 					MODE_CHANGED_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "mode parameters changed";
1108 			break;
1109 		case SDEBUG_UA_CAPACITY_CHANGED:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1111 					CAPACITY_CHANGED_ASCQ);
1112 			if (sdebug_verbose)
1113 				cp = "capacity data changed";
1114 			break;
1115 		case SDEBUG_UA_MICROCODE_CHANGED:
1116 			mk_sense_buffer(scp, UNIT_ATTENTION,
1117 					TARGET_CHANGED_ASC,
1118 					MICROCODE_CHANGED_ASCQ);
1119 			if (sdebug_verbose)
1120 				cp = "microcode has been changed";
1121 			break;
1122 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1123 			mk_sense_buffer(scp, UNIT_ATTENTION,
1124 					TARGET_CHANGED_ASC,
1125 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1126 			if (sdebug_verbose)
1127 				cp = "microcode has been changed without reset";
1128 			break;
1129 		case SDEBUG_UA_LUNS_CHANGED:
1130 			/*
1131 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1132 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1133 			 * on the target, until a REPORT LUNS command is
1134 			 * received.  SPC-4 behavior is to report it only once.
1135 			 * NOTE:  sdebug_scsi_level does not use the same
1136 			 * values as struct scsi_device->scsi_level.
1137 			 */
1138 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1139 				clear_luns_changed_on_target(devip);
1140 			mk_sense_buffer(scp, UNIT_ATTENTION,
1141 					TARGET_CHANGED_ASC,
1142 					LUNS_CHANGED_ASCQ);
1143 			if (sdebug_verbose)
1144 				cp = "reported luns data has changed";
1145 			break;
1146 		default:
1147 			pr_warn("unexpected unit attention code=%d\n", k);
1148 			if (sdebug_verbose)
1149 				cp = "unknown";
1150 			break;
1151 		}
1152 		clear_bit(k, devip->uas_bm);
1153 		if (sdebug_verbose)
1154 			sdev_printk(KERN_INFO, scp->device,
1155 				   "%s reports: Unit attention: %s\n",
1156 				   my_name, cp);
1157 		return check_condition_result;
1158 	}
1159 	return 0;
1160 }
1161 
1162 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1163 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1164 				int arr_len)
1165 {
1166 	int act_len;
1167 	struct scsi_data_buffer *sdb = &scp->sdb;
1168 
1169 	if (!sdb->length)
1170 		return 0;
1171 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1172 		return DID_ERROR << 16;
1173 
1174 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1175 				      arr, arr_len);
1176 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1177 
1178 	return 0;
1179 }
1180 
1181 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1182  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1183  * calls, not required to write in ascending offset order. Assumes resid
1184  * set to scsi_bufflen() prior to any calls.
1185  */
1186 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1187 				  int arr_len, unsigned int off_dst)
1188 {
1189 	unsigned int act_len, n;
1190 	struct scsi_data_buffer *sdb = &scp->sdb;
1191 	off_t skip = off_dst;
1192 
1193 	if (sdb->length <= off_dst)
1194 		return 0;
1195 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1196 		return DID_ERROR << 16;
1197 
1198 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1199 				       arr, arr_len, skip);
1200 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1201 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1202 		 scsi_get_resid(scp));
1203 	n = scsi_bufflen(scp) - (off_dst + act_len);
1204 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1205 	return 0;
1206 }
1207 
1208 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1209  * 'arr' or -1 if error.
1210  */
1211 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1212 			       int arr_len)
1213 {
1214 	if (!scsi_bufflen(scp))
1215 		return 0;
1216 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1217 		return -1;
1218 
1219 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1220 }
1221 
1222 
1223 static char sdebug_inq_vendor_id[9] = "Linux   ";
1224 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1225 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1226 /* Use some locally assigned NAAs for SAS addresses. */
1227 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1228 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1229 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1230 
1231 /* Device identification VPD page. Returns number of bytes placed in arr */
1232 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1233 			  int target_dev_id, int dev_id_num,
1234 			  const char *dev_id_str, int dev_id_str_len,
1235 			  const uuid_t *lu_name)
1236 {
1237 	int num, port_a;
1238 	char b[32];
1239 
1240 	port_a = target_dev_id + 1;
1241 	/* T10 vendor identifier field format (faked) */
1242 	arr[0] = 0x2;	/* ASCII */
1243 	arr[1] = 0x1;
1244 	arr[2] = 0x0;
1245 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1246 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1247 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1248 	num = 8 + 16 + dev_id_str_len;
1249 	arr[3] = num;
1250 	num += 4;
1251 	if (dev_id_num >= 0) {
1252 		if (sdebug_uuid_ctl) {
1253 			/* Locally assigned UUID */
1254 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1255 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1256 			arr[num++] = 0x0;
1257 			arr[num++] = 0x12;
1258 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1259 			arr[num++] = 0x0;
1260 			memcpy(arr + num, lu_name, 16);
1261 			num += 16;
1262 		} else {
1263 			/* NAA-3, Logical unit identifier (binary) */
1264 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1265 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1266 			arr[num++] = 0x0;
1267 			arr[num++] = 0x8;
1268 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1269 			num += 8;
1270 		}
1271 		/* Target relative port number */
1272 		arr[num++] = 0x61;	/* proto=sas, binary */
1273 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1274 		arr[num++] = 0x0;	/* reserved */
1275 		arr[num++] = 0x4;	/* length */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x0;
1279 		arr[num++] = 0x1;	/* relative port A */
1280 	}
1281 	/* NAA-3, Target port identifier */
1282 	arr[num++] = 0x61;	/* proto=sas, binary */
1283 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1284 	arr[num++] = 0x0;
1285 	arr[num++] = 0x8;
1286 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1287 	num += 8;
1288 	/* NAA-3, Target port group identifier */
1289 	arr[num++] = 0x61;	/* proto=sas, binary */
1290 	arr[num++] = 0x95;	/* piv=1, target port group id */
1291 	arr[num++] = 0x0;
1292 	arr[num++] = 0x4;
1293 	arr[num++] = 0;
1294 	arr[num++] = 0;
1295 	put_unaligned_be16(port_group_id, arr + num);
1296 	num += 2;
1297 	/* NAA-3, Target device identifier */
1298 	arr[num++] = 0x61;	/* proto=sas, binary */
1299 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1300 	arr[num++] = 0x0;
1301 	arr[num++] = 0x8;
1302 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1303 	num += 8;
1304 	/* SCSI name string: Target device identifier */
1305 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1306 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1307 	arr[num++] = 0x0;
1308 	arr[num++] = 24;
1309 	memcpy(arr + num, "naa.32222220", 12);
1310 	num += 12;
1311 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1312 	memcpy(arr + num, b, 8);
1313 	num += 8;
1314 	memset(arr + num, 0, 4);
1315 	num += 4;
1316 	return num;
1317 }
1318 
1319 static unsigned char vpd84_data[] = {
1320 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1321     0x22,0x22,0x22,0x0,0xbb,0x1,
1322     0x22,0x22,0x22,0x0,0xbb,0x2,
1323 };
1324 
1325 /*  Software interface identification VPD page */
1326 static int inquiry_vpd_84(unsigned char *arr)
1327 {
1328 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1329 	return sizeof(vpd84_data);
1330 }
1331 
1332 /* Management network addresses VPD page */
1333 static int inquiry_vpd_85(unsigned char *arr)
1334 {
1335 	int num = 0;
1336 	const char *na1 = "https://www.kernel.org/config";
1337 	const char *na2 = "http://www.kernel.org/log";
1338 	int plen, olen;
1339 
1340 	arr[num++] = 0x1;	/* lu, storage config */
1341 	arr[num++] = 0x0;	/* reserved */
1342 	arr[num++] = 0x0;
1343 	olen = strlen(na1);
1344 	plen = olen + 1;
1345 	if (plen % 4)
1346 		plen = ((plen / 4) + 1) * 4;
1347 	arr[num++] = plen;	/* length, null termianted, padded */
1348 	memcpy(arr + num, na1, olen);
1349 	memset(arr + num + olen, 0, plen - olen);
1350 	num += plen;
1351 
1352 	arr[num++] = 0x4;	/* lu, logging */
1353 	arr[num++] = 0x0;	/* reserved */
1354 	arr[num++] = 0x0;
1355 	olen = strlen(na2);
1356 	plen = olen + 1;
1357 	if (plen % 4)
1358 		plen = ((plen / 4) + 1) * 4;
1359 	arr[num++] = plen;	/* length, null terminated, padded */
1360 	memcpy(arr + num, na2, olen);
1361 	memset(arr + num + olen, 0, plen - olen);
1362 	num += plen;
1363 
1364 	return num;
1365 }
1366 
1367 /* SCSI ports VPD page */
1368 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1369 {
1370 	int num = 0;
1371 	int port_a, port_b;
1372 
1373 	port_a = target_dev_id + 1;
1374 	port_b = port_a + 1;
1375 	arr[num++] = 0x0;	/* reserved */
1376 	arr[num++] = 0x0;	/* reserved */
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1379 	memset(arr + num, 0, 6);
1380 	num += 6;
1381 	arr[num++] = 0x0;
1382 	arr[num++] = 12;	/* length tp descriptor */
1383 	/* naa-5 target port identifier (A) */
1384 	arr[num++] = 0x61;	/* proto=sas, binary */
1385 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1386 	arr[num++] = 0x0;	/* reserved */
1387 	arr[num++] = 0x8;	/* length */
1388 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1389 	num += 8;
1390 	arr[num++] = 0x0;	/* reserved */
1391 	arr[num++] = 0x0;	/* reserved */
1392 	arr[num++] = 0x0;
1393 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1394 	memset(arr + num, 0, 6);
1395 	num += 6;
1396 	arr[num++] = 0x0;
1397 	arr[num++] = 12;	/* length tp descriptor */
1398 	/* naa-5 target port identifier (B) */
1399 	arr[num++] = 0x61;	/* proto=sas, binary */
1400 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1401 	arr[num++] = 0x0;	/* reserved */
1402 	arr[num++] = 0x8;	/* length */
1403 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1404 	num += 8;
1405 
1406 	return num;
1407 }
1408 
1409 
1410 static unsigned char vpd89_data[] = {
1411 /* from 4th byte */ 0,0,0,0,
1412 'l','i','n','u','x',' ',' ',' ',
1413 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1414 '1','2','3','4',
1415 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1416 0xec,0,0,0,
1417 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1418 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1420 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1421 0x53,0x41,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1423 0x20,0x20,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x10,0x80,
1426 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1427 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1428 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1430 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1431 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1432 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1437 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1438 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1439 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1452 };
1453 
1454 /* ATA Information VPD page */
1455 static int inquiry_vpd_89(unsigned char *arr)
1456 {
1457 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1458 	return sizeof(vpd89_data);
1459 }
1460 
1461 
1462 static unsigned char vpdb0_data[] = {
1463 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1464 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1465 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 };
1468 
1469 /* Block limits VPD page (SBC-3) */
1470 static int inquiry_vpd_b0(unsigned char *arr)
1471 {
1472 	unsigned int gran;
1473 
1474 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1475 
1476 	/* Optimal transfer length granularity */
1477 	if (sdebug_opt_xferlen_exp != 0 &&
1478 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1479 		gran = 1 << sdebug_opt_xferlen_exp;
1480 	else
1481 		gran = 1 << sdebug_physblk_exp;
1482 	put_unaligned_be16(gran, arr + 2);
1483 
1484 	/* Maximum Transfer Length */
1485 	if (sdebug_store_sectors > 0x400)
1486 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1487 
1488 	/* Optimal Transfer Length */
1489 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1490 
1491 	if (sdebug_lbpu) {
1492 		/* Maximum Unmap LBA Count */
1493 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1494 
1495 		/* Maximum Unmap Block Descriptor Count */
1496 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1497 	}
1498 
1499 	/* Unmap Granularity Alignment */
1500 	if (sdebug_unmap_alignment) {
1501 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1502 		arr[28] |= 0x80; /* UGAVALID */
1503 	}
1504 
1505 	/* Optimal Unmap Granularity */
1506 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1507 
1508 	/* Maximum WRITE SAME Length */
1509 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1510 
1511 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1512 
1513 	return sizeof(vpdb0_data);
1514 }
1515 
1516 /* Block device characteristics VPD page (SBC-3) */
1517 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1518 {
1519 	memset(arr, 0, 0x3c);
1520 	arr[0] = 0;
1521 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1522 	arr[2] = 0;
1523 	arr[3] = 5;	/* less than 1.8" */
1524 	if (devip->zmodel == BLK_ZONED_HA)
1525 		arr[4] = 1 << 4;	/* zoned field = 01b */
1526 
1527 	return 0x3c;
1528 }
1529 
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x4);
1534 	arr[0] = 0;			/* threshold exponent */
1535 	if (sdebug_lbpu)
1536 		arr[1] = 1 << 7;
1537 	if (sdebug_lbpws)
1538 		arr[1] |= 1 << 6;
1539 	if (sdebug_lbpws10)
1540 		arr[1] |= 1 << 5;
1541 	if (sdebug_lbprz && scsi_debug_lbp())
1542 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1543 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 	/* threshold_percentage=0 */
1546 	return 0x4;
1547 }
1548 
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1551 {
1552 	memset(arr, 0, 0x3c);
1553 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1554 	/*
1555 	 * Set Optimal number of open sequential write preferred zones and
1556 	 * Optimal number of non-sequentially written sequential write
1557 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1558 	 * fields set to zero, apart from Max. number of open swrz_s field.
1559 	 */
1560 	put_unaligned_be32(0xffffffff, &arr[4]);
1561 	put_unaligned_be32(0xffffffff, &arr[8]);
1562 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 		put_unaligned_be32(devip->max_open, &arr[12]);
1564 	else
1565 		put_unaligned_be32(0xffffffff, &arr[12]);
1566 	return 0x3c;
1567 }
1568 
1569 #define SDEBUG_LONG_INQ_SZ 96
1570 #define SDEBUG_MAX_INQ_ARR_SZ 584
1571 
1572 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1573 {
1574 	unsigned char pq_pdt;
1575 	unsigned char *arr;
1576 	unsigned char *cmd = scp->cmnd;
1577 	int alloc_len, n, ret;
1578 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1579 
1580 	alloc_len = get_unaligned_be16(cmd + 3);
1581 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1582 	if (! arr)
1583 		return DID_REQUEUE << 16;
1584 	is_disk = (sdebug_ptype == TYPE_DISK);
1585 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1586 	is_disk_zbc = (is_disk || is_zbc);
1587 	have_wlun = scsi_is_wlun(scp->device->lun);
1588 	if (have_wlun)
1589 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1590 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1591 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1592 	else
1593 		pq_pdt = (sdebug_ptype & 0x1f);
1594 	arr[0] = pq_pdt;
1595 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1596 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1597 		kfree(arr);
1598 		return check_condition_result;
1599 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1600 		int lu_id_num, port_group_id, target_dev_id, len;
1601 		char lu_id_str[6];
1602 		int host_no = devip->sdbg_host->shost->host_no;
1603 
1604 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1605 		    (devip->channel & 0x7f);
1606 		if (sdebug_vpd_use_hostno == 0)
1607 			host_no = 0;
1608 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1609 			    (devip->target * 1000) + devip->lun);
1610 		target_dev_id = ((host_no + 1) * 2000) +
1611 				 (devip->target * 1000) - 3;
1612 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1613 		if (0 == cmd[2]) { /* supported vital product data pages */
1614 			arr[1] = cmd[2];	/*sanity */
1615 			n = 4;
1616 			arr[n++] = 0x0;   /* this page */
1617 			arr[n++] = 0x80;  /* unit serial number */
1618 			arr[n++] = 0x83;  /* device identification */
1619 			arr[n++] = 0x84;  /* software interface ident. */
1620 			arr[n++] = 0x85;  /* management network addresses */
1621 			arr[n++] = 0x86;  /* extended inquiry */
1622 			arr[n++] = 0x87;  /* mode page policy */
1623 			arr[n++] = 0x88;  /* SCSI ports */
1624 			if (is_disk_zbc) {	  /* SBC or ZBC */
1625 				arr[n++] = 0x89;  /* ATA information */
1626 				arr[n++] = 0xb0;  /* Block limits */
1627 				arr[n++] = 0xb1;  /* Block characteristics */
1628 				if (is_disk)
1629 					arr[n++] = 0xb2;  /* LB Provisioning */
1630 				if (is_zbc)
1631 					arr[n++] = 0xb6;  /* ZB dev. char. */
1632 			}
1633 			arr[3] = n - 4;	  /* number of supported VPD pages */
1634 		} else if (0x80 == cmd[2]) { /* unit serial number */
1635 			arr[1] = cmd[2];	/*sanity */
1636 			arr[3] = len;
1637 			memcpy(&arr[4], lu_id_str, len);
1638 		} else if (0x83 == cmd[2]) { /* device identification */
1639 			arr[1] = cmd[2];	/*sanity */
1640 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1641 						target_dev_id, lu_id_num,
1642 						lu_id_str, len,
1643 						&devip->lu_name);
1644 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = inquiry_vpd_84(&arr[4]);
1647 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1648 			arr[1] = cmd[2];	/*sanity */
1649 			arr[3] = inquiry_vpd_85(&arr[4]);
1650 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1651 			arr[1] = cmd[2];	/*sanity */
1652 			arr[3] = 0x3c;	/* number of following entries */
1653 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1654 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1655 			else if (have_dif_prot)
1656 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1657 			else
1658 				arr[4] = 0x0;   /* no protection stuff */
1659 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1660 		} else if (0x87 == cmd[2]) { /* mode page policy */
1661 			arr[1] = cmd[2];	/*sanity */
1662 			arr[3] = 0x8;	/* number of following entries */
1663 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1664 			arr[6] = 0x80;	/* mlus, shared */
1665 			arr[8] = 0x18;	 /* protocol specific lu */
1666 			arr[10] = 0x82;	 /* mlus, per initiator port */
1667 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1668 			arr[1] = cmd[2];	/*sanity */
1669 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1670 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1671 			arr[1] = cmd[2];        /*sanity */
1672 			n = inquiry_vpd_89(&arr[4]);
1673 			put_unaligned_be16(n, arr + 2);
1674 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1675 			arr[1] = cmd[2];        /*sanity */
1676 			arr[3] = inquiry_vpd_b0(&arr[4]);
1677 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1678 			arr[1] = cmd[2];        /*sanity */
1679 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1680 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			arr[3] = inquiry_vpd_b2(&arr[4]);
1683 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1684 			arr[1] = cmd[2];        /*sanity */
1685 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1686 		} else {
1687 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1688 			kfree(arr);
1689 			return check_condition_result;
1690 		}
1691 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1692 		ret = fill_from_dev_buffer(scp, arr,
1693 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1694 		kfree(arr);
1695 		return ret;
1696 	}
1697 	/* drops through here for a standard inquiry */
1698 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1699 	arr[2] = sdebug_scsi_level;
1700 	arr[3] = 2;    /* response_data_format==2 */
1701 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1702 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1703 	if (sdebug_vpd_use_hostno == 0)
1704 		arr[5] |= 0x10; /* claim: implicit TPGS */
1705 	arr[6] = 0x10; /* claim: MultiP */
1706 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1707 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1708 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1709 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1710 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1711 	/* Use Vendor Specific area to place driver date in ASCII hex */
1712 	memcpy(&arr[36], sdebug_version_date, 8);
1713 	/* version descriptors (2 bytes each) follow */
1714 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1715 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1716 	n = 62;
1717 	if (is_disk) {		/* SBC-4 no version claimed */
1718 		put_unaligned_be16(0x600, arr + n);
1719 		n += 2;
1720 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1721 		put_unaligned_be16(0x525, arr + n);
1722 		n += 2;
1723 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1724 		put_unaligned_be16(0x624, arr + n);
1725 		n += 2;
1726 	}
1727 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1728 	ret = fill_from_dev_buffer(scp, arr,
1729 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1730 	kfree(arr);
1731 	return ret;
1732 }
1733 
1734 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1735 				   0, 0, 0x0, 0x0};
1736 
1737 static int resp_requests(struct scsi_cmnd *scp,
1738 			 struct sdebug_dev_info *devip)
1739 {
1740 	unsigned char *sbuff;
1741 	unsigned char *cmd = scp->cmnd;
1742 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1743 	bool dsense;
1744 	int len = 18;
1745 
1746 	memset(arr, 0, sizeof(arr));
1747 	dsense = !!(cmd[1] & 1);
1748 	sbuff = scp->sense_buffer;
1749 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1750 		if (dsense) {
1751 			arr[0] = 0x72;
1752 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1753 			arr[2] = THRESHOLD_EXCEEDED;
1754 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1755 			len = 8;
1756 		} else {
1757 			arr[0] = 0x70;
1758 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1759 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1760 			arr[12] = THRESHOLD_EXCEEDED;
1761 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1762 		}
1763 	} else {
1764 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1765 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1766 			;	/* have sense and formats match */
1767 		else if (arr[0] <= 0x70) {
1768 			if (dsense) {
1769 				memset(arr, 0, 8);
1770 				arr[0] = 0x72;
1771 				len = 8;
1772 			} else {
1773 				memset(arr, 0, 18);
1774 				arr[0] = 0x70;
1775 				arr[7] = 0xa;
1776 			}
1777 		} else if (dsense) {
1778 			memset(arr, 0, 8);
1779 			arr[0] = 0x72;
1780 			arr[1] = sbuff[2];     /* sense key */
1781 			arr[2] = sbuff[12];    /* asc */
1782 			arr[3] = sbuff[13];    /* ascq */
1783 			len = 8;
1784 		} else {
1785 			memset(arr, 0, 18);
1786 			arr[0] = 0x70;
1787 			arr[2] = sbuff[1];
1788 			arr[7] = 0xa;
1789 			arr[12] = sbuff[1];
1790 			arr[13] = sbuff[3];
1791 		}
1792 
1793 	}
1794 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1795 	return fill_from_dev_buffer(scp, arr, len);
1796 }
1797 
1798 static int resp_start_stop(struct scsi_cmnd *scp,
1799 			   struct sdebug_dev_info *devip)
1800 {
1801 	unsigned char *cmd = scp->cmnd;
1802 	int power_cond, stop;
1803 	bool changing;
1804 
1805 	power_cond = (cmd[4] & 0xf0) >> 4;
1806 	if (power_cond) {
1807 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1808 		return check_condition_result;
1809 	}
1810 	stop = !(cmd[4] & 1);
1811 	changing = atomic_read(&devip->stopped) == !stop;
1812 	atomic_xchg(&devip->stopped, stop);
1813 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1814 		return SDEG_RES_IMMED_MASK;
1815 	else
1816 		return 0;
1817 }
1818 
1819 static sector_t get_sdebug_capacity(void)
1820 {
1821 	static const unsigned int gibibyte = 1073741824;
1822 
1823 	if (sdebug_virtual_gb > 0)
1824 		return (sector_t)sdebug_virtual_gb *
1825 			(gibibyte / sdebug_sector_size);
1826 	else
1827 		return sdebug_store_sectors;
1828 }
1829 
1830 #define SDEBUG_READCAP_ARR_SZ 8
1831 static int resp_readcap(struct scsi_cmnd *scp,
1832 			struct sdebug_dev_info *devip)
1833 {
1834 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1835 	unsigned int capac;
1836 
1837 	/* following just in case virtual_gb changed */
1838 	sdebug_capacity = get_sdebug_capacity();
1839 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1840 	if (sdebug_capacity < 0xffffffff) {
1841 		capac = (unsigned int)sdebug_capacity - 1;
1842 		put_unaligned_be32(capac, arr + 0);
1843 	} else
1844 		put_unaligned_be32(0xffffffff, arr + 0);
1845 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1846 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1847 }
1848 
1849 #define SDEBUG_READCAP16_ARR_SZ 32
1850 static int resp_readcap16(struct scsi_cmnd *scp,
1851 			  struct sdebug_dev_info *devip)
1852 {
1853 	unsigned char *cmd = scp->cmnd;
1854 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1855 	int alloc_len;
1856 
1857 	alloc_len = get_unaligned_be32(cmd + 10);
1858 	/* following just in case virtual_gb changed */
1859 	sdebug_capacity = get_sdebug_capacity();
1860 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1861 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1862 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1863 	arr[13] = sdebug_physblk_exp & 0xf;
1864 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1865 
1866 	if (scsi_debug_lbp()) {
1867 		arr[14] |= 0x80; /* LBPME */
1868 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1869 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1870 		 * in the wider field maps to 0 in this field.
1871 		 */
1872 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1873 			arr[14] |= 0x40;
1874 	}
1875 
1876 	arr[15] = sdebug_lowest_aligned & 0xff;
1877 
1878 	if (have_dif_prot) {
1879 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1880 		arr[12] |= 1; /* PROT_EN */
1881 	}
1882 
1883 	return fill_from_dev_buffer(scp, arr,
1884 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1885 }
1886 
1887 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1888 
1889 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1890 			      struct sdebug_dev_info *devip)
1891 {
1892 	unsigned char *cmd = scp->cmnd;
1893 	unsigned char *arr;
1894 	int host_no = devip->sdbg_host->shost->host_no;
1895 	int n, ret, alen, rlen;
1896 	int port_group_a, port_group_b, port_a, port_b;
1897 
1898 	alen = get_unaligned_be32(cmd + 6);
1899 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1900 	if (! arr)
1901 		return DID_REQUEUE << 16;
1902 	/*
1903 	 * EVPD page 0x88 states we have two ports, one
1904 	 * real and a fake port with no device connected.
1905 	 * So we create two port groups with one port each
1906 	 * and set the group with port B to unavailable.
1907 	 */
1908 	port_a = 0x1; /* relative port A */
1909 	port_b = 0x2; /* relative port B */
1910 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1911 			(devip->channel & 0x7f);
1912 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1913 			(devip->channel & 0x7f) + 0x80;
1914 
1915 	/*
1916 	 * The asymmetric access state is cycled according to the host_id.
1917 	 */
1918 	n = 4;
1919 	if (sdebug_vpd_use_hostno == 0) {
1920 		arr[n++] = host_no % 3; /* Asymm access state */
1921 		arr[n++] = 0x0F; /* claim: all states are supported */
1922 	} else {
1923 		arr[n++] = 0x0; /* Active/Optimized path */
1924 		arr[n++] = 0x01; /* only support active/optimized paths */
1925 	}
1926 	put_unaligned_be16(port_group_a, arr + n);
1927 	n += 2;
1928 	arr[n++] = 0;    /* Reserved */
1929 	arr[n++] = 0;    /* Status code */
1930 	arr[n++] = 0;    /* Vendor unique */
1931 	arr[n++] = 0x1;  /* One port per group */
1932 	arr[n++] = 0;    /* Reserved */
1933 	arr[n++] = 0;    /* Reserved */
1934 	put_unaligned_be16(port_a, arr + n);
1935 	n += 2;
1936 	arr[n++] = 3;    /* Port unavailable */
1937 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1938 	put_unaligned_be16(port_group_b, arr + n);
1939 	n += 2;
1940 	arr[n++] = 0;    /* Reserved */
1941 	arr[n++] = 0;    /* Status code */
1942 	arr[n++] = 0;    /* Vendor unique */
1943 	arr[n++] = 0x1;  /* One port per group */
1944 	arr[n++] = 0;    /* Reserved */
1945 	arr[n++] = 0;    /* Reserved */
1946 	put_unaligned_be16(port_b, arr + n);
1947 	n += 2;
1948 
1949 	rlen = n - 4;
1950 	put_unaligned_be32(rlen, arr + 0);
1951 
1952 	/*
1953 	 * Return the smallest value of either
1954 	 * - The allocated length
1955 	 * - The constructed command length
1956 	 * - The maximum array size
1957 	 */
1958 	rlen = min_t(int, alen, n);
1959 	ret = fill_from_dev_buffer(scp, arr,
1960 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1961 	kfree(arr);
1962 	return ret;
1963 }
1964 
1965 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1966 			     struct sdebug_dev_info *devip)
1967 {
1968 	bool rctd;
1969 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1970 	u16 req_sa, u;
1971 	u32 alloc_len, a_len;
1972 	int k, offset, len, errsts, count, bump, na;
1973 	const struct opcode_info_t *oip;
1974 	const struct opcode_info_t *r_oip;
1975 	u8 *arr;
1976 	u8 *cmd = scp->cmnd;
1977 
1978 	rctd = !!(cmd[2] & 0x80);
1979 	reporting_opts = cmd[2] & 0x7;
1980 	req_opcode = cmd[3];
1981 	req_sa = get_unaligned_be16(cmd + 4);
1982 	alloc_len = get_unaligned_be32(cmd + 6);
1983 	if (alloc_len < 4 || alloc_len > 0xffff) {
1984 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1985 		return check_condition_result;
1986 	}
1987 	if (alloc_len > 8192)
1988 		a_len = 8192;
1989 	else
1990 		a_len = alloc_len;
1991 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1992 	if (NULL == arr) {
1993 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1994 				INSUFF_RES_ASCQ);
1995 		return check_condition_result;
1996 	}
1997 	switch (reporting_opts) {
1998 	case 0:	/* all commands */
1999 		/* count number of commands */
2000 		for (count = 0, oip = opcode_info_arr;
2001 		     oip->num_attached != 0xff; ++oip) {
2002 			if (F_INV_OP & oip->flags)
2003 				continue;
2004 			count += (oip->num_attached + 1);
2005 		}
2006 		bump = rctd ? 20 : 8;
2007 		put_unaligned_be32(count * bump, arr);
2008 		for (offset = 4, oip = opcode_info_arr;
2009 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2010 			if (F_INV_OP & oip->flags)
2011 				continue;
2012 			na = oip->num_attached;
2013 			arr[offset] = oip->opcode;
2014 			put_unaligned_be16(oip->sa, arr + offset + 2);
2015 			if (rctd)
2016 				arr[offset + 5] |= 0x2;
2017 			if (FF_SA & oip->flags)
2018 				arr[offset + 5] |= 0x1;
2019 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2020 			if (rctd)
2021 				put_unaligned_be16(0xa, arr + offset + 8);
2022 			r_oip = oip;
2023 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2024 				if (F_INV_OP & oip->flags)
2025 					continue;
2026 				offset += bump;
2027 				arr[offset] = oip->opcode;
2028 				put_unaligned_be16(oip->sa, arr + offset + 2);
2029 				if (rctd)
2030 					arr[offset + 5] |= 0x2;
2031 				if (FF_SA & oip->flags)
2032 					arr[offset + 5] |= 0x1;
2033 				put_unaligned_be16(oip->len_mask[0],
2034 						   arr + offset + 6);
2035 				if (rctd)
2036 					put_unaligned_be16(0xa,
2037 							   arr + offset + 8);
2038 			}
2039 			oip = r_oip;
2040 			offset += bump;
2041 		}
2042 		break;
2043 	case 1:	/* one command: opcode only */
2044 	case 2:	/* one command: opcode plus service action */
2045 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2046 		sdeb_i = opcode_ind_arr[req_opcode];
2047 		oip = &opcode_info_arr[sdeb_i];
2048 		if (F_INV_OP & oip->flags) {
2049 			supp = 1;
2050 			offset = 4;
2051 		} else {
2052 			if (1 == reporting_opts) {
2053 				if (FF_SA & oip->flags) {
2054 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2055 							     2, 2);
2056 					kfree(arr);
2057 					return check_condition_result;
2058 				}
2059 				req_sa = 0;
2060 			} else if (2 == reporting_opts &&
2061 				   0 == (FF_SA & oip->flags)) {
2062 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2063 				kfree(arr);	/* point at requested sa */
2064 				return check_condition_result;
2065 			}
2066 			if (0 == (FF_SA & oip->flags) &&
2067 			    req_opcode == oip->opcode)
2068 				supp = 3;
2069 			else if (0 == (FF_SA & oip->flags)) {
2070 				na = oip->num_attached;
2071 				for (k = 0, oip = oip->arrp; k < na;
2072 				     ++k, ++oip) {
2073 					if (req_opcode == oip->opcode)
2074 						break;
2075 				}
2076 				supp = (k >= na) ? 1 : 3;
2077 			} else if (req_sa != oip->sa) {
2078 				na = oip->num_attached;
2079 				for (k = 0, oip = oip->arrp; k < na;
2080 				     ++k, ++oip) {
2081 					if (req_sa == oip->sa)
2082 						break;
2083 				}
2084 				supp = (k >= na) ? 1 : 3;
2085 			} else
2086 				supp = 3;
2087 			if (3 == supp) {
2088 				u = oip->len_mask[0];
2089 				put_unaligned_be16(u, arr + 2);
2090 				arr[4] = oip->opcode;
2091 				for (k = 1; k < u; ++k)
2092 					arr[4 + k] = (k < 16) ?
2093 						 oip->len_mask[k] : 0xff;
2094 				offset = 4 + u;
2095 			} else
2096 				offset = 4;
2097 		}
2098 		arr[1] = (rctd ? 0x80 : 0) | supp;
2099 		if (rctd) {
2100 			put_unaligned_be16(0xa, arr + offset);
2101 			offset += 12;
2102 		}
2103 		break;
2104 	default:
2105 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2106 		kfree(arr);
2107 		return check_condition_result;
2108 	}
2109 	offset = (offset < a_len) ? offset : a_len;
2110 	len = (offset < alloc_len) ? offset : alloc_len;
2111 	errsts = fill_from_dev_buffer(scp, arr, len);
2112 	kfree(arr);
2113 	return errsts;
2114 }
2115 
2116 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2117 			  struct sdebug_dev_info *devip)
2118 {
2119 	bool repd;
2120 	u32 alloc_len, len;
2121 	u8 arr[16];
2122 	u8 *cmd = scp->cmnd;
2123 
2124 	memset(arr, 0, sizeof(arr));
2125 	repd = !!(cmd[2] & 0x80);
2126 	alloc_len = get_unaligned_be32(cmd + 6);
2127 	if (alloc_len < 4) {
2128 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2129 		return check_condition_result;
2130 	}
2131 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2132 	arr[1] = 0x1;		/* ITNRS */
2133 	if (repd) {
2134 		arr[3] = 0xc;
2135 		len = 16;
2136 	} else
2137 		len = 4;
2138 
2139 	len = (len < alloc_len) ? len : alloc_len;
2140 	return fill_from_dev_buffer(scp, arr, len);
2141 }
2142 
2143 /* <<Following mode page info copied from ST318451LW>> */
2144 
2145 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2146 {	/* Read-Write Error Recovery page for mode_sense */
2147 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2148 					5, 0, 0xff, 0xff};
2149 
2150 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2151 	if (1 == pcontrol)
2152 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2153 	return sizeof(err_recov_pg);
2154 }
2155 
2156 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2157 { 	/* Disconnect-Reconnect page for mode_sense */
2158 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2159 					 0, 0, 0, 0, 0, 0, 0, 0};
2160 
2161 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2162 	if (1 == pcontrol)
2163 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2164 	return sizeof(disconnect_pg);
2165 }
2166 
2167 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2168 {       /* Format device page for mode_sense */
2169 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2170 				     0, 0, 0, 0, 0, 0, 0, 0,
2171 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2172 
2173 	memcpy(p, format_pg, sizeof(format_pg));
2174 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2175 	put_unaligned_be16(sdebug_sector_size, p + 12);
2176 	if (sdebug_removable)
2177 		p[20] |= 0x20; /* should agree with INQUIRY */
2178 	if (1 == pcontrol)
2179 		memset(p + 2, 0, sizeof(format_pg) - 2);
2180 	return sizeof(format_pg);
2181 }
2182 
2183 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2184 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2185 				     0, 0, 0, 0};
2186 
2187 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2188 { 	/* Caching page for mode_sense */
2189 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2190 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2191 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2192 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2193 
2194 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2195 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2196 	memcpy(p, caching_pg, sizeof(caching_pg));
2197 	if (1 == pcontrol)
2198 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2199 	else if (2 == pcontrol)
2200 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2201 	return sizeof(caching_pg);
2202 }
2203 
2204 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2205 				    0, 0, 0x2, 0x4b};
2206 
2207 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2208 { 	/* Control mode page for mode_sense */
2209 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2210 					0, 0, 0, 0};
2211 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 				     0, 0, 0x2, 0x4b};
2213 
2214 	if (sdebug_dsense)
2215 		ctrl_m_pg[2] |= 0x4;
2216 	else
2217 		ctrl_m_pg[2] &= ~0x4;
2218 
2219 	if (sdebug_ato)
2220 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2221 
2222 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2223 	if (1 == pcontrol)
2224 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2225 	else if (2 == pcontrol)
2226 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2227 	return sizeof(ctrl_m_pg);
2228 }
2229 
2230 
2231 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2232 {	/* Informational Exceptions control mode page for mode_sense */
2233 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2234 				       0, 0, 0x0, 0x0};
2235 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2236 				      0, 0, 0x0, 0x0};
2237 
2238 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2239 	if (1 == pcontrol)
2240 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2241 	else if (2 == pcontrol)
2242 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2243 	return sizeof(iec_m_pg);
2244 }
2245 
2246 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2247 {	/* SAS SSP mode page - short format for mode_sense */
2248 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2249 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2250 
2251 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2252 	if (1 == pcontrol)
2253 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2254 	return sizeof(sas_sf_m_pg);
2255 }
2256 
2257 
2258 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2259 			      int target_dev_id)
2260 {	/* SAS phy control and discover mode page for mode_sense */
2261 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2262 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2263 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2264 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2265 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2266 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2267 		    0, 0, 0, 0, 0, 0, 0, 0,
2268 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2269 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2270 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2271 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2272 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2273 		    0, 0, 0, 0, 0, 0, 0, 0,
2274 		};
2275 	int port_a, port_b;
2276 
2277 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2278 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2279 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2280 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2281 	port_a = target_dev_id + 1;
2282 	port_b = port_a + 1;
2283 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2284 	put_unaligned_be32(port_a, p + 20);
2285 	put_unaligned_be32(port_b, p + 48 + 20);
2286 	if (1 == pcontrol)
2287 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2288 	return sizeof(sas_pcd_m_pg);
2289 }
2290 
2291 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2292 {	/* SAS SSP shared protocol specific port mode subpage */
2293 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2294 		    0, 0, 0, 0, 0, 0, 0, 0,
2295 		};
2296 
2297 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2298 	if (1 == pcontrol)
2299 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2300 	return sizeof(sas_sha_m_pg);
2301 }
2302 
2303 #define SDEBUG_MAX_MSENSE_SZ 256
2304 
2305 static int resp_mode_sense(struct scsi_cmnd *scp,
2306 			   struct sdebug_dev_info *devip)
2307 {
2308 	int pcontrol, pcode, subpcode, bd_len;
2309 	unsigned char dev_spec;
2310 	int alloc_len, offset, len, target_dev_id;
2311 	int target = scp->device->id;
2312 	unsigned char *ap;
2313 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2314 	unsigned char *cmd = scp->cmnd;
2315 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2316 
2317 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2318 	pcontrol = (cmd[2] & 0xc0) >> 6;
2319 	pcode = cmd[2] & 0x3f;
2320 	subpcode = cmd[3];
2321 	msense_6 = (MODE_SENSE == cmd[0]);
2322 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2323 	is_disk = (sdebug_ptype == TYPE_DISK);
2324 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2325 	if ((is_disk || is_zbc) && !dbd)
2326 		bd_len = llbaa ? 16 : 8;
2327 	else
2328 		bd_len = 0;
2329 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2330 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2331 	if (0x3 == pcontrol) {  /* Saving values not supported */
2332 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2333 		return check_condition_result;
2334 	}
2335 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2336 			(devip->target * 1000) - 3;
2337 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2338 	if (is_disk || is_zbc) {
2339 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2340 		if (sdebug_wp)
2341 			dev_spec |= 0x80;
2342 	} else
2343 		dev_spec = 0x0;
2344 	if (msense_6) {
2345 		arr[2] = dev_spec;
2346 		arr[3] = bd_len;
2347 		offset = 4;
2348 	} else {
2349 		arr[3] = dev_spec;
2350 		if (16 == bd_len)
2351 			arr[4] = 0x1;	/* set LONGLBA bit */
2352 		arr[7] = bd_len;	/* assume 255 or less */
2353 		offset = 8;
2354 	}
2355 	ap = arr + offset;
2356 	if ((bd_len > 0) && (!sdebug_capacity))
2357 		sdebug_capacity = get_sdebug_capacity();
2358 
2359 	if (8 == bd_len) {
2360 		if (sdebug_capacity > 0xfffffffe)
2361 			put_unaligned_be32(0xffffffff, ap + 0);
2362 		else
2363 			put_unaligned_be32(sdebug_capacity, ap + 0);
2364 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2365 		offset += bd_len;
2366 		ap = arr + offset;
2367 	} else if (16 == bd_len) {
2368 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2369 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2370 		offset += bd_len;
2371 		ap = arr + offset;
2372 	}
2373 
2374 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2375 		/* TODO: Control Extension page */
2376 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2377 		return check_condition_result;
2378 	}
2379 	bad_pcode = false;
2380 
2381 	switch (pcode) {
2382 	case 0x1:	/* Read-Write error recovery page, direct access */
2383 		len = resp_err_recov_pg(ap, pcontrol, target);
2384 		offset += len;
2385 		break;
2386 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2387 		len = resp_disconnect_pg(ap, pcontrol, target);
2388 		offset += len;
2389 		break;
2390 	case 0x3:       /* Format device page, direct access */
2391 		if (is_disk) {
2392 			len = resp_format_pg(ap, pcontrol, target);
2393 			offset += len;
2394 		} else
2395 			bad_pcode = true;
2396 		break;
2397 	case 0x8:	/* Caching page, direct access */
2398 		if (is_disk || is_zbc) {
2399 			len = resp_caching_pg(ap, pcontrol, target);
2400 			offset += len;
2401 		} else
2402 			bad_pcode = true;
2403 		break;
2404 	case 0xa:	/* Control Mode page, all devices */
2405 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2406 		offset += len;
2407 		break;
2408 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2409 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2410 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2411 			return check_condition_result;
2412 		}
2413 		len = 0;
2414 		if ((0x0 == subpcode) || (0xff == subpcode))
2415 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2416 		if ((0x1 == subpcode) || (0xff == subpcode))
2417 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2418 						  target_dev_id);
2419 		if ((0x2 == subpcode) || (0xff == subpcode))
2420 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2421 		offset += len;
2422 		break;
2423 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2424 		len = resp_iec_m_pg(ap, pcontrol, target);
2425 		offset += len;
2426 		break;
2427 	case 0x3f:	/* Read all Mode pages */
2428 		if ((0 == subpcode) || (0xff == subpcode)) {
2429 			len = resp_err_recov_pg(ap, pcontrol, target);
2430 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2431 			if (is_disk) {
2432 				len += resp_format_pg(ap + len, pcontrol,
2433 						      target);
2434 				len += resp_caching_pg(ap + len, pcontrol,
2435 						       target);
2436 			} else if (is_zbc) {
2437 				len += resp_caching_pg(ap + len, pcontrol,
2438 						       target);
2439 			}
2440 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2441 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2442 			if (0xff == subpcode) {
2443 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2444 						  target, target_dev_id);
2445 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2446 			}
2447 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2448 			offset += len;
2449 		} else {
2450 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2451 			return check_condition_result;
2452 		}
2453 		break;
2454 	default:
2455 		bad_pcode = true;
2456 		break;
2457 	}
2458 	if (bad_pcode) {
2459 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2460 		return check_condition_result;
2461 	}
2462 	if (msense_6)
2463 		arr[0] = offset - 1;
2464 	else
2465 		put_unaligned_be16((offset - 2), arr + 0);
2466 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2467 }
2468 
2469 #define SDEBUG_MAX_MSELECT_SZ 512
2470 
2471 static int resp_mode_select(struct scsi_cmnd *scp,
2472 			    struct sdebug_dev_info *devip)
2473 {
2474 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2475 	int param_len, res, mpage;
2476 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2477 	unsigned char *cmd = scp->cmnd;
2478 	int mselect6 = (MODE_SELECT == cmd[0]);
2479 
2480 	memset(arr, 0, sizeof(arr));
2481 	pf = cmd[1] & 0x10;
2482 	sp = cmd[1] & 0x1;
2483 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2484 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2485 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2486 		return check_condition_result;
2487 	}
2488 	res = fetch_to_dev_buffer(scp, arr, param_len);
2489 	if (-1 == res)
2490 		return DID_ERROR << 16;
2491 	else if (sdebug_verbose && (res < param_len))
2492 		sdev_printk(KERN_INFO, scp->device,
2493 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2494 			    __func__, param_len, res);
2495 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2496 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2497 	if (md_len > 2) {
2498 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2499 		return check_condition_result;
2500 	}
2501 	off = bd_len + (mselect6 ? 4 : 8);
2502 	mpage = arr[off] & 0x3f;
2503 	ps = !!(arr[off] & 0x80);
2504 	if (ps) {
2505 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2506 		return check_condition_result;
2507 	}
2508 	spf = !!(arr[off] & 0x40);
2509 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2510 		       (arr[off + 1] + 2);
2511 	if ((pg_len + off) > param_len) {
2512 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2513 				PARAMETER_LIST_LENGTH_ERR, 0);
2514 		return check_condition_result;
2515 	}
2516 	switch (mpage) {
2517 	case 0x8:      /* Caching Mode page */
2518 		if (caching_pg[1] == arr[off + 1]) {
2519 			memcpy(caching_pg + 2, arr + off + 2,
2520 			       sizeof(caching_pg) - 2);
2521 			goto set_mode_changed_ua;
2522 		}
2523 		break;
2524 	case 0xa:      /* Control Mode page */
2525 		if (ctrl_m_pg[1] == arr[off + 1]) {
2526 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2527 			       sizeof(ctrl_m_pg) - 2);
2528 			if (ctrl_m_pg[4] & 0x8)
2529 				sdebug_wp = true;
2530 			else
2531 				sdebug_wp = false;
2532 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2533 			goto set_mode_changed_ua;
2534 		}
2535 		break;
2536 	case 0x1c:      /* Informational Exceptions Mode page */
2537 		if (iec_m_pg[1] == arr[off + 1]) {
2538 			memcpy(iec_m_pg + 2, arr + off + 2,
2539 			       sizeof(iec_m_pg) - 2);
2540 			goto set_mode_changed_ua;
2541 		}
2542 		break;
2543 	default:
2544 		break;
2545 	}
2546 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2547 	return check_condition_result;
2548 set_mode_changed_ua:
2549 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2550 	return 0;
2551 }
2552 
2553 static int resp_temp_l_pg(unsigned char *arr)
2554 {
2555 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2556 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2557 		};
2558 
2559 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2560 	return sizeof(temp_l_pg);
2561 }
2562 
2563 static int resp_ie_l_pg(unsigned char *arr)
2564 {
2565 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2566 		};
2567 
2568 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2569 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2570 		arr[4] = THRESHOLD_EXCEEDED;
2571 		arr[5] = 0xff;
2572 	}
2573 	return sizeof(ie_l_pg);
2574 }
2575 
2576 #define SDEBUG_MAX_LSENSE_SZ 512
2577 
2578 static int resp_log_sense(struct scsi_cmnd *scp,
2579 			  struct sdebug_dev_info *devip)
2580 {
2581 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2582 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2583 	unsigned char *cmd = scp->cmnd;
2584 
2585 	memset(arr, 0, sizeof(arr));
2586 	ppc = cmd[1] & 0x2;
2587 	sp = cmd[1] & 0x1;
2588 	if (ppc || sp) {
2589 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2590 		return check_condition_result;
2591 	}
2592 	pcode = cmd[2] & 0x3f;
2593 	subpcode = cmd[3] & 0xff;
2594 	alloc_len = get_unaligned_be16(cmd + 7);
2595 	arr[0] = pcode;
2596 	if (0 == subpcode) {
2597 		switch (pcode) {
2598 		case 0x0:	/* Supported log pages log page */
2599 			n = 4;
2600 			arr[n++] = 0x0;		/* this page */
2601 			arr[n++] = 0xd;		/* Temperature */
2602 			arr[n++] = 0x2f;	/* Informational exceptions */
2603 			arr[3] = n - 4;
2604 			break;
2605 		case 0xd:	/* Temperature log page */
2606 			arr[3] = resp_temp_l_pg(arr + 4);
2607 			break;
2608 		case 0x2f:	/* Informational exceptions log page */
2609 			arr[3] = resp_ie_l_pg(arr + 4);
2610 			break;
2611 		default:
2612 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2613 			return check_condition_result;
2614 		}
2615 	} else if (0xff == subpcode) {
2616 		arr[0] |= 0x40;
2617 		arr[1] = subpcode;
2618 		switch (pcode) {
2619 		case 0x0:	/* Supported log pages and subpages log page */
2620 			n = 4;
2621 			arr[n++] = 0x0;
2622 			arr[n++] = 0x0;		/* 0,0 page */
2623 			arr[n++] = 0x0;
2624 			arr[n++] = 0xff;	/* this page */
2625 			arr[n++] = 0xd;
2626 			arr[n++] = 0x0;		/* Temperature */
2627 			arr[n++] = 0x2f;
2628 			arr[n++] = 0x0;	/* Informational exceptions */
2629 			arr[3] = n - 4;
2630 			break;
2631 		case 0xd:	/* Temperature subpages */
2632 			n = 4;
2633 			arr[n++] = 0xd;
2634 			arr[n++] = 0x0;		/* Temperature */
2635 			arr[3] = n - 4;
2636 			break;
2637 		case 0x2f:	/* Informational exceptions subpages */
2638 			n = 4;
2639 			arr[n++] = 0x2f;
2640 			arr[n++] = 0x0;		/* Informational exceptions */
2641 			arr[3] = n - 4;
2642 			break;
2643 		default:
2644 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2645 			return check_condition_result;
2646 		}
2647 	} else {
2648 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2649 		return check_condition_result;
2650 	}
2651 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2652 	return fill_from_dev_buffer(scp, arr,
2653 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2654 }
2655 
2656 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2657 {
2658 	return devip->nr_zones != 0;
2659 }
2660 
2661 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2662 					unsigned long long lba)
2663 {
2664 	return &devip->zstate[lba >> devip->zsize_shift];
2665 }
2666 
2667 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2668 {
2669 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2670 }
2671 
2672 static void zbc_close_zone(struct sdebug_dev_info *devip,
2673 			   struct sdeb_zone_state *zsp)
2674 {
2675 	enum sdebug_z_cond zc;
2676 
2677 	if (zbc_zone_is_conv(zsp))
2678 		return;
2679 
2680 	zc = zsp->z_cond;
2681 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2682 		return;
2683 
2684 	if (zc == ZC2_IMPLICIT_OPEN)
2685 		devip->nr_imp_open--;
2686 	else
2687 		devip->nr_exp_open--;
2688 
2689 	if (zsp->z_wp == zsp->z_start) {
2690 		zsp->z_cond = ZC1_EMPTY;
2691 	} else {
2692 		zsp->z_cond = ZC4_CLOSED;
2693 		devip->nr_closed++;
2694 	}
2695 }
2696 
2697 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2698 {
2699 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2700 	unsigned int i;
2701 
2702 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2703 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2704 			zbc_close_zone(devip, zsp);
2705 			return;
2706 		}
2707 	}
2708 }
2709 
2710 static void zbc_open_zone(struct sdebug_dev_info *devip,
2711 			  struct sdeb_zone_state *zsp, bool explicit)
2712 {
2713 	enum sdebug_z_cond zc;
2714 
2715 	if (zbc_zone_is_conv(zsp))
2716 		return;
2717 
2718 	zc = zsp->z_cond;
2719 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2720 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2721 		return;
2722 
2723 	/* Close an implicit open zone if necessary */
2724 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2725 		zbc_close_zone(devip, zsp);
2726 	else if (devip->max_open &&
2727 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2728 		zbc_close_imp_open_zone(devip);
2729 
2730 	if (zsp->z_cond == ZC4_CLOSED)
2731 		devip->nr_closed--;
2732 	if (explicit) {
2733 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2734 		devip->nr_exp_open++;
2735 	} else {
2736 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2737 		devip->nr_imp_open++;
2738 	}
2739 }
2740 
2741 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2742 		       unsigned long long lba, unsigned int num)
2743 {
2744 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2745 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2746 
2747 	if (zbc_zone_is_conv(zsp))
2748 		return;
2749 
2750 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2751 		zsp->z_wp += num;
2752 		if (zsp->z_wp >= zend)
2753 			zsp->z_cond = ZC5_FULL;
2754 		return;
2755 	}
2756 
2757 	while (num) {
2758 		if (lba != zsp->z_wp)
2759 			zsp->z_non_seq_resource = true;
2760 
2761 		end = lba + num;
2762 		if (end >= zend) {
2763 			n = zend - lba;
2764 			zsp->z_wp = zend;
2765 		} else if (end > zsp->z_wp) {
2766 			n = num;
2767 			zsp->z_wp = end;
2768 		} else {
2769 			n = num;
2770 		}
2771 		if (zsp->z_wp >= zend)
2772 			zsp->z_cond = ZC5_FULL;
2773 
2774 		num -= n;
2775 		lba += n;
2776 		if (num) {
2777 			zsp++;
2778 			zend = zsp->z_start + zsp->z_size;
2779 		}
2780 	}
2781 }
2782 
2783 static int check_zbc_access_params(struct scsi_cmnd *scp,
2784 			unsigned long long lba, unsigned int num, bool write)
2785 {
2786 	struct scsi_device *sdp = scp->device;
2787 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2788 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2789 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2790 
2791 	if (!write) {
2792 		if (devip->zmodel == BLK_ZONED_HA)
2793 			return 0;
2794 		/* For host-managed, reads cannot cross zone types boundaries */
2795 		if (zsp_end != zsp &&
2796 		    zbc_zone_is_conv(zsp) &&
2797 		    !zbc_zone_is_conv(zsp_end)) {
2798 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2799 					LBA_OUT_OF_RANGE,
2800 					READ_INVDATA_ASCQ);
2801 			return check_condition_result;
2802 		}
2803 		return 0;
2804 	}
2805 
2806 	/* No restrictions for writes within conventional zones */
2807 	if (zbc_zone_is_conv(zsp)) {
2808 		if (!zbc_zone_is_conv(zsp_end)) {
2809 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2810 					LBA_OUT_OF_RANGE,
2811 					WRITE_BOUNDARY_ASCQ);
2812 			return check_condition_result;
2813 		}
2814 		return 0;
2815 	}
2816 
2817 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2818 		/* Writes cannot cross sequential zone boundaries */
2819 		if (zsp_end != zsp) {
2820 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2821 					LBA_OUT_OF_RANGE,
2822 					WRITE_BOUNDARY_ASCQ);
2823 			return check_condition_result;
2824 		}
2825 		/* Cannot write full zones */
2826 		if (zsp->z_cond == ZC5_FULL) {
2827 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2828 					INVALID_FIELD_IN_CDB, 0);
2829 			return check_condition_result;
2830 		}
2831 		/* Writes must be aligned to the zone WP */
2832 		if (lba != zsp->z_wp) {
2833 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2834 					LBA_OUT_OF_RANGE,
2835 					UNALIGNED_WRITE_ASCQ);
2836 			return check_condition_result;
2837 		}
2838 	}
2839 
2840 	/* Handle implicit open of closed and empty zones */
2841 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2842 		if (devip->max_open &&
2843 		    devip->nr_exp_open >= devip->max_open) {
2844 			mk_sense_buffer(scp, DATA_PROTECT,
2845 					INSUFF_RES_ASC,
2846 					INSUFF_ZONE_ASCQ);
2847 			return check_condition_result;
2848 		}
2849 		zbc_open_zone(devip, zsp, false);
2850 	}
2851 
2852 	return 0;
2853 }
2854 
2855 static inline int check_device_access_params
2856 			(struct scsi_cmnd *scp, unsigned long long lba,
2857 			 unsigned int num, bool write)
2858 {
2859 	struct scsi_device *sdp = scp->device;
2860 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2861 
2862 	if (lba + num > sdebug_capacity) {
2863 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2864 		return check_condition_result;
2865 	}
2866 	/* transfer length excessive (tie in to block limits VPD page) */
2867 	if (num > sdebug_store_sectors) {
2868 		/* needs work to find which cdb byte 'num' comes from */
2869 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2870 		return check_condition_result;
2871 	}
2872 	if (write && unlikely(sdebug_wp)) {
2873 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2874 		return check_condition_result;
2875 	}
2876 	if (sdebug_dev_is_zoned(devip))
2877 		return check_zbc_access_params(scp, lba, num, write);
2878 
2879 	return 0;
2880 }
2881 
2882 /*
2883  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2884  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2885  * that access any of the "stores" in struct sdeb_store_info should call this
2886  * function with bug_if_fake_rw set to true.
2887  */
2888 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2889 						bool bug_if_fake_rw)
2890 {
2891 	if (sdebug_fake_rw) {
2892 		BUG_ON(bug_if_fake_rw);	/* See note above */
2893 		return NULL;
2894 	}
2895 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2896 }
2897 
2898 /* Returns number of bytes copied or -1 if error. */
2899 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2900 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2901 {
2902 	int ret;
2903 	u64 block, rest = 0;
2904 	enum dma_data_direction dir;
2905 	struct scsi_data_buffer *sdb = &scp->sdb;
2906 	u8 *fsp;
2907 
2908 	if (do_write) {
2909 		dir = DMA_TO_DEVICE;
2910 		write_since_sync = true;
2911 	} else {
2912 		dir = DMA_FROM_DEVICE;
2913 	}
2914 
2915 	if (!sdb->length || !sip)
2916 		return 0;
2917 	if (scp->sc_data_direction != dir)
2918 		return -1;
2919 	fsp = sip->storep;
2920 
2921 	block = do_div(lba, sdebug_store_sectors);
2922 	if (block + num > sdebug_store_sectors)
2923 		rest = block + num - sdebug_store_sectors;
2924 
2925 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2926 		   fsp + (block * sdebug_sector_size),
2927 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2928 	if (ret != (num - rest) * sdebug_sector_size)
2929 		return ret;
2930 
2931 	if (rest) {
2932 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2933 			    fsp, rest * sdebug_sector_size,
2934 			    sg_skip + ((num - rest) * sdebug_sector_size),
2935 			    do_write);
2936 	}
2937 
2938 	return ret;
2939 }
2940 
2941 /* Returns number of bytes copied or -1 if error. */
2942 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2943 {
2944 	struct scsi_data_buffer *sdb = &scp->sdb;
2945 
2946 	if (!sdb->length)
2947 		return 0;
2948 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2949 		return -1;
2950 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2951 			      num * sdebug_sector_size, 0, true);
2952 }
2953 
2954 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2955  * arr into sip->storep+lba and return true. If comparison fails then
2956  * return false. */
2957 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2958 			      const u8 *arr, bool compare_only)
2959 {
2960 	bool res;
2961 	u64 block, rest = 0;
2962 	u32 store_blks = sdebug_store_sectors;
2963 	u32 lb_size = sdebug_sector_size;
2964 	u8 *fsp = sip->storep;
2965 
2966 	block = do_div(lba, store_blks);
2967 	if (block + num > store_blks)
2968 		rest = block + num - store_blks;
2969 
2970 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2971 	if (!res)
2972 		return res;
2973 	if (rest)
2974 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2975 			     rest * lb_size);
2976 	if (!res)
2977 		return res;
2978 	if (compare_only)
2979 		return true;
2980 	arr += num * lb_size;
2981 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2982 	if (rest)
2983 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2984 	return res;
2985 }
2986 
2987 static __be16 dif_compute_csum(const void *buf, int len)
2988 {
2989 	__be16 csum;
2990 
2991 	if (sdebug_guard)
2992 		csum = (__force __be16)ip_compute_csum(buf, len);
2993 	else
2994 		csum = cpu_to_be16(crc_t10dif(buf, len));
2995 
2996 	return csum;
2997 }
2998 
2999 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3000 		      sector_t sector, u32 ei_lba)
3001 {
3002 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3003 
3004 	if (sdt->guard_tag != csum) {
3005 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3006 			(unsigned long)sector,
3007 			be16_to_cpu(sdt->guard_tag),
3008 			be16_to_cpu(csum));
3009 		return 0x01;
3010 	}
3011 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3012 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3013 		pr_err("REF check failed on sector %lu\n",
3014 			(unsigned long)sector);
3015 		return 0x03;
3016 	}
3017 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3018 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3019 		pr_err("REF check failed on sector %lu\n",
3020 			(unsigned long)sector);
3021 		return 0x03;
3022 	}
3023 	return 0;
3024 }
3025 
3026 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3027 			  unsigned int sectors, bool read)
3028 {
3029 	size_t resid;
3030 	void *paddr;
3031 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3032 						scp->device->hostdata, true);
3033 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3034 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3035 	struct sg_mapping_iter miter;
3036 
3037 	/* Bytes of protection data to copy into sgl */
3038 	resid = sectors * sizeof(*dif_storep);
3039 
3040 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3041 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3042 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3043 
3044 	while (sg_miter_next(&miter) && resid > 0) {
3045 		size_t len = min_t(size_t, miter.length, resid);
3046 		void *start = dif_store(sip, sector);
3047 		size_t rest = 0;
3048 
3049 		if (dif_store_end < start + len)
3050 			rest = start + len - dif_store_end;
3051 
3052 		paddr = miter.addr;
3053 
3054 		if (read)
3055 			memcpy(paddr, start, len - rest);
3056 		else
3057 			memcpy(start, paddr, len - rest);
3058 
3059 		if (rest) {
3060 			if (read)
3061 				memcpy(paddr + len - rest, dif_storep, rest);
3062 			else
3063 				memcpy(dif_storep, paddr + len - rest, rest);
3064 		}
3065 
3066 		sector += len / sizeof(*dif_storep);
3067 		resid -= len;
3068 	}
3069 	sg_miter_stop(&miter);
3070 }
3071 
3072 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3073 			    unsigned int sectors, u32 ei_lba)
3074 {
3075 	unsigned int i;
3076 	sector_t sector;
3077 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3078 						scp->device->hostdata, true);
3079 	struct t10_pi_tuple *sdt;
3080 
3081 	for (i = 0; i < sectors; i++, ei_lba++) {
3082 		int ret;
3083 
3084 		sector = start_sec + i;
3085 		sdt = dif_store(sip, sector);
3086 
3087 		if (sdt->app_tag == cpu_to_be16(0xffff))
3088 			continue;
3089 
3090 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3091 				 ei_lba);
3092 		if (ret) {
3093 			dif_errors++;
3094 			return ret;
3095 		}
3096 	}
3097 
3098 	dif_copy_prot(scp, start_sec, sectors, true);
3099 	dix_reads++;
3100 
3101 	return 0;
3102 }
3103 
3104 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3105 {
3106 	bool check_prot;
3107 	u32 num;
3108 	u32 ei_lba;
3109 	int ret;
3110 	u64 lba;
3111 	struct sdeb_store_info *sip = devip2sip(devip, true);
3112 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3113 	u8 *cmd = scp->cmnd;
3114 	struct sdebug_queued_cmd *sqcp;
3115 
3116 	switch (cmd[0]) {
3117 	case READ_16:
3118 		ei_lba = 0;
3119 		lba = get_unaligned_be64(cmd + 2);
3120 		num = get_unaligned_be32(cmd + 10);
3121 		check_prot = true;
3122 		break;
3123 	case READ_10:
3124 		ei_lba = 0;
3125 		lba = get_unaligned_be32(cmd + 2);
3126 		num = get_unaligned_be16(cmd + 7);
3127 		check_prot = true;
3128 		break;
3129 	case READ_6:
3130 		ei_lba = 0;
3131 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3132 		      (u32)(cmd[1] & 0x1f) << 16;
3133 		num = (0 == cmd[4]) ? 256 : cmd[4];
3134 		check_prot = true;
3135 		break;
3136 	case READ_12:
3137 		ei_lba = 0;
3138 		lba = get_unaligned_be32(cmd + 2);
3139 		num = get_unaligned_be32(cmd + 6);
3140 		check_prot = true;
3141 		break;
3142 	case XDWRITEREAD_10:
3143 		ei_lba = 0;
3144 		lba = get_unaligned_be32(cmd + 2);
3145 		num = get_unaligned_be16(cmd + 7);
3146 		check_prot = false;
3147 		break;
3148 	default:	/* assume READ(32) */
3149 		lba = get_unaligned_be64(cmd + 12);
3150 		ei_lba = get_unaligned_be32(cmd + 20);
3151 		num = get_unaligned_be32(cmd + 28);
3152 		check_prot = false;
3153 		break;
3154 	}
3155 	if (unlikely(have_dif_prot && check_prot)) {
3156 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3157 		    (cmd[1] & 0xe0)) {
3158 			mk_sense_invalid_opcode(scp);
3159 			return check_condition_result;
3160 		}
3161 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3162 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3163 		    (cmd[1] & 0xe0) == 0)
3164 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3165 				    "to DIF device\n");
3166 	}
3167 	if (unlikely(sdebug_any_injecting_opt)) {
3168 		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
3169 
3170 		if (sqcp) {
3171 			if (sqcp->inj_short)
3172 				num /= 2;
3173 		}
3174 	} else
3175 		sqcp = NULL;
3176 
3177 	ret = check_device_access_params(scp, lba, num, false);
3178 	if (ret)
3179 		return ret;
3180 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3181 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3182 		     ((lba + num) > sdebug_medium_error_start))) {
3183 		/* claim unrecoverable read error */
3184 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3185 		/* set info field and valid bit for fixed descriptor */
3186 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3187 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3188 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3189 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3190 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3191 		}
3192 		scsi_set_resid(scp, scsi_bufflen(scp));
3193 		return check_condition_result;
3194 	}
3195 
3196 	read_lock(macc_lckp);
3197 
3198 	/* DIX + T10 DIF */
3199 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3200 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3201 
3202 		if (prot_ret) {
3203 			read_unlock(macc_lckp);
3204 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3205 			return illegal_condition_result;
3206 		}
3207 	}
3208 
3209 	ret = do_device_access(sip, scp, 0, lba, num, false);
3210 	read_unlock(macc_lckp);
3211 	if (unlikely(ret == -1))
3212 		return DID_ERROR << 16;
3213 
3214 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3215 
3216 	if (unlikely(sqcp)) {
3217 		if (sqcp->inj_recovered) {
3218 			mk_sense_buffer(scp, RECOVERED_ERROR,
3219 					THRESHOLD_EXCEEDED, 0);
3220 			return check_condition_result;
3221 		} else if (sqcp->inj_transport) {
3222 			mk_sense_buffer(scp, ABORTED_COMMAND,
3223 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3224 			return check_condition_result;
3225 		} else if (sqcp->inj_dif) {
3226 			/* Logical block guard check failed */
3227 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3228 			return illegal_condition_result;
3229 		} else if (sqcp->inj_dix) {
3230 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3231 			return illegal_condition_result;
3232 		}
3233 	}
3234 	return 0;
3235 }
3236 
3237 static void dump_sector(unsigned char *buf, int len)
3238 {
3239 	int i, j, n;
3240 
3241 	pr_err(">>> Sector Dump <<<\n");
3242 	for (i = 0 ; i < len ; i += 16) {
3243 		char b[128];
3244 
3245 		for (j = 0, n = 0; j < 16; j++) {
3246 			unsigned char c = buf[i+j];
3247 
3248 			if (c >= 0x20 && c < 0x7e)
3249 				n += scnprintf(b + n, sizeof(b) - n,
3250 					       " %c ", buf[i+j]);
3251 			else
3252 				n += scnprintf(b + n, sizeof(b) - n,
3253 					       "%02x ", buf[i+j]);
3254 		}
3255 		pr_err("%04d: %s\n", i, b);
3256 	}
3257 }
3258 
3259 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3260 			     unsigned int sectors, u32 ei_lba)
3261 {
3262 	int ret;
3263 	struct t10_pi_tuple *sdt;
3264 	void *daddr;
3265 	sector_t sector = start_sec;
3266 	int ppage_offset;
3267 	int dpage_offset;
3268 	struct sg_mapping_iter diter;
3269 	struct sg_mapping_iter piter;
3270 
3271 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3272 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3273 
3274 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3275 			scsi_prot_sg_count(SCpnt),
3276 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3277 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3278 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3279 
3280 	/* For each protection page */
3281 	while (sg_miter_next(&piter)) {
3282 		dpage_offset = 0;
3283 		if (WARN_ON(!sg_miter_next(&diter))) {
3284 			ret = 0x01;
3285 			goto out;
3286 		}
3287 
3288 		for (ppage_offset = 0; ppage_offset < piter.length;
3289 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3290 			/* If we're at the end of the current
3291 			 * data page advance to the next one
3292 			 */
3293 			if (dpage_offset >= diter.length) {
3294 				if (WARN_ON(!sg_miter_next(&diter))) {
3295 					ret = 0x01;
3296 					goto out;
3297 				}
3298 				dpage_offset = 0;
3299 			}
3300 
3301 			sdt = piter.addr + ppage_offset;
3302 			daddr = diter.addr + dpage_offset;
3303 
3304 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3305 			if (ret) {
3306 				dump_sector(daddr, sdebug_sector_size);
3307 				goto out;
3308 			}
3309 
3310 			sector++;
3311 			ei_lba++;
3312 			dpage_offset += sdebug_sector_size;
3313 		}
3314 		diter.consumed = dpage_offset;
3315 		sg_miter_stop(&diter);
3316 	}
3317 	sg_miter_stop(&piter);
3318 
3319 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3320 	dix_writes++;
3321 
3322 	return 0;
3323 
3324 out:
3325 	dif_errors++;
3326 	sg_miter_stop(&diter);
3327 	sg_miter_stop(&piter);
3328 	return ret;
3329 }
3330 
3331 static unsigned long lba_to_map_index(sector_t lba)
3332 {
3333 	if (sdebug_unmap_alignment)
3334 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3335 	sector_div(lba, sdebug_unmap_granularity);
3336 	return lba;
3337 }
3338 
3339 static sector_t map_index_to_lba(unsigned long index)
3340 {
3341 	sector_t lba = index * sdebug_unmap_granularity;
3342 
3343 	if (sdebug_unmap_alignment)
3344 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3345 	return lba;
3346 }
3347 
3348 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3349 			      unsigned int *num)
3350 {
3351 	sector_t end;
3352 	unsigned int mapped;
3353 	unsigned long index;
3354 	unsigned long next;
3355 
3356 	index = lba_to_map_index(lba);
3357 	mapped = test_bit(index, sip->map_storep);
3358 
3359 	if (mapped)
3360 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3361 	else
3362 		next = find_next_bit(sip->map_storep, map_size, index);
3363 
3364 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3365 	*num = end - lba;
3366 	return mapped;
3367 }
3368 
3369 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3370 		       unsigned int len)
3371 {
3372 	sector_t end = lba + len;
3373 
3374 	while (lba < end) {
3375 		unsigned long index = lba_to_map_index(lba);
3376 
3377 		if (index < map_size)
3378 			set_bit(index, sip->map_storep);
3379 
3380 		lba = map_index_to_lba(index + 1);
3381 	}
3382 }
3383 
3384 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3385 			 unsigned int len)
3386 {
3387 	sector_t end = lba + len;
3388 	u8 *fsp = sip->storep;
3389 
3390 	while (lba < end) {
3391 		unsigned long index = lba_to_map_index(lba);
3392 
3393 		if (lba == map_index_to_lba(index) &&
3394 		    lba + sdebug_unmap_granularity <= end &&
3395 		    index < map_size) {
3396 			clear_bit(index, sip->map_storep);
3397 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
3398 				memset(fsp + lba * sdebug_sector_size,
3399 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3400 				       sdebug_sector_size *
3401 				       sdebug_unmap_granularity);
3402 			}
3403 			if (sip->dif_storep) {
3404 				memset(sip->dif_storep + lba, 0xff,
3405 				       sizeof(*sip->dif_storep) *
3406 				       sdebug_unmap_granularity);
3407 			}
3408 		}
3409 		lba = map_index_to_lba(index + 1);
3410 	}
3411 }
3412 
3413 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3414 {
3415 	bool check_prot;
3416 	u32 num;
3417 	u32 ei_lba;
3418 	int ret;
3419 	u64 lba;
3420 	struct sdeb_store_info *sip = devip2sip(devip, true);
3421 	rwlock_t *macc_lckp = &sip->macc_lck;
3422 	u8 *cmd = scp->cmnd;
3423 
3424 	switch (cmd[0]) {
3425 	case WRITE_16:
3426 		ei_lba = 0;
3427 		lba = get_unaligned_be64(cmd + 2);
3428 		num = get_unaligned_be32(cmd + 10);
3429 		check_prot = true;
3430 		break;
3431 	case WRITE_10:
3432 		ei_lba = 0;
3433 		lba = get_unaligned_be32(cmd + 2);
3434 		num = get_unaligned_be16(cmd + 7);
3435 		check_prot = true;
3436 		break;
3437 	case WRITE_6:
3438 		ei_lba = 0;
3439 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3440 		      (u32)(cmd[1] & 0x1f) << 16;
3441 		num = (0 == cmd[4]) ? 256 : cmd[4];
3442 		check_prot = true;
3443 		break;
3444 	case WRITE_12:
3445 		ei_lba = 0;
3446 		lba = get_unaligned_be32(cmd + 2);
3447 		num = get_unaligned_be32(cmd + 6);
3448 		check_prot = true;
3449 		break;
3450 	case 0x53:	/* XDWRITEREAD(10) */
3451 		ei_lba = 0;
3452 		lba = get_unaligned_be32(cmd + 2);
3453 		num = get_unaligned_be16(cmd + 7);
3454 		check_prot = false;
3455 		break;
3456 	default:	/* assume WRITE(32) */
3457 		lba = get_unaligned_be64(cmd + 12);
3458 		ei_lba = get_unaligned_be32(cmd + 20);
3459 		num = get_unaligned_be32(cmd + 28);
3460 		check_prot = false;
3461 		break;
3462 	}
3463 	if (unlikely(have_dif_prot && check_prot)) {
3464 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3465 		    (cmd[1] & 0xe0)) {
3466 			mk_sense_invalid_opcode(scp);
3467 			return check_condition_result;
3468 		}
3469 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3470 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3471 		    (cmd[1] & 0xe0) == 0)
3472 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3473 				    "to DIF device\n");
3474 	}
3475 
3476 	write_lock(macc_lckp);
3477 	ret = check_device_access_params(scp, lba, num, true);
3478 	if (ret) {
3479 		write_unlock(macc_lckp);
3480 		return ret;
3481 	}
3482 
3483 	/* DIX + T10 DIF */
3484 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3485 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3486 
3487 		if (prot_ret) {
3488 			write_unlock(macc_lckp);
3489 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3490 			return illegal_condition_result;
3491 		}
3492 	}
3493 
3494 	ret = do_device_access(sip, scp, 0, lba, num, true);
3495 	if (unlikely(scsi_debug_lbp()))
3496 		map_region(sip, lba, num);
3497 	/* If ZBC zone then bump its write pointer */
3498 	if (sdebug_dev_is_zoned(devip))
3499 		zbc_inc_wp(devip, lba, num);
3500 	write_unlock(macc_lckp);
3501 	if (unlikely(-1 == ret))
3502 		return DID_ERROR << 16;
3503 	else if (unlikely(sdebug_verbose &&
3504 			  (ret < (num * sdebug_sector_size))))
3505 		sdev_printk(KERN_INFO, scp->device,
3506 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3507 			    my_name, num * sdebug_sector_size, ret);
3508 
3509 	if (unlikely(sdebug_any_injecting_opt)) {
3510 		struct sdebug_queued_cmd *sqcp =
3511 				(struct sdebug_queued_cmd *)scp->host_scribble;
3512 
3513 		if (sqcp) {
3514 			if (sqcp->inj_recovered) {
3515 				mk_sense_buffer(scp, RECOVERED_ERROR,
3516 						THRESHOLD_EXCEEDED, 0);
3517 				return check_condition_result;
3518 			} else if (sqcp->inj_dif) {
3519 				/* Logical block guard check failed */
3520 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3521 				return illegal_condition_result;
3522 			} else if (sqcp->inj_dix) {
3523 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3524 				return illegal_condition_result;
3525 			}
3526 		}
3527 	}
3528 	return 0;
3529 }
3530 
3531 /*
3532  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3533  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3534  */
3535 static int resp_write_scat(struct scsi_cmnd *scp,
3536 			   struct sdebug_dev_info *devip)
3537 {
3538 	u8 *cmd = scp->cmnd;
3539 	u8 *lrdp = NULL;
3540 	u8 *up;
3541 	struct sdeb_store_info *sip = devip2sip(devip, true);
3542 	rwlock_t *macc_lckp = &sip->macc_lck;
3543 	u8 wrprotect;
3544 	u16 lbdof, num_lrd, k;
3545 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3546 	u32 lb_size = sdebug_sector_size;
3547 	u32 ei_lba;
3548 	u64 lba;
3549 	int ret, res;
3550 	bool is_16;
3551 	static const u32 lrd_size = 32; /* + parameter list header size */
3552 
3553 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3554 		is_16 = false;
3555 		wrprotect = (cmd[10] >> 5) & 0x7;
3556 		lbdof = get_unaligned_be16(cmd + 12);
3557 		num_lrd = get_unaligned_be16(cmd + 16);
3558 		bt_len = get_unaligned_be32(cmd + 28);
3559 	} else {        /* that leaves WRITE SCATTERED(16) */
3560 		is_16 = true;
3561 		wrprotect = (cmd[2] >> 5) & 0x7;
3562 		lbdof = get_unaligned_be16(cmd + 4);
3563 		num_lrd = get_unaligned_be16(cmd + 8);
3564 		bt_len = get_unaligned_be32(cmd + 10);
3565 		if (unlikely(have_dif_prot)) {
3566 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3567 			    wrprotect) {
3568 				mk_sense_invalid_opcode(scp);
3569 				return illegal_condition_result;
3570 			}
3571 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3572 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3573 			     wrprotect == 0)
3574 				sdev_printk(KERN_ERR, scp->device,
3575 					    "Unprotected WR to DIF device\n");
3576 		}
3577 	}
3578 	if ((num_lrd == 0) || (bt_len == 0))
3579 		return 0;       /* T10 says these do-nothings are not errors */
3580 	if (lbdof == 0) {
3581 		if (sdebug_verbose)
3582 			sdev_printk(KERN_INFO, scp->device,
3583 				"%s: %s: LB Data Offset field bad\n",
3584 				my_name, __func__);
3585 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3586 		return illegal_condition_result;
3587 	}
3588 	lbdof_blen = lbdof * lb_size;
3589 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3590 		if (sdebug_verbose)
3591 			sdev_printk(KERN_INFO, scp->device,
3592 				"%s: %s: LBA range descriptors don't fit\n",
3593 				my_name, __func__);
3594 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3595 		return illegal_condition_result;
3596 	}
3597 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3598 	if (lrdp == NULL)
3599 		return SCSI_MLQUEUE_HOST_BUSY;
3600 	if (sdebug_verbose)
3601 		sdev_printk(KERN_INFO, scp->device,
3602 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3603 			my_name, __func__, lbdof_blen);
3604 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3605 	if (res == -1) {
3606 		ret = DID_ERROR << 16;
3607 		goto err_out;
3608 	}
3609 
3610 	write_lock(macc_lckp);
3611 	sg_off = lbdof_blen;
3612 	/* Spec says Buffer xfer Length field in number of LBs in dout */
3613 	cum_lb = 0;
3614 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3615 		lba = get_unaligned_be64(up + 0);
3616 		num = get_unaligned_be32(up + 8);
3617 		if (sdebug_verbose)
3618 			sdev_printk(KERN_INFO, scp->device,
3619 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3620 				my_name, __func__, k, lba, num, sg_off);
3621 		if (num == 0)
3622 			continue;
3623 		ret = check_device_access_params(scp, lba, num, true);
3624 		if (ret)
3625 			goto err_out_unlock;
3626 		num_by = num * lb_size;
3627 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3628 
3629 		if ((cum_lb + num) > bt_len) {
3630 			if (sdebug_verbose)
3631 				sdev_printk(KERN_INFO, scp->device,
3632 				    "%s: %s: sum of blocks > data provided\n",
3633 				    my_name, __func__);
3634 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3635 					0);
3636 			ret = illegal_condition_result;
3637 			goto err_out_unlock;
3638 		}
3639 
3640 		/* DIX + T10 DIF */
3641 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3642 			int prot_ret = prot_verify_write(scp, lba, num,
3643 							 ei_lba);
3644 
3645 			if (prot_ret) {
3646 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3647 						prot_ret);
3648 				ret = illegal_condition_result;
3649 				goto err_out_unlock;
3650 			}
3651 		}
3652 
3653 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3654 		/* If ZBC zone then bump its write pointer */
3655 		if (sdebug_dev_is_zoned(devip))
3656 			zbc_inc_wp(devip, lba, num);
3657 		if (unlikely(scsi_debug_lbp()))
3658 			map_region(sip, lba, num);
3659 		if (unlikely(-1 == ret)) {
3660 			ret = DID_ERROR << 16;
3661 			goto err_out_unlock;
3662 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3663 			sdev_printk(KERN_INFO, scp->device,
3664 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3665 			    my_name, num_by, ret);
3666 
3667 		if (unlikely(sdebug_any_injecting_opt)) {
3668 			struct sdebug_queued_cmd *sqcp =
3669 				(struct sdebug_queued_cmd *)scp->host_scribble;
3670 
3671 			if (sqcp) {
3672 				if (sqcp->inj_recovered) {
3673 					mk_sense_buffer(scp, RECOVERED_ERROR,
3674 							THRESHOLD_EXCEEDED, 0);
3675 					ret = illegal_condition_result;
3676 					goto err_out_unlock;
3677 				} else if (sqcp->inj_dif) {
3678 					/* Logical block guard check failed */
3679 					mk_sense_buffer(scp, ABORTED_COMMAND,
3680 							0x10, 1);
3681 					ret = illegal_condition_result;
3682 					goto err_out_unlock;
3683 				} else if (sqcp->inj_dix) {
3684 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3685 							0x10, 1);
3686 					ret = illegal_condition_result;
3687 					goto err_out_unlock;
3688 				}
3689 			}
3690 		}
3691 		sg_off += num_by;
3692 		cum_lb += num;
3693 	}
3694 	ret = 0;
3695 err_out_unlock:
3696 	write_unlock(macc_lckp);
3697 err_out:
3698 	kfree(lrdp);
3699 	return ret;
3700 }
3701 
3702 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3703 			   u32 ei_lba, bool unmap, bool ndob)
3704 {
3705 	struct scsi_device *sdp = scp->device;
3706 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3707 	unsigned long long i;
3708 	u64 block, lbaa;
3709 	u32 lb_size = sdebug_sector_size;
3710 	int ret;
3711 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3712 						scp->device->hostdata, true);
3713 	rwlock_t *macc_lckp = &sip->macc_lck;
3714 	u8 *fs1p;
3715 	u8 *fsp;
3716 
3717 	write_lock(macc_lckp);
3718 
3719 	ret = check_device_access_params(scp, lba, num, true);
3720 	if (ret) {
3721 		write_unlock(macc_lckp);
3722 		return ret;
3723 	}
3724 
3725 	if (unmap && scsi_debug_lbp()) {
3726 		unmap_region(sip, lba, num);
3727 		goto out;
3728 	}
3729 	lbaa = lba;
3730 	block = do_div(lbaa, sdebug_store_sectors);
3731 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3732 	fsp = sip->storep;
3733 	fs1p = fsp + (block * lb_size);
3734 	if (ndob) {
3735 		memset(fs1p, 0, lb_size);
3736 		ret = 0;
3737 	} else
3738 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3739 
3740 	if (-1 == ret) {
3741 		write_unlock(&sip->macc_lck);
3742 		return DID_ERROR << 16;
3743 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3744 		sdev_printk(KERN_INFO, scp->device,
3745 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3746 			    my_name, "write same", lb_size, ret);
3747 
3748 	/* Copy first sector to remaining blocks */
3749 	for (i = 1 ; i < num ; i++) {
3750 		lbaa = lba + i;
3751 		block = do_div(lbaa, sdebug_store_sectors);
3752 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3753 	}
3754 	if (scsi_debug_lbp())
3755 		map_region(sip, lba, num);
3756 	/* If ZBC zone then bump its write pointer */
3757 	if (sdebug_dev_is_zoned(devip))
3758 		zbc_inc_wp(devip, lba, num);
3759 out:
3760 	write_unlock(macc_lckp);
3761 
3762 	return 0;
3763 }
3764 
3765 static int resp_write_same_10(struct scsi_cmnd *scp,
3766 			      struct sdebug_dev_info *devip)
3767 {
3768 	u8 *cmd = scp->cmnd;
3769 	u32 lba;
3770 	u16 num;
3771 	u32 ei_lba = 0;
3772 	bool unmap = false;
3773 
3774 	if (cmd[1] & 0x8) {
3775 		if (sdebug_lbpws10 == 0) {
3776 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3777 			return check_condition_result;
3778 		} else
3779 			unmap = true;
3780 	}
3781 	lba = get_unaligned_be32(cmd + 2);
3782 	num = get_unaligned_be16(cmd + 7);
3783 	if (num > sdebug_write_same_length) {
3784 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3785 		return check_condition_result;
3786 	}
3787 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3788 }
3789 
3790 static int resp_write_same_16(struct scsi_cmnd *scp,
3791 			      struct sdebug_dev_info *devip)
3792 {
3793 	u8 *cmd = scp->cmnd;
3794 	u64 lba;
3795 	u32 num;
3796 	u32 ei_lba = 0;
3797 	bool unmap = false;
3798 	bool ndob = false;
3799 
3800 	if (cmd[1] & 0x8) {	/* UNMAP */
3801 		if (sdebug_lbpws == 0) {
3802 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3803 			return check_condition_result;
3804 		} else
3805 			unmap = true;
3806 	}
3807 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3808 		ndob = true;
3809 	lba = get_unaligned_be64(cmd + 2);
3810 	num = get_unaligned_be32(cmd + 10);
3811 	if (num > sdebug_write_same_length) {
3812 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3813 		return check_condition_result;
3814 	}
3815 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3816 }
3817 
3818 /* Note the mode field is in the same position as the (lower) service action
3819  * field. For the Report supported operation codes command, SPC-4 suggests
3820  * each mode of this command should be reported separately; for future. */
3821 static int resp_write_buffer(struct scsi_cmnd *scp,
3822 			     struct sdebug_dev_info *devip)
3823 {
3824 	u8 *cmd = scp->cmnd;
3825 	struct scsi_device *sdp = scp->device;
3826 	struct sdebug_dev_info *dp;
3827 	u8 mode;
3828 
3829 	mode = cmd[1] & 0x1f;
3830 	switch (mode) {
3831 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3832 		/* set UAs on this device only */
3833 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3834 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3835 		break;
3836 	case 0x5:	/* download MC, save and ACT */
3837 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3838 		break;
3839 	case 0x6:	/* download MC with offsets and ACT */
3840 		/* set UAs on most devices (LUs) in this target */
3841 		list_for_each_entry(dp,
3842 				    &devip->sdbg_host->dev_info_list,
3843 				    dev_list)
3844 			if (dp->target == sdp->id) {
3845 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3846 				if (devip != dp)
3847 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3848 						dp->uas_bm);
3849 			}
3850 		break;
3851 	case 0x7:	/* download MC with offsets, save, and ACT */
3852 		/* set UA on all devices (LUs) in this target */
3853 		list_for_each_entry(dp,
3854 				    &devip->sdbg_host->dev_info_list,
3855 				    dev_list)
3856 			if (dp->target == sdp->id)
3857 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3858 					dp->uas_bm);
3859 		break;
3860 	default:
3861 		/* do nothing for this command for other mode values */
3862 		break;
3863 	}
3864 	return 0;
3865 }
3866 
3867 static int resp_comp_write(struct scsi_cmnd *scp,
3868 			   struct sdebug_dev_info *devip)
3869 {
3870 	u8 *cmd = scp->cmnd;
3871 	u8 *arr;
3872 	struct sdeb_store_info *sip = devip2sip(devip, true);
3873 	rwlock_t *macc_lckp = &sip->macc_lck;
3874 	u64 lba;
3875 	u32 dnum;
3876 	u32 lb_size = sdebug_sector_size;
3877 	u8 num;
3878 	int ret;
3879 	int retval = 0;
3880 
3881 	lba = get_unaligned_be64(cmd + 2);
3882 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3883 	if (0 == num)
3884 		return 0;	/* degenerate case, not an error */
3885 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3886 	    (cmd[1] & 0xe0)) {
3887 		mk_sense_invalid_opcode(scp);
3888 		return check_condition_result;
3889 	}
3890 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3891 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3892 	    (cmd[1] & 0xe0) == 0)
3893 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3894 			    "to DIF device\n");
3895 	ret = check_device_access_params(scp, lba, num, false);
3896 	if (ret)
3897 		return ret;
3898 	dnum = 2 * num;
3899 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3900 	if (NULL == arr) {
3901 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3902 				INSUFF_RES_ASCQ);
3903 		return check_condition_result;
3904 	}
3905 
3906 	write_lock(macc_lckp);
3907 
3908 	ret = do_dout_fetch(scp, dnum, arr);
3909 	if (ret == -1) {
3910 		retval = DID_ERROR << 16;
3911 		goto cleanup;
3912 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3913 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3914 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3915 			    dnum * lb_size, ret);
3916 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3917 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3918 		retval = check_condition_result;
3919 		goto cleanup;
3920 	}
3921 	if (scsi_debug_lbp())
3922 		map_region(sip, lba, num);
3923 cleanup:
3924 	write_unlock(macc_lckp);
3925 	kfree(arr);
3926 	return retval;
3927 }
3928 
3929 struct unmap_block_desc {
3930 	__be64	lba;
3931 	__be32	blocks;
3932 	__be32	__reserved;
3933 };
3934 
3935 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3936 {
3937 	unsigned char *buf;
3938 	struct unmap_block_desc *desc;
3939 	struct sdeb_store_info *sip = devip2sip(devip, true);
3940 	rwlock_t *macc_lckp = &sip->macc_lck;
3941 	unsigned int i, payload_len, descriptors;
3942 	int ret;
3943 
3944 	if (!scsi_debug_lbp())
3945 		return 0;	/* fib and say its done */
3946 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3947 	BUG_ON(scsi_bufflen(scp) != payload_len);
3948 
3949 	descriptors = (payload_len - 8) / 16;
3950 	if (descriptors > sdebug_unmap_max_desc) {
3951 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3952 		return check_condition_result;
3953 	}
3954 
3955 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3956 	if (!buf) {
3957 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3958 				INSUFF_RES_ASCQ);
3959 		return check_condition_result;
3960 	}
3961 
3962 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3963 
3964 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3965 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3966 
3967 	desc = (void *)&buf[8];
3968 
3969 	write_lock(macc_lckp);
3970 
3971 	for (i = 0 ; i < descriptors ; i++) {
3972 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3973 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3974 
3975 		ret = check_device_access_params(scp, lba, num, true);
3976 		if (ret)
3977 			goto out;
3978 
3979 		unmap_region(sip, lba, num);
3980 	}
3981 
3982 	ret = 0;
3983 
3984 out:
3985 	write_unlock(macc_lckp);
3986 	kfree(buf);
3987 
3988 	return ret;
3989 }
3990 
3991 #define SDEBUG_GET_LBA_STATUS_LEN 32
3992 
3993 static int resp_get_lba_status(struct scsi_cmnd *scp,
3994 			       struct sdebug_dev_info *devip)
3995 {
3996 	u8 *cmd = scp->cmnd;
3997 	u64 lba;
3998 	u32 alloc_len, mapped, num;
3999 	int ret;
4000 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4001 
4002 	lba = get_unaligned_be64(cmd + 2);
4003 	alloc_len = get_unaligned_be32(cmd + 10);
4004 
4005 	if (alloc_len < 24)
4006 		return 0;
4007 
4008 	ret = check_device_access_params(scp, lba, 1, false);
4009 	if (ret)
4010 		return ret;
4011 
4012 	if (scsi_debug_lbp()) {
4013 		struct sdeb_store_info *sip = devip2sip(devip, true);
4014 
4015 		mapped = map_state(sip, lba, &num);
4016 	} else {
4017 		mapped = 1;
4018 		/* following just in case virtual_gb changed */
4019 		sdebug_capacity = get_sdebug_capacity();
4020 		if (sdebug_capacity - lba <= 0xffffffff)
4021 			num = sdebug_capacity - lba;
4022 		else
4023 			num = 0xffffffff;
4024 	}
4025 
4026 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4027 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4028 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4029 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4030 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4031 
4032 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4033 }
4034 
4035 static int resp_sync_cache(struct scsi_cmnd *scp,
4036 			   struct sdebug_dev_info *devip)
4037 {
4038 	int res = 0;
4039 	u64 lba;
4040 	u32 num_blocks;
4041 	u8 *cmd = scp->cmnd;
4042 
4043 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4044 		lba = get_unaligned_be32(cmd + 2);
4045 		num_blocks = get_unaligned_be16(cmd + 7);
4046 	} else {				/* SYNCHRONIZE_CACHE(16) */
4047 		lba = get_unaligned_be64(cmd + 2);
4048 		num_blocks = get_unaligned_be32(cmd + 10);
4049 	}
4050 	if (lba + num_blocks > sdebug_capacity) {
4051 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4052 		return check_condition_result;
4053 	}
4054 	if (!write_since_sync || cmd[1] & 0x2)
4055 		res = SDEG_RES_IMMED_MASK;
4056 	else		/* delay if write_since_sync and IMMED clear */
4057 		write_since_sync = false;
4058 	return res;
4059 }
4060 
4061 /*
4062  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4063  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4064  * a GOOD status otherwise. Model a disk with a big cache and yield
4065  * CONDITION MET. Actually tries to bring range in main memory into the
4066  * cache associated with the CPU(s).
4067  */
4068 static int resp_pre_fetch(struct scsi_cmnd *scp,
4069 			  struct sdebug_dev_info *devip)
4070 {
4071 	int res = 0;
4072 	u64 lba;
4073 	u64 block, rest = 0;
4074 	u32 nblks;
4075 	u8 *cmd = scp->cmnd;
4076 	struct sdeb_store_info *sip = devip2sip(devip, true);
4077 	rwlock_t *macc_lckp = &sip->macc_lck;
4078 	u8 *fsp = sip->storep;
4079 
4080 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4081 		lba = get_unaligned_be32(cmd + 2);
4082 		nblks = get_unaligned_be16(cmd + 7);
4083 	} else {			/* PRE-FETCH(16) */
4084 		lba = get_unaligned_be64(cmd + 2);
4085 		nblks = get_unaligned_be32(cmd + 10);
4086 	}
4087 	if (lba + nblks > sdebug_capacity) {
4088 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4089 		return check_condition_result;
4090 	}
4091 	if (!fsp)
4092 		goto fini;
4093 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4094 	block = do_div(lba, sdebug_store_sectors);
4095 	if (block + nblks > sdebug_store_sectors)
4096 		rest = block + nblks - sdebug_store_sectors;
4097 
4098 	/* Try to bring the PRE-FETCH range into CPU's cache */
4099 	read_lock(macc_lckp);
4100 	prefetch_range(fsp + (sdebug_sector_size * block),
4101 		       (nblks - rest) * sdebug_sector_size);
4102 	if (rest)
4103 		prefetch_range(fsp, rest * sdebug_sector_size);
4104 	read_unlock(macc_lckp);
4105 fini:
4106 	if (cmd[1] & 0x2)
4107 		res = SDEG_RES_IMMED_MASK;
4108 	return res | condition_met_result;
4109 }
4110 
4111 #define RL_BUCKET_ELEMS 8
4112 
4113 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4114  * (W-LUN), the normal Linux scanning logic does not associate it with a
4115  * device (e.g. /dev/sg7). The following magic will make that association:
4116  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4117  * where <n> is a host number. If there are multiple targets in a host then
4118  * the above will associate a W-LUN to each target. To only get a W-LUN
4119  * for target 2, then use "echo '- 2 49409' > scan" .
4120  */
4121 static int resp_report_luns(struct scsi_cmnd *scp,
4122 			    struct sdebug_dev_info *devip)
4123 {
4124 	unsigned char *cmd = scp->cmnd;
4125 	unsigned int alloc_len;
4126 	unsigned char select_report;
4127 	u64 lun;
4128 	struct scsi_lun *lun_p;
4129 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4130 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4131 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4132 	unsigned int tlun_cnt;	/* total LUN count */
4133 	unsigned int rlen;	/* response length (in bytes) */
4134 	int k, j, n, res;
4135 	unsigned int off_rsp = 0;
4136 	const int sz_lun = sizeof(struct scsi_lun);
4137 
4138 	clear_luns_changed_on_target(devip);
4139 
4140 	select_report = cmd[2];
4141 	alloc_len = get_unaligned_be32(cmd + 6);
4142 
4143 	if (alloc_len < 4) {
4144 		pr_err("alloc len too small %d\n", alloc_len);
4145 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4146 		return check_condition_result;
4147 	}
4148 
4149 	switch (select_report) {
4150 	case 0:		/* all LUNs apart from W-LUNs */
4151 		lun_cnt = sdebug_max_luns;
4152 		wlun_cnt = 0;
4153 		break;
4154 	case 1:		/* only W-LUNs */
4155 		lun_cnt = 0;
4156 		wlun_cnt = 1;
4157 		break;
4158 	case 2:		/* all LUNs */
4159 		lun_cnt = sdebug_max_luns;
4160 		wlun_cnt = 1;
4161 		break;
4162 	case 0x10:	/* only administrative LUs */
4163 	case 0x11:	/* see SPC-5 */
4164 	case 0x12:	/* only subsiduary LUs owned by referenced LU */
4165 	default:
4166 		pr_debug("select report invalid %d\n", select_report);
4167 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4168 		return check_condition_result;
4169 	}
4170 
4171 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4172 		--lun_cnt;
4173 
4174 	tlun_cnt = lun_cnt + wlun_cnt;
4175 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4176 	scsi_set_resid(scp, scsi_bufflen(scp));
4177 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4178 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4179 
4180 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4181 	lun = sdebug_no_lun_0 ? 1 : 0;
4182 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4183 		memset(arr, 0, sizeof(arr));
4184 		lun_p = (struct scsi_lun *)&arr[0];
4185 		if (k == 0) {
4186 			put_unaligned_be32(rlen, &arr[0]);
4187 			++lun_p;
4188 			j = 1;
4189 		}
4190 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4191 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4192 				break;
4193 			int_to_scsilun(lun++, lun_p);
4194 		}
4195 		if (j < RL_BUCKET_ELEMS)
4196 			break;
4197 		n = j * sz_lun;
4198 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4199 		if (res)
4200 			return res;
4201 		off_rsp += n;
4202 	}
4203 	if (wlun_cnt) {
4204 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4205 		++j;
4206 	}
4207 	if (j > 0)
4208 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4209 	return res;
4210 }
4211 
4212 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4213 {
4214 	bool is_bytchk3 = false;
4215 	u8 bytchk;
4216 	int ret, j;
4217 	u32 vnum, a_num, off;
4218 	const u32 lb_size = sdebug_sector_size;
4219 	u64 lba;
4220 	u8 *arr;
4221 	u8 *cmd = scp->cmnd;
4222 	struct sdeb_store_info *sip = devip2sip(devip, true);
4223 	rwlock_t *macc_lckp = &sip->macc_lck;
4224 
4225 	bytchk = (cmd[1] >> 1) & 0x3;
4226 	if (bytchk == 0) {
4227 		return 0;	/* always claim internal verify okay */
4228 	} else if (bytchk == 2) {
4229 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4230 		return check_condition_result;
4231 	} else if (bytchk == 3) {
4232 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4233 	}
4234 	switch (cmd[0]) {
4235 	case VERIFY_16:
4236 		lba = get_unaligned_be64(cmd + 2);
4237 		vnum = get_unaligned_be32(cmd + 10);
4238 		break;
4239 	case VERIFY:		/* is VERIFY(10) */
4240 		lba = get_unaligned_be32(cmd + 2);
4241 		vnum = get_unaligned_be16(cmd + 7);
4242 		break;
4243 	default:
4244 		mk_sense_invalid_opcode(scp);
4245 		return check_condition_result;
4246 	}
4247 	a_num = is_bytchk3 ? 1 : vnum;
4248 	/* Treat following check like one for read (i.e. no write) access */
4249 	ret = check_device_access_params(scp, lba, a_num, false);
4250 	if (ret)
4251 		return ret;
4252 
4253 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4254 	if (!arr) {
4255 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4256 				INSUFF_RES_ASCQ);
4257 		return check_condition_result;
4258 	}
4259 	/* Not changing store, so only need read access */
4260 	read_lock(macc_lckp);
4261 
4262 	ret = do_dout_fetch(scp, a_num, arr);
4263 	if (ret == -1) {
4264 		ret = DID_ERROR << 16;
4265 		goto cleanup;
4266 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4267 		sdev_printk(KERN_INFO, scp->device,
4268 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4269 			    my_name, __func__, a_num * lb_size, ret);
4270 	}
4271 	if (is_bytchk3) {
4272 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4273 			memcpy(arr + off, arr, lb_size);
4274 	}
4275 	ret = 0;
4276 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4277 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4278 		ret = check_condition_result;
4279 		goto cleanup;
4280 	}
4281 cleanup:
4282 	read_unlock(macc_lckp);
4283 	kfree(arr);
4284 	return ret;
4285 }
4286 
4287 #define RZONES_DESC_HD 64
4288 
4289 /* Report zones depending on start LBA nad reporting options */
4290 static int resp_report_zones(struct scsi_cmnd *scp,
4291 			     struct sdebug_dev_info *devip)
4292 {
4293 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4294 	int ret = 0;
4295 	u32 alloc_len, rep_opts, rep_len;
4296 	bool partial;
4297 	u64 lba, zs_lba;
4298 	u8 *arr = NULL, *desc;
4299 	u8 *cmd = scp->cmnd;
4300 	struct sdeb_zone_state *zsp;
4301 	struct sdeb_store_info *sip = devip2sip(devip, false);
4302 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4303 
4304 	if (!sdebug_dev_is_zoned(devip)) {
4305 		mk_sense_invalid_opcode(scp);
4306 		return check_condition_result;
4307 	}
4308 	zs_lba = get_unaligned_be64(cmd + 2);
4309 	alloc_len = get_unaligned_be32(cmd + 10);
4310 	rep_opts = cmd[14] & 0x3f;
4311 	partial = cmd[14] & 0x80;
4312 
4313 	if (zs_lba >= sdebug_capacity) {
4314 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4315 		return check_condition_result;
4316 	}
4317 
4318 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4319 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4320 			    max_zones);
4321 
4322 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4323 	if (!arr) {
4324 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4325 				INSUFF_RES_ASCQ);
4326 		return check_condition_result;
4327 	}
4328 
4329 	read_lock(macc_lckp);
4330 
4331 	desc = arr + 64;
4332 	for (i = 0; i < max_zones; i++) {
4333 		lba = zs_lba + devip->zsize * i;
4334 		if (lba > sdebug_capacity)
4335 			break;
4336 		zsp = zbc_zone(devip, lba);
4337 		switch (rep_opts) {
4338 		case 0x00:
4339 			/* All zones */
4340 			break;
4341 		case 0x01:
4342 			/* Empty zones */
4343 			if (zsp->z_cond != ZC1_EMPTY)
4344 				continue;
4345 			break;
4346 		case 0x02:
4347 			/* Implicit open zones */
4348 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4349 				continue;
4350 			break;
4351 		case 0x03:
4352 			/* Explicit open zones */
4353 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4354 				continue;
4355 			break;
4356 		case 0x04:
4357 			/* Closed zones */
4358 			if (zsp->z_cond != ZC4_CLOSED)
4359 				continue;
4360 			break;
4361 		case 0x05:
4362 			/* Full zones */
4363 			if (zsp->z_cond != ZC5_FULL)
4364 				continue;
4365 			break;
4366 		case 0x06:
4367 		case 0x07:
4368 		case 0x10:
4369 			/*
4370 			 * Read-only, offline, reset WP recommended are
4371 			 * not emulated: no zones to report;
4372 			 */
4373 			continue;
4374 		case 0x11:
4375 			/* non-seq-resource set */
4376 			if (!zsp->z_non_seq_resource)
4377 				continue;
4378 			break;
4379 		case 0x3f:
4380 			/* Not write pointer (conventional) zones */
4381 			if (!zbc_zone_is_conv(zsp))
4382 				continue;
4383 			break;
4384 		default:
4385 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4386 					INVALID_FIELD_IN_CDB, 0);
4387 			ret = check_condition_result;
4388 			goto fini;
4389 		}
4390 
4391 		if (nrz < rep_max_zones) {
4392 			/* Fill zone descriptor */
4393 			desc[0] = zsp->z_type;
4394 			desc[1] = zsp->z_cond << 4;
4395 			if (zsp->z_non_seq_resource)
4396 				desc[1] |= 1 << 1;
4397 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4398 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4399 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4400 			desc += 64;
4401 		}
4402 
4403 		if (partial && nrz >= rep_max_zones)
4404 			break;
4405 
4406 		nrz++;
4407 	}
4408 
4409 	/* Report header */
4410 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4411 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4412 
4413 	rep_len = (unsigned long)desc - (unsigned long)arr;
4414 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4415 
4416 fini:
4417 	read_unlock(macc_lckp);
4418 	kfree(arr);
4419 	return ret;
4420 }
4421 
4422 /* Logic transplanted from tcmu-runner, file_zbc.c */
4423 static void zbc_open_all(struct sdebug_dev_info *devip)
4424 {
4425 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4426 	unsigned int i;
4427 
4428 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4429 		if (zsp->z_cond == ZC4_CLOSED)
4430 			zbc_open_zone(devip, &devip->zstate[i], true);
4431 	}
4432 }
4433 
4434 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4435 {
4436 	int res = 0;
4437 	u64 z_id;
4438 	enum sdebug_z_cond zc;
4439 	u8 *cmd = scp->cmnd;
4440 	struct sdeb_zone_state *zsp;
4441 	bool all = cmd[14] & 0x01;
4442 	struct sdeb_store_info *sip = devip2sip(devip, false);
4443 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4444 
4445 	if (!sdebug_dev_is_zoned(devip)) {
4446 		mk_sense_invalid_opcode(scp);
4447 		return check_condition_result;
4448 	}
4449 
4450 	write_lock(macc_lckp);
4451 
4452 	if (all) {
4453 		/* Check if all closed zones can be open */
4454 		if (devip->max_open &&
4455 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4456 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4457 					INSUFF_ZONE_ASCQ);
4458 			res = check_condition_result;
4459 			goto fini;
4460 		}
4461 		/* Open all closed zones */
4462 		zbc_open_all(devip);
4463 		goto fini;
4464 	}
4465 
4466 	/* Open the specified zone */
4467 	z_id = get_unaligned_be64(cmd + 2);
4468 	if (z_id >= sdebug_capacity) {
4469 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4470 		res = check_condition_result;
4471 		goto fini;
4472 	}
4473 
4474 	zsp = zbc_zone(devip, z_id);
4475 	if (z_id != zsp->z_start) {
4476 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4477 		res = check_condition_result;
4478 		goto fini;
4479 	}
4480 	if (zbc_zone_is_conv(zsp)) {
4481 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4482 		res = check_condition_result;
4483 		goto fini;
4484 	}
4485 
4486 	zc = zsp->z_cond;
4487 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4488 		goto fini;
4489 
4490 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4491 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4492 				INSUFF_ZONE_ASCQ);
4493 		res = check_condition_result;
4494 		goto fini;
4495 	}
4496 
4497 	if (zc == ZC2_IMPLICIT_OPEN)
4498 		zbc_close_zone(devip, zsp);
4499 	zbc_open_zone(devip, zsp, true);
4500 fini:
4501 	write_unlock(macc_lckp);
4502 	return res;
4503 }
4504 
4505 static void zbc_close_all(struct sdebug_dev_info *devip)
4506 {
4507 	unsigned int i;
4508 
4509 	for (i = 0; i < devip->nr_zones; i++)
4510 		zbc_close_zone(devip, &devip->zstate[i]);
4511 }
4512 
4513 static int resp_close_zone(struct scsi_cmnd *scp,
4514 			   struct sdebug_dev_info *devip)
4515 {
4516 	int res = 0;
4517 	u64 z_id;
4518 	u8 *cmd = scp->cmnd;
4519 	struct sdeb_zone_state *zsp;
4520 	bool all = cmd[14] & 0x01;
4521 	struct sdeb_store_info *sip = devip2sip(devip, false);
4522 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4523 
4524 	if (!sdebug_dev_is_zoned(devip)) {
4525 		mk_sense_invalid_opcode(scp);
4526 		return check_condition_result;
4527 	}
4528 
4529 	write_lock(macc_lckp);
4530 
4531 	if (all) {
4532 		zbc_close_all(devip);
4533 		goto fini;
4534 	}
4535 
4536 	/* Close specified zone */
4537 	z_id = get_unaligned_be64(cmd + 2);
4538 	if (z_id >= sdebug_capacity) {
4539 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4540 		res = check_condition_result;
4541 		goto fini;
4542 	}
4543 
4544 	zsp = zbc_zone(devip, z_id);
4545 	if (z_id != zsp->z_start) {
4546 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4547 		res = check_condition_result;
4548 		goto fini;
4549 	}
4550 	if (zbc_zone_is_conv(zsp)) {
4551 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4552 		res = check_condition_result;
4553 		goto fini;
4554 	}
4555 
4556 	zbc_close_zone(devip, zsp);
4557 fini:
4558 	write_unlock(macc_lckp);
4559 	return res;
4560 }
4561 
4562 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4563 			    struct sdeb_zone_state *zsp, bool empty)
4564 {
4565 	enum sdebug_z_cond zc = zsp->z_cond;
4566 
4567 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4568 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4569 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4570 			zbc_close_zone(devip, zsp);
4571 		if (zsp->z_cond == ZC4_CLOSED)
4572 			devip->nr_closed--;
4573 		zsp->z_wp = zsp->z_start + zsp->z_size;
4574 		zsp->z_cond = ZC5_FULL;
4575 	}
4576 }
4577 
4578 static void zbc_finish_all(struct sdebug_dev_info *devip)
4579 {
4580 	unsigned int i;
4581 
4582 	for (i = 0; i < devip->nr_zones; i++)
4583 		zbc_finish_zone(devip, &devip->zstate[i], false);
4584 }
4585 
4586 static int resp_finish_zone(struct scsi_cmnd *scp,
4587 			    struct sdebug_dev_info *devip)
4588 {
4589 	struct sdeb_zone_state *zsp;
4590 	int res = 0;
4591 	u64 z_id;
4592 	u8 *cmd = scp->cmnd;
4593 	bool all = cmd[14] & 0x01;
4594 	struct sdeb_store_info *sip = devip2sip(devip, false);
4595 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4596 
4597 	if (!sdebug_dev_is_zoned(devip)) {
4598 		mk_sense_invalid_opcode(scp);
4599 		return check_condition_result;
4600 	}
4601 
4602 	write_lock(macc_lckp);
4603 
4604 	if (all) {
4605 		zbc_finish_all(devip);
4606 		goto fini;
4607 	}
4608 
4609 	/* Finish the specified zone */
4610 	z_id = get_unaligned_be64(cmd + 2);
4611 	if (z_id >= sdebug_capacity) {
4612 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4613 		res = check_condition_result;
4614 		goto fini;
4615 	}
4616 
4617 	zsp = zbc_zone(devip, z_id);
4618 	if (z_id != zsp->z_start) {
4619 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4620 		res = check_condition_result;
4621 		goto fini;
4622 	}
4623 	if (zbc_zone_is_conv(zsp)) {
4624 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4625 		res = check_condition_result;
4626 		goto fini;
4627 	}
4628 
4629 	zbc_finish_zone(devip, zsp, true);
4630 fini:
4631 	write_unlock(macc_lckp);
4632 	return res;
4633 }
4634 
4635 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4636 			 struct sdeb_zone_state *zsp)
4637 {
4638 	enum sdebug_z_cond zc;
4639 
4640 	if (zbc_zone_is_conv(zsp))
4641 		return;
4642 
4643 	zc = zsp->z_cond;
4644 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4645 		zbc_close_zone(devip, zsp);
4646 
4647 	if (zsp->z_cond == ZC4_CLOSED)
4648 		devip->nr_closed--;
4649 
4650 	zsp->z_non_seq_resource = false;
4651 	zsp->z_wp = zsp->z_start;
4652 	zsp->z_cond = ZC1_EMPTY;
4653 }
4654 
4655 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4656 {
4657 	unsigned int i;
4658 
4659 	for (i = 0; i < devip->nr_zones; i++)
4660 		zbc_rwp_zone(devip, &devip->zstate[i]);
4661 }
4662 
4663 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4664 {
4665 	struct sdeb_zone_state *zsp;
4666 	int res = 0;
4667 	u64 z_id;
4668 	u8 *cmd = scp->cmnd;
4669 	bool all = cmd[14] & 0x01;
4670 	struct sdeb_store_info *sip = devip2sip(devip, false);
4671 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4672 
4673 	if (!sdebug_dev_is_zoned(devip)) {
4674 		mk_sense_invalid_opcode(scp);
4675 		return check_condition_result;
4676 	}
4677 
4678 	write_lock(macc_lckp);
4679 
4680 	if (all) {
4681 		zbc_rwp_all(devip);
4682 		goto fini;
4683 	}
4684 
4685 	z_id = get_unaligned_be64(cmd + 2);
4686 	if (z_id >= sdebug_capacity) {
4687 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4688 		res = check_condition_result;
4689 		goto fini;
4690 	}
4691 
4692 	zsp = zbc_zone(devip, z_id);
4693 	if (z_id != zsp->z_start) {
4694 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4695 		res = check_condition_result;
4696 		goto fini;
4697 	}
4698 	if (zbc_zone_is_conv(zsp)) {
4699 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4700 		res = check_condition_result;
4701 		goto fini;
4702 	}
4703 
4704 	zbc_rwp_zone(devip, zsp);
4705 fini:
4706 	write_unlock(macc_lckp);
4707 	return res;
4708 }
4709 
4710 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4711 {
4712 	u16 hwq;
4713 
4714 	if (sdebug_host_max_queue) {
4715 		/* Provide a simple method to choose the hwq */
4716 		hwq = smp_processor_id() % submit_queues;
4717 	} else {
4718 		u32 tag = blk_mq_unique_tag(cmnd->request);
4719 
4720 		hwq = blk_mq_unique_tag_to_hwq(tag);
4721 
4722 		pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4723 		if (WARN_ON_ONCE(hwq >= submit_queues))
4724 			hwq = 0;
4725 	}
4726 	return sdebug_q_arr + hwq;
4727 }
4728 
4729 static u32 get_tag(struct scsi_cmnd *cmnd)
4730 {
4731 	return blk_mq_unique_tag(cmnd->request);
4732 }
4733 
4734 /* Queued (deferred) command completions converge here. */
4735 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4736 {
4737 	bool aborted = sd_dp->aborted;
4738 	int qc_idx;
4739 	int retiring = 0;
4740 	unsigned long iflags;
4741 	struct sdebug_queue *sqp;
4742 	struct sdebug_queued_cmd *sqcp;
4743 	struct scsi_cmnd *scp;
4744 	struct sdebug_dev_info *devip;
4745 
4746 	sd_dp->defer_t = SDEB_DEFER_NONE;
4747 	if (unlikely(aborted))
4748 		sd_dp->aborted = false;
4749 	qc_idx = sd_dp->qc_idx;
4750 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4751 	if (sdebug_statistics) {
4752 		atomic_inc(&sdebug_completions);
4753 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4754 			atomic_inc(&sdebug_miss_cpus);
4755 	}
4756 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4757 		pr_err("wild qc_idx=%d\n", qc_idx);
4758 		return;
4759 	}
4760 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4761 	sqcp = &sqp->qc_arr[qc_idx];
4762 	scp = sqcp->a_cmnd;
4763 	if (unlikely(scp == NULL)) {
4764 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4765 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4766 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4767 		return;
4768 	}
4769 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4770 	if (likely(devip))
4771 		atomic_dec(&devip->num_in_q);
4772 	else
4773 		pr_err("devip=NULL\n");
4774 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4775 		retiring = 1;
4776 
4777 	sqcp->a_cmnd = NULL;
4778 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4779 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4780 		pr_err("Unexpected completion\n");
4781 		return;
4782 	}
4783 
4784 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4785 		int k, retval;
4786 
4787 		retval = atomic_read(&retired_max_queue);
4788 		if (qc_idx >= retval) {
4789 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4790 			pr_err("index %d too large\n", retval);
4791 			return;
4792 		}
4793 		k = find_last_bit(sqp->in_use_bm, retval);
4794 		if ((k < sdebug_max_queue) || (k == retval))
4795 			atomic_set(&retired_max_queue, 0);
4796 		else
4797 			atomic_set(&retired_max_queue, k + 1);
4798 	}
4799 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4800 	if (unlikely(aborted)) {
4801 		if (sdebug_verbose)
4802 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4803 		return;
4804 	}
4805 	scp->scsi_done(scp); /* callback to mid level */
4806 }
4807 
4808 /* When high resolution timer goes off this function is called. */
4809 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4810 {
4811 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4812 						  hrt);
4813 	sdebug_q_cmd_complete(sd_dp);
4814 	return HRTIMER_NORESTART;
4815 }
4816 
4817 /* When work queue schedules work, it calls this function. */
4818 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4819 {
4820 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4821 						  ew.work);
4822 	sdebug_q_cmd_complete(sd_dp);
4823 }
4824 
4825 static bool got_shared_uuid;
4826 static uuid_t shared_uuid;
4827 
4828 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4829 {
4830 	struct sdeb_zone_state *zsp;
4831 	sector_t capacity = get_sdebug_capacity();
4832 	sector_t zstart = 0;
4833 	unsigned int i;
4834 
4835 	/*
4836 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4837 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4838 	 * use the specified zone size checking that at least 2 zones can be
4839 	 * created for the device.
4840 	 */
4841 	if (!sdeb_zbc_zone_size_mb) {
4842 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4843 			>> ilog2(sdebug_sector_size);
4844 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4845 			devip->zsize >>= 1;
4846 		if (devip->zsize < 2) {
4847 			pr_err("Device capacity too small\n");
4848 			return -EINVAL;
4849 		}
4850 	} else {
4851 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4852 			pr_err("Zone size is not a power of 2\n");
4853 			return -EINVAL;
4854 		}
4855 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4856 			>> ilog2(sdebug_sector_size);
4857 		if (devip->zsize >= capacity) {
4858 			pr_err("Zone size too large for device capacity\n");
4859 			return -EINVAL;
4860 		}
4861 	}
4862 
4863 	devip->zsize_shift = ilog2(devip->zsize);
4864 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4865 
4866 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4867 		pr_err("Number of conventional zones too large\n");
4868 		return -EINVAL;
4869 	}
4870 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4871 
4872 	if (devip->zmodel == BLK_ZONED_HM) {
4873 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4874 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4875 			devip->max_open = (devip->nr_zones - 1) / 2;
4876 		else
4877 			devip->max_open = sdeb_zbc_max_open;
4878 	}
4879 
4880 	devip->zstate = kcalloc(devip->nr_zones,
4881 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4882 	if (!devip->zstate)
4883 		return -ENOMEM;
4884 
4885 	for (i = 0; i < devip->nr_zones; i++) {
4886 		zsp = &devip->zstate[i];
4887 
4888 		zsp->z_start = zstart;
4889 
4890 		if (i < devip->nr_conv_zones) {
4891 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4892 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4893 			zsp->z_wp = (sector_t)-1;
4894 		} else {
4895 			if (devip->zmodel == BLK_ZONED_HM)
4896 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4897 			else
4898 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4899 			zsp->z_cond = ZC1_EMPTY;
4900 			zsp->z_wp = zsp->z_start;
4901 		}
4902 
4903 		if (zsp->z_start + devip->zsize < capacity)
4904 			zsp->z_size = devip->zsize;
4905 		else
4906 			zsp->z_size = capacity - zsp->z_start;
4907 
4908 		zstart += zsp->z_size;
4909 	}
4910 
4911 	return 0;
4912 }
4913 
4914 static struct sdebug_dev_info *sdebug_device_create(
4915 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4916 {
4917 	struct sdebug_dev_info *devip;
4918 
4919 	devip = kzalloc(sizeof(*devip), flags);
4920 	if (devip) {
4921 		if (sdebug_uuid_ctl == 1)
4922 			uuid_gen(&devip->lu_name);
4923 		else if (sdebug_uuid_ctl == 2) {
4924 			if (got_shared_uuid)
4925 				devip->lu_name = shared_uuid;
4926 			else {
4927 				uuid_gen(&shared_uuid);
4928 				got_shared_uuid = true;
4929 				devip->lu_name = shared_uuid;
4930 			}
4931 		}
4932 		devip->sdbg_host = sdbg_host;
4933 		if (sdeb_zbc_in_use) {
4934 			devip->zmodel = sdeb_zbc_model;
4935 			if (sdebug_device_create_zones(devip)) {
4936 				kfree(devip);
4937 				return NULL;
4938 			}
4939 		} else {
4940 			devip->zmodel = BLK_ZONED_NONE;
4941 		}
4942 		devip->sdbg_host = sdbg_host;
4943 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4944 	}
4945 	return devip;
4946 }
4947 
4948 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4949 {
4950 	struct sdebug_host_info *sdbg_host;
4951 	struct sdebug_dev_info *open_devip = NULL;
4952 	struct sdebug_dev_info *devip;
4953 
4954 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4955 	if (!sdbg_host) {
4956 		pr_err("Host info NULL\n");
4957 		return NULL;
4958 	}
4959 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4960 		if ((devip->used) && (devip->channel == sdev->channel) &&
4961 		    (devip->target == sdev->id) &&
4962 		    (devip->lun == sdev->lun))
4963 			return devip;
4964 		else {
4965 			if ((!devip->used) && (!open_devip))
4966 				open_devip = devip;
4967 		}
4968 	}
4969 	if (!open_devip) { /* try and make a new one */
4970 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4971 		if (!open_devip) {
4972 			pr_err("out of memory at line %d\n", __LINE__);
4973 			return NULL;
4974 		}
4975 	}
4976 
4977 	open_devip->channel = sdev->channel;
4978 	open_devip->target = sdev->id;
4979 	open_devip->lun = sdev->lun;
4980 	open_devip->sdbg_host = sdbg_host;
4981 	atomic_set(&open_devip->num_in_q, 0);
4982 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4983 	open_devip->used = true;
4984 	return open_devip;
4985 }
4986 
4987 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4988 {
4989 	if (sdebug_verbose)
4990 		pr_info("slave_alloc <%u %u %u %llu>\n",
4991 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4992 	return 0;
4993 }
4994 
4995 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4996 {
4997 	struct sdebug_dev_info *devip =
4998 			(struct sdebug_dev_info *)sdp->hostdata;
4999 
5000 	if (sdebug_verbose)
5001 		pr_info("slave_configure <%u %u %u %llu>\n",
5002 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5003 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5004 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5005 	if (devip == NULL) {
5006 		devip = find_build_dev_info(sdp);
5007 		if (devip == NULL)
5008 			return 1;  /* no resources, will be marked offline */
5009 	}
5010 	sdp->hostdata = devip;
5011 	if (sdebug_no_uld)
5012 		sdp->no_uld_attach = 1;
5013 	config_cdb_len(sdp);
5014 	return 0;
5015 }
5016 
5017 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5018 {
5019 	struct sdebug_dev_info *devip =
5020 		(struct sdebug_dev_info *)sdp->hostdata;
5021 
5022 	if (sdebug_verbose)
5023 		pr_info("slave_destroy <%u %u %u %llu>\n",
5024 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5025 	if (devip) {
5026 		/* make this slot available for re-use */
5027 		devip->used = false;
5028 		sdp->hostdata = NULL;
5029 	}
5030 }
5031 
5032 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5033 			   enum sdeb_defer_type defer_t)
5034 {
5035 	if (!sd_dp)
5036 		return;
5037 	if (defer_t == SDEB_DEFER_HRT)
5038 		hrtimer_cancel(&sd_dp->hrt);
5039 	else if (defer_t == SDEB_DEFER_WQ)
5040 		cancel_work_sync(&sd_dp->ew.work);
5041 }
5042 
5043 /* If @cmnd found deletes its timer or work queue and returns true; else
5044    returns false */
5045 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5046 {
5047 	unsigned long iflags;
5048 	int j, k, qmax, r_qmax;
5049 	enum sdeb_defer_type l_defer_t;
5050 	struct sdebug_queue *sqp;
5051 	struct sdebug_queued_cmd *sqcp;
5052 	struct sdebug_dev_info *devip;
5053 	struct sdebug_defer *sd_dp;
5054 
5055 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5056 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5057 		qmax = sdebug_max_queue;
5058 		r_qmax = atomic_read(&retired_max_queue);
5059 		if (r_qmax > qmax)
5060 			qmax = r_qmax;
5061 		for (k = 0; k < qmax; ++k) {
5062 			if (test_bit(k, sqp->in_use_bm)) {
5063 				sqcp = &sqp->qc_arr[k];
5064 				if (cmnd != sqcp->a_cmnd)
5065 					continue;
5066 				/* found */
5067 				devip = (struct sdebug_dev_info *)
5068 						cmnd->device->hostdata;
5069 				if (devip)
5070 					atomic_dec(&devip->num_in_q);
5071 				sqcp->a_cmnd = NULL;
5072 				sd_dp = sqcp->sd_dp;
5073 				if (sd_dp) {
5074 					l_defer_t = sd_dp->defer_t;
5075 					sd_dp->defer_t = SDEB_DEFER_NONE;
5076 				} else
5077 					l_defer_t = SDEB_DEFER_NONE;
5078 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5079 				stop_qc_helper(sd_dp, l_defer_t);
5080 				clear_bit(k, sqp->in_use_bm);
5081 				return true;
5082 			}
5083 		}
5084 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5085 	}
5086 	return false;
5087 }
5088 
5089 /* Deletes (stops) timers or work queues of all queued commands */
5090 static void stop_all_queued(void)
5091 {
5092 	unsigned long iflags;
5093 	int j, k;
5094 	enum sdeb_defer_type l_defer_t;
5095 	struct sdebug_queue *sqp;
5096 	struct sdebug_queued_cmd *sqcp;
5097 	struct sdebug_dev_info *devip;
5098 	struct sdebug_defer *sd_dp;
5099 
5100 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5101 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5102 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5103 			if (test_bit(k, sqp->in_use_bm)) {
5104 				sqcp = &sqp->qc_arr[k];
5105 				if (sqcp->a_cmnd == NULL)
5106 					continue;
5107 				devip = (struct sdebug_dev_info *)
5108 					sqcp->a_cmnd->device->hostdata;
5109 				if (devip)
5110 					atomic_dec(&devip->num_in_q);
5111 				sqcp->a_cmnd = NULL;
5112 				sd_dp = sqcp->sd_dp;
5113 				if (sd_dp) {
5114 					l_defer_t = sd_dp->defer_t;
5115 					sd_dp->defer_t = SDEB_DEFER_NONE;
5116 				} else
5117 					l_defer_t = SDEB_DEFER_NONE;
5118 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5119 				stop_qc_helper(sd_dp, l_defer_t);
5120 				clear_bit(k, sqp->in_use_bm);
5121 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5122 			}
5123 		}
5124 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5125 	}
5126 }
5127 
5128 /* Free queued command memory on heap */
5129 static void free_all_queued(void)
5130 {
5131 	int j, k;
5132 	struct sdebug_queue *sqp;
5133 	struct sdebug_queued_cmd *sqcp;
5134 
5135 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5136 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5137 			sqcp = &sqp->qc_arr[k];
5138 			kfree(sqcp->sd_dp);
5139 			sqcp->sd_dp = NULL;
5140 		}
5141 	}
5142 }
5143 
5144 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5145 {
5146 	bool ok;
5147 
5148 	++num_aborts;
5149 	if (SCpnt) {
5150 		ok = stop_queued_cmnd(SCpnt);
5151 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5152 			sdev_printk(KERN_INFO, SCpnt->device,
5153 				    "%s: command%s found\n", __func__,
5154 				    ok ? "" : " not");
5155 	}
5156 	return SUCCESS;
5157 }
5158 
5159 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5160 {
5161 	++num_dev_resets;
5162 	if (SCpnt && SCpnt->device) {
5163 		struct scsi_device *sdp = SCpnt->device;
5164 		struct sdebug_dev_info *devip =
5165 				(struct sdebug_dev_info *)sdp->hostdata;
5166 
5167 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5168 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5169 		if (devip)
5170 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5171 	}
5172 	return SUCCESS;
5173 }
5174 
5175 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5176 {
5177 	struct sdebug_host_info *sdbg_host;
5178 	struct sdebug_dev_info *devip;
5179 	struct scsi_device *sdp;
5180 	struct Scsi_Host *hp;
5181 	int k = 0;
5182 
5183 	++num_target_resets;
5184 	if (!SCpnt)
5185 		goto lie;
5186 	sdp = SCpnt->device;
5187 	if (!sdp)
5188 		goto lie;
5189 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5190 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5191 	hp = sdp->host;
5192 	if (!hp)
5193 		goto lie;
5194 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5195 	if (sdbg_host) {
5196 		list_for_each_entry(devip,
5197 				    &sdbg_host->dev_info_list,
5198 				    dev_list)
5199 			if (devip->target == sdp->id) {
5200 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5201 				++k;
5202 			}
5203 	}
5204 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5205 		sdev_printk(KERN_INFO, sdp,
5206 			    "%s: %d device(s) found in target\n", __func__, k);
5207 lie:
5208 	return SUCCESS;
5209 }
5210 
5211 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5212 {
5213 	struct sdebug_host_info *sdbg_host;
5214 	struct sdebug_dev_info *devip;
5215 	struct scsi_device *sdp;
5216 	struct Scsi_Host *hp;
5217 	int k = 0;
5218 
5219 	++num_bus_resets;
5220 	if (!(SCpnt && SCpnt->device))
5221 		goto lie;
5222 	sdp = SCpnt->device;
5223 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5224 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5225 	hp = sdp->host;
5226 	if (hp) {
5227 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5228 		if (sdbg_host) {
5229 			list_for_each_entry(devip,
5230 					    &sdbg_host->dev_info_list,
5231 					    dev_list) {
5232 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5233 				++k;
5234 			}
5235 		}
5236 	}
5237 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5238 		sdev_printk(KERN_INFO, sdp,
5239 			    "%s: %d device(s) found in host\n", __func__, k);
5240 lie:
5241 	return SUCCESS;
5242 }
5243 
5244 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5245 {
5246 	struct sdebug_host_info *sdbg_host;
5247 	struct sdebug_dev_info *devip;
5248 	int k = 0;
5249 
5250 	++num_host_resets;
5251 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5252 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5253 	spin_lock(&sdebug_host_list_lock);
5254 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5255 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5256 				    dev_list) {
5257 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5258 			++k;
5259 		}
5260 	}
5261 	spin_unlock(&sdebug_host_list_lock);
5262 	stop_all_queued();
5263 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5264 		sdev_printk(KERN_INFO, SCpnt->device,
5265 			    "%s: %d device(s) found\n", __func__, k);
5266 	return SUCCESS;
5267 }
5268 
5269 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5270 {
5271 	struct msdos_partition *pp;
5272 	int starts[SDEBUG_MAX_PARTS + 2];
5273 	int sectors_per_part, num_sectors, k;
5274 	int heads_by_sects, start_sec, end_sec;
5275 
5276 	/* assume partition table already zeroed */
5277 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5278 		return;
5279 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5280 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5281 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5282 	}
5283 	num_sectors = (int)sdebug_store_sectors;
5284 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5285 			   / sdebug_num_parts;
5286 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5287 	starts[0] = sdebug_sectors_per;
5288 	for (k = 1; k < sdebug_num_parts; ++k)
5289 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5290 			    * heads_by_sects;
5291 	starts[sdebug_num_parts] = num_sectors;
5292 	starts[sdebug_num_parts + 1] = 0;
5293 
5294 	ramp[510] = 0x55;	/* magic partition markings */
5295 	ramp[511] = 0xAA;
5296 	pp = (struct msdos_partition *)(ramp + 0x1be);
5297 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5298 		start_sec = starts[k];
5299 		end_sec = starts[k + 1] - 1;
5300 		pp->boot_ind = 0;
5301 
5302 		pp->cyl = start_sec / heads_by_sects;
5303 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5304 			   / sdebug_sectors_per;
5305 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5306 
5307 		pp->end_cyl = end_sec / heads_by_sects;
5308 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5309 			       / sdebug_sectors_per;
5310 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5311 
5312 		pp->start_sect = cpu_to_le32(start_sec);
5313 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5314 		pp->sys_ind = 0x83;	/* plain Linux partition */
5315 	}
5316 }
5317 
5318 static void block_unblock_all_queues(bool block)
5319 {
5320 	int j;
5321 	struct sdebug_queue *sqp;
5322 
5323 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5324 		atomic_set(&sqp->blocked, (int)block);
5325 }
5326 
5327 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5328  * commands will be processed normally before triggers occur.
5329  */
5330 static void tweak_cmnd_count(void)
5331 {
5332 	int count, modulo;
5333 
5334 	modulo = abs(sdebug_every_nth);
5335 	if (modulo < 2)
5336 		return;
5337 	block_unblock_all_queues(true);
5338 	count = atomic_read(&sdebug_cmnd_count);
5339 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5340 	block_unblock_all_queues(false);
5341 }
5342 
5343 static void clear_queue_stats(void)
5344 {
5345 	atomic_set(&sdebug_cmnd_count, 0);
5346 	atomic_set(&sdebug_completions, 0);
5347 	atomic_set(&sdebug_miss_cpus, 0);
5348 	atomic_set(&sdebug_a_tsf, 0);
5349 }
5350 
5351 static void setup_inject(struct sdebug_queue *sqp,
5352 			 struct sdebug_queued_cmd *sqcp)
5353 {
5354 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5355 		if (sdebug_every_nth > 0)
5356 			sqcp->inj_recovered = sqcp->inj_transport
5357 				= sqcp->inj_dif
5358 				= sqcp->inj_dix = sqcp->inj_short
5359 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5360 		return;
5361 	}
5362 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5363 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5364 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5365 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5366 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5367 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5368 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5369 }
5370 
5371 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5372 
5373 /* Complete the processing of the thread that queued a SCSI command to this
5374  * driver. It either completes the command by calling cmnd_done() or
5375  * schedules a hr timer or work queue then returns 0. Returns
5376  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5377  */
5378 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5379 			 int scsi_result,
5380 			 int (*pfp)(struct scsi_cmnd *,
5381 				    struct sdebug_dev_info *),
5382 			 int delta_jiff, int ndelay)
5383 {
5384 	bool new_sd_dp;
5385 	int k, num_in_q, qdepth, inject;
5386 	unsigned long iflags;
5387 	u64 ns_from_boot = 0;
5388 	struct sdebug_queue *sqp;
5389 	struct sdebug_queued_cmd *sqcp;
5390 	struct scsi_device *sdp;
5391 	struct sdebug_defer *sd_dp;
5392 
5393 	if (unlikely(devip == NULL)) {
5394 		if (scsi_result == 0)
5395 			scsi_result = DID_NO_CONNECT << 16;
5396 		goto respond_in_thread;
5397 	}
5398 	sdp = cmnd->device;
5399 
5400 	if (delta_jiff == 0)
5401 		goto respond_in_thread;
5402 
5403 	sqp = get_queue(cmnd);
5404 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5405 	if (unlikely(atomic_read(&sqp->blocked))) {
5406 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5407 		return SCSI_MLQUEUE_HOST_BUSY;
5408 	}
5409 	num_in_q = atomic_read(&devip->num_in_q);
5410 	qdepth = cmnd->device->queue_depth;
5411 	inject = 0;
5412 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5413 		if (scsi_result) {
5414 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5415 			goto respond_in_thread;
5416 		} else
5417 			scsi_result = device_qfull_result;
5418 	} else if (unlikely(sdebug_every_nth &&
5419 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5420 			    (scsi_result == 0))) {
5421 		if ((num_in_q == (qdepth - 1)) &&
5422 		    (atomic_inc_return(&sdebug_a_tsf) >=
5423 		     abs(sdebug_every_nth))) {
5424 			atomic_set(&sdebug_a_tsf, 0);
5425 			inject = 1;
5426 			scsi_result = device_qfull_result;
5427 		}
5428 	}
5429 
5430 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5431 	if (unlikely(k >= sdebug_max_queue)) {
5432 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5433 		if (scsi_result)
5434 			goto respond_in_thread;
5435 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5436 			scsi_result = device_qfull_result;
5437 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5438 			sdev_printk(KERN_INFO, sdp,
5439 				    "%s: max_queue=%d exceeded, %s\n",
5440 				    __func__, sdebug_max_queue,
5441 				    (scsi_result ?  "status: TASK SET FULL" :
5442 						    "report: host busy"));
5443 		if (scsi_result)
5444 			goto respond_in_thread;
5445 		else
5446 			return SCSI_MLQUEUE_HOST_BUSY;
5447 	}
5448 	set_bit(k, sqp->in_use_bm);
5449 	atomic_inc(&devip->num_in_q);
5450 	sqcp = &sqp->qc_arr[k];
5451 	sqcp->a_cmnd = cmnd;
5452 	cmnd->host_scribble = (unsigned char *)sqcp;
5453 	sd_dp = sqcp->sd_dp;
5454 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5455 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
5456 		setup_inject(sqp, sqcp);
5457 	if (!sd_dp) {
5458 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5459 		if (!sd_dp) {
5460 			atomic_dec(&devip->num_in_q);
5461 			clear_bit(k, sqp->in_use_bm);
5462 			return SCSI_MLQUEUE_HOST_BUSY;
5463 		}
5464 		new_sd_dp = true;
5465 	} else {
5466 		new_sd_dp = false;
5467 	}
5468 
5469 	/* Set the hostwide tag */
5470 	if (sdebug_host_max_queue)
5471 		sd_dp->hc_idx = get_tag(cmnd);
5472 
5473 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5474 		ns_from_boot = ktime_get_boottime_ns();
5475 
5476 	/* one of the resp_*() response functions is called here */
5477 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5478 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5479 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5480 		delta_jiff = ndelay = 0;
5481 	}
5482 	if (cmnd->result == 0 && scsi_result != 0)
5483 		cmnd->result = scsi_result;
5484 
5485 	if (unlikely(sdebug_verbose && cmnd->result))
5486 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5487 			    __func__, cmnd->result);
5488 
5489 	if (delta_jiff > 0 || ndelay > 0) {
5490 		ktime_t kt;
5491 
5492 		if (delta_jiff > 0) {
5493 			u64 ns = jiffies_to_nsecs(delta_jiff);
5494 
5495 			if (sdebug_random && ns < U32_MAX) {
5496 				ns = prandom_u32_max((u32)ns);
5497 			} else if (sdebug_random) {
5498 				ns >>= 12;	/* scale to 4 usec precision */
5499 				if (ns < U32_MAX)	/* over 4 hours max */
5500 					ns = prandom_u32_max((u32)ns);
5501 				ns <<= 12;
5502 			}
5503 			kt = ns_to_ktime(ns);
5504 		} else {	/* ndelay has a 4.2 second max */
5505 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5506 					     (u32)ndelay;
5507 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5508 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5509 
5510 				if (kt <= d) {	/* elapsed duration >= kt */
5511 					sqcp->a_cmnd = NULL;
5512 					atomic_dec(&devip->num_in_q);
5513 					clear_bit(k, sqp->in_use_bm);
5514 					if (new_sd_dp)
5515 						kfree(sd_dp);
5516 					/* call scsi_done() from this thread */
5517 					cmnd->scsi_done(cmnd);
5518 					return 0;
5519 				}
5520 				/* otherwise reduce kt by elapsed time */
5521 				kt -= d;
5522 			}
5523 		}
5524 		if (!sd_dp->init_hrt) {
5525 			sd_dp->init_hrt = true;
5526 			sqcp->sd_dp = sd_dp;
5527 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5528 				     HRTIMER_MODE_REL_PINNED);
5529 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5530 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5531 			sd_dp->qc_idx = k;
5532 		}
5533 		if (sdebug_statistics)
5534 			sd_dp->issuing_cpu = raw_smp_processor_id();
5535 		sd_dp->defer_t = SDEB_DEFER_HRT;
5536 		/* schedule the invocation of scsi_done() for a later time */
5537 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5538 	} else {	/* jdelay < 0, use work queue */
5539 		if (!sd_dp->init_wq) {
5540 			sd_dp->init_wq = true;
5541 			sqcp->sd_dp = sd_dp;
5542 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5543 			sd_dp->qc_idx = k;
5544 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5545 		}
5546 		if (sdebug_statistics)
5547 			sd_dp->issuing_cpu = raw_smp_processor_id();
5548 		sd_dp->defer_t = SDEB_DEFER_WQ;
5549 		if (unlikely(sqcp->inj_cmd_abort))
5550 			sd_dp->aborted = true;
5551 		schedule_work(&sd_dp->ew.work);
5552 		if (unlikely(sqcp->inj_cmd_abort)) {
5553 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5554 				    cmnd->request->tag);
5555 			blk_abort_request(cmnd->request);
5556 		}
5557 	}
5558 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
5559 		     (scsi_result == device_qfull_result)))
5560 		sdev_printk(KERN_INFO, sdp,
5561 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
5562 			    num_in_q, (inject ? "<inject> " : ""),
5563 			    "status: TASK SET FULL");
5564 	return 0;
5565 
5566 respond_in_thread:	/* call back to mid-layer using invocation thread */
5567 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5568 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5569 	if (cmnd->result == 0 && scsi_result != 0)
5570 		cmnd->result = scsi_result;
5571 	cmnd->scsi_done(cmnd);
5572 	return 0;
5573 }
5574 
5575 /* Note: The following macros create attribute files in the
5576    /sys/module/scsi_debug/parameters directory. Unfortunately this
5577    driver is unaware of a change and cannot trigger auxiliary actions
5578    as it can when the corresponding attribute in the
5579    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5580  */
5581 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5582 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5583 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5584 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5585 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5586 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5587 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5588 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5589 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5590 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5591 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5592 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5593 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5594 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5595 module_param_string(inq_product, sdebug_inq_product_id,
5596 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5597 module_param_string(inq_rev, sdebug_inq_product_rev,
5598 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5599 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5600 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5601 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5602 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5603 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5604 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5605 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5606 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5607 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5608 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5609 		   S_IRUGO | S_IWUSR);
5610 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5611 		   S_IRUGO | S_IWUSR);
5612 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5613 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5614 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5615 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5616 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5617 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5618 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5619 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5620 module_param_named(per_host_store, sdebug_per_host_store, bool,
5621 		   S_IRUGO | S_IWUSR);
5622 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5623 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5624 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5625 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5626 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5627 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5628 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5629 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5630 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5631 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5632 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5633 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5634 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5635 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5636 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5637 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5638 		   S_IRUGO | S_IWUSR);
5639 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5640 module_param_named(write_same_length, sdebug_write_same_length, int,
5641 		   S_IRUGO | S_IWUSR);
5642 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5643 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5644 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5645 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5646 
5647 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5648 MODULE_DESCRIPTION("SCSI debug adapter driver");
5649 MODULE_LICENSE("GPL");
5650 MODULE_VERSION(SDEBUG_VERSION);
5651 
5652 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5653 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5654 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5655 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5656 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5657 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5658 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5659 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5660 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5661 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5662 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5663 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5664 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5665 MODULE_PARM_DESC(host_max_queue,
5666 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5667 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5668 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5669 		 SDEBUG_VERSION "\")");
5670 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5671 MODULE_PARM_DESC(lbprz,
5672 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5673 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5674 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5675 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5676 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5677 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5678 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5679 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5680 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5681 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5682 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5683 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5684 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5685 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5686 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5687 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5688 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5689 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5690 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5691 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5692 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5693 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5694 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5695 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5696 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5697 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5698 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5699 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5700 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5701 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5702 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5703 MODULE_PARM_DESC(uuid_ctl,
5704 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5705 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5706 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5707 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5708 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5709 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5710 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5711 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5712 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5713 
5714 #define SDEBUG_INFO_LEN 256
5715 static char sdebug_info[SDEBUG_INFO_LEN];
5716 
5717 static const char *scsi_debug_info(struct Scsi_Host *shp)
5718 {
5719 	int k;
5720 
5721 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5722 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5723 	if (k >= (SDEBUG_INFO_LEN - 1))
5724 		return sdebug_info;
5725 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5726 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5727 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5728 		  "statistics", (int)sdebug_statistics);
5729 	return sdebug_info;
5730 }
5731 
5732 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5733 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5734 				 int length)
5735 {
5736 	char arr[16];
5737 	int opts;
5738 	int minLen = length > 15 ? 15 : length;
5739 
5740 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5741 		return -EACCES;
5742 	memcpy(arr, buffer, minLen);
5743 	arr[minLen] = '\0';
5744 	if (1 != sscanf(arr, "%d", &opts))
5745 		return -EINVAL;
5746 	sdebug_opts = opts;
5747 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5748 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5749 	if (sdebug_every_nth != 0)
5750 		tweak_cmnd_count();
5751 	return length;
5752 }
5753 
5754 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5755  * same for each scsi_debug host (if more than one). Some of the counters
5756  * output are not atomics so might be inaccurate in a busy system. */
5757 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5758 {
5759 	int f, j, l;
5760 	struct sdebug_queue *sqp;
5761 	struct sdebug_host_info *sdhp;
5762 
5763 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5764 		   SDEBUG_VERSION, sdebug_version_date);
5765 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5766 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5767 		   sdebug_opts, sdebug_every_nth);
5768 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5769 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5770 		   sdebug_sector_size, "bytes");
5771 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5772 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5773 		   num_aborts);
5774 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5775 		   num_dev_resets, num_target_resets, num_bus_resets,
5776 		   num_host_resets);
5777 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5778 		   dix_reads, dix_writes, dif_errors);
5779 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5780 		   sdebug_statistics);
5781 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5782 		   atomic_read(&sdebug_cmnd_count),
5783 		   atomic_read(&sdebug_completions),
5784 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5785 		   atomic_read(&sdebug_a_tsf));
5786 
5787 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5788 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5789 		seq_printf(m, "  queue %d:\n", j);
5790 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5791 		if (f != sdebug_max_queue) {
5792 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5793 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5794 				   "first,last bits", f, l);
5795 		}
5796 	}
5797 
5798 	seq_printf(m, "this host_no=%d\n", host->host_no);
5799 	if (!xa_empty(per_store_ap)) {
5800 		bool niu;
5801 		int idx;
5802 		unsigned long l_idx;
5803 		struct sdeb_store_info *sip;
5804 
5805 		seq_puts(m, "\nhost list:\n");
5806 		j = 0;
5807 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5808 			idx = sdhp->si_idx;
5809 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5810 				   sdhp->shost->host_no, idx);
5811 			++j;
5812 		}
5813 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5814 			   sdeb_most_recent_idx);
5815 		j = 0;
5816 		xa_for_each(per_store_ap, l_idx, sip) {
5817 			niu = xa_get_mark(per_store_ap, l_idx,
5818 					  SDEB_XA_NOT_IN_USE);
5819 			idx = (int)l_idx;
5820 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5821 				   (niu ? "  not_in_use" : ""));
5822 			++j;
5823 		}
5824 	}
5825 	return 0;
5826 }
5827 
5828 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5829 {
5830 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5831 }
5832 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5833  * of delay is jiffies.
5834  */
5835 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5836 			   size_t count)
5837 {
5838 	int jdelay, res;
5839 
5840 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5841 		res = count;
5842 		if (sdebug_jdelay != jdelay) {
5843 			int j, k;
5844 			struct sdebug_queue *sqp;
5845 
5846 			block_unblock_all_queues(true);
5847 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5848 			     ++j, ++sqp) {
5849 				k = find_first_bit(sqp->in_use_bm,
5850 						   sdebug_max_queue);
5851 				if (k != sdebug_max_queue) {
5852 					res = -EBUSY;   /* queued commands */
5853 					break;
5854 				}
5855 			}
5856 			if (res > 0) {
5857 				sdebug_jdelay = jdelay;
5858 				sdebug_ndelay = 0;
5859 			}
5860 			block_unblock_all_queues(false);
5861 		}
5862 		return res;
5863 	}
5864 	return -EINVAL;
5865 }
5866 static DRIVER_ATTR_RW(delay);
5867 
5868 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5869 {
5870 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5871 }
5872 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5873 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5874 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5875 			    size_t count)
5876 {
5877 	int ndelay, res;
5878 
5879 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5880 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5881 		res = count;
5882 		if (sdebug_ndelay != ndelay) {
5883 			int j, k;
5884 			struct sdebug_queue *sqp;
5885 
5886 			block_unblock_all_queues(true);
5887 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5888 			     ++j, ++sqp) {
5889 				k = find_first_bit(sqp->in_use_bm,
5890 						   sdebug_max_queue);
5891 				if (k != sdebug_max_queue) {
5892 					res = -EBUSY;   /* queued commands */
5893 					break;
5894 				}
5895 			}
5896 			if (res > 0) {
5897 				sdebug_ndelay = ndelay;
5898 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5899 							: DEF_JDELAY;
5900 			}
5901 			block_unblock_all_queues(false);
5902 		}
5903 		return res;
5904 	}
5905 	return -EINVAL;
5906 }
5907 static DRIVER_ATTR_RW(ndelay);
5908 
5909 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5910 {
5911 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5912 }
5913 
5914 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5915 			  size_t count)
5916 {
5917 	int opts;
5918 	char work[20];
5919 
5920 	if (sscanf(buf, "%10s", work) == 1) {
5921 		if (strncasecmp(work, "0x", 2) == 0) {
5922 			if (kstrtoint(work + 2, 16, &opts) == 0)
5923 				goto opts_done;
5924 		} else {
5925 			if (kstrtoint(work, 10, &opts) == 0)
5926 				goto opts_done;
5927 		}
5928 	}
5929 	return -EINVAL;
5930 opts_done:
5931 	sdebug_opts = opts;
5932 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5933 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5934 	tweak_cmnd_count();
5935 	return count;
5936 }
5937 static DRIVER_ATTR_RW(opts);
5938 
5939 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5940 {
5941 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5942 }
5943 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5944 			   size_t count)
5945 {
5946 	int n;
5947 
5948 	/* Cannot change from or to TYPE_ZBC with sysfs */
5949 	if (sdebug_ptype == TYPE_ZBC)
5950 		return -EINVAL;
5951 
5952 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5953 		if (n == TYPE_ZBC)
5954 			return -EINVAL;
5955 		sdebug_ptype = n;
5956 		return count;
5957 	}
5958 	return -EINVAL;
5959 }
5960 static DRIVER_ATTR_RW(ptype);
5961 
5962 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5963 {
5964 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5965 }
5966 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5967 			    size_t count)
5968 {
5969 	int n;
5970 
5971 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5972 		sdebug_dsense = n;
5973 		return count;
5974 	}
5975 	return -EINVAL;
5976 }
5977 static DRIVER_ATTR_RW(dsense);
5978 
5979 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5980 {
5981 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5982 }
5983 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5984 			     size_t count)
5985 {
5986 	int n, idx;
5987 
5988 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5989 		bool want_store = (n == 0);
5990 		struct sdebug_host_info *sdhp;
5991 
5992 		n = (n > 0);
5993 		sdebug_fake_rw = (sdebug_fake_rw > 0);
5994 		if (sdebug_fake_rw == n)
5995 			return count;	/* not transitioning so do nothing */
5996 
5997 		if (want_store) {	/* 1 --> 0 transition, set up store */
5998 			if (sdeb_first_idx < 0) {
5999 				idx = sdebug_add_store();
6000 				if (idx < 0)
6001 					return idx;
6002 			} else {
6003 				idx = sdeb_first_idx;
6004 				xa_clear_mark(per_store_ap, idx,
6005 					      SDEB_XA_NOT_IN_USE);
6006 			}
6007 			/* make all hosts use same store */
6008 			list_for_each_entry(sdhp, &sdebug_host_list,
6009 					    host_list) {
6010 				if (sdhp->si_idx != idx) {
6011 					xa_set_mark(per_store_ap, sdhp->si_idx,
6012 						    SDEB_XA_NOT_IN_USE);
6013 					sdhp->si_idx = idx;
6014 				}
6015 			}
6016 			sdeb_most_recent_idx = idx;
6017 		} else {	/* 0 --> 1 transition is trigger for shrink */
6018 			sdebug_erase_all_stores(true /* apart from first */);
6019 		}
6020 		sdebug_fake_rw = n;
6021 		return count;
6022 	}
6023 	return -EINVAL;
6024 }
6025 static DRIVER_ATTR_RW(fake_rw);
6026 
6027 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6028 {
6029 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6030 }
6031 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6032 			      size_t count)
6033 {
6034 	int n;
6035 
6036 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6037 		sdebug_no_lun_0 = n;
6038 		return count;
6039 	}
6040 	return -EINVAL;
6041 }
6042 static DRIVER_ATTR_RW(no_lun_0);
6043 
6044 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6045 {
6046 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6047 }
6048 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6049 			      size_t count)
6050 {
6051 	int n;
6052 
6053 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6054 		sdebug_num_tgts = n;
6055 		sdebug_max_tgts_luns();
6056 		return count;
6057 	}
6058 	return -EINVAL;
6059 }
6060 static DRIVER_ATTR_RW(num_tgts);
6061 
6062 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6063 {
6064 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6065 }
6066 static DRIVER_ATTR_RO(dev_size_mb);
6067 
6068 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6069 {
6070 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6071 }
6072 
6073 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6074 				    size_t count)
6075 {
6076 	bool v;
6077 
6078 	if (kstrtobool(buf, &v))
6079 		return -EINVAL;
6080 
6081 	sdebug_per_host_store = v;
6082 	return count;
6083 }
6084 static DRIVER_ATTR_RW(per_host_store);
6085 
6086 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6087 {
6088 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6089 }
6090 static DRIVER_ATTR_RO(num_parts);
6091 
6092 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6093 {
6094 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6095 }
6096 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6097 			       size_t count)
6098 {
6099 	int nth;
6100 
6101 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
6102 		sdebug_every_nth = nth;
6103 		if (nth && !sdebug_statistics) {
6104 			pr_info("every_nth needs statistics=1, set it\n");
6105 			sdebug_statistics = true;
6106 		}
6107 		tweak_cmnd_count();
6108 		return count;
6109 	}
6110 	return -EINVAL;
6111 }
6112 static DRIVER_ATTR_RW(every_nth);
6113 
6114 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6115 {
6116 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6117 }
6118 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6119 			      size_t count)
6120 {
6121 	int n;
6122 	bool changed;
6123 
6124 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6125 		if (n > 256) {
6126 			pr_warn("max_luns can be no more than 256\n");
6127 			return -EINVAL;
6128 		}
6129 		changed = (sdebug_max_luns != n);
6130 		sdebug_max_luns = n;
6131 		sdebug_max_tgts_luns();
6132 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6133 			struct sdebug_host_info *sdhp;
6134 			struct sdebug_dev_info *dp;
6135 
6136 			spin_lock(&sdebug_host_list_lock);
6137 			list_for_each_entry(sdhp, &sdebug_host_list,
6138 					    host_list) {
6139 				list_for_each_entry(dp, &sdhp->dev_info_list,
6140 						    dev_list) {
6141 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6142 						dp->uas_bm);
6143 				}
6144 			}
6145 			spin_unlock(&sdebug_host_list_lock);
6146 		}
6147 		return count;
6148 	}
6149 	return -EINVAL;
6150 }
6151 static DRIVER_ATTR_RW(max_luns);
6152 
6153 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6154 {
6155 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6156 }
6157 /* N.B. max_queue can be changed while there are queued commands. In flight
6158  * commands beyond the new max_queue will be completed. */
6159 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6160 			       size_t count)
6161 {
6162 	int j, n, k, a;
6163 	struct sdebug_queue *sqp;
6164 
6165 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6166 	    (n <= SDEBUG_CANQUEUE) &&
6167 	    (sdebug_host_max_queue == 0)) {
6168 		block_unblock_all_queues(true);
6169 		k = 0;
6170 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6171 		     ++j, ++sqp) {
6172 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6173 			if (a > k)
6174 				k = a;
6175 		}
6176 		sdebug_max_queue = n;
6177 		if (k == SDEBUG_CANQUEUE)
6178 			atomic_set(&retired_max_queue, 0);
6179 		else if (k >= n)
6180 			atomic_set(&retired_max_queue, k + 1);
6181 		else
6182 			atomic_set(&retired_max_queue, 0);
6183 		block_unblock_all_queues(false);
6184 		return count;
6185 	}
6186 	return -EINVAL;
6187 }
6188 static DRIVER_ATTR_RW(max_queue);
6189 
6190 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6191 {
6192 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6193 }
6194 
6195 /*
6196  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6197  * in range [0, sdebug_host_max_queue), we can't change it.
6198  */
6199 static DRIVER_ATTR_RO(host_max_queue);
6200 
6201 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6202 {
6203 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6204 }
6205 static DRIVER_ATTR_RO(no_uld);
6206 
6207 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6208 {
6209 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6210 }
6211 static DRIVER_ATTR_RO(scsi_level);
6212 
6213 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6214 {
6215 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6216 }
6217 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6218 				size_t count)
6219 {
6220 	int n;
6221 	bool changed;
6222 
6223 	/* Ignore capacity change for ZBC drives for now */
6224 	if (sdeb_zbc_in_use)
6225 		return -ENOTSUPP;
6226 
6227 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228 		changed = (sdebug_virtual_gb != n);
6229 		sdebug_virtual_gb = n;
6230 		sdebug_capacity = get_sdebug_capacity();
6231 		if (changed) {
6232 			struct sdebug_host_info *sdhp;
6233 			struct sdebug_dev_info *dp;
6234 
6235 			spin_lock(&sdebug_host_list_lock);
6236 			list_for_each_entry(sdhp, &sdebug_host_list,
6237 					    host_list) {
6238 				list_for_each_entry(dp, &sdhp->dev_info_list,
6239 						    dev_list) {
6240 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6241 						dp->uas_bm);
6242 				}
6243 			}
6244 			spin_unlock(&sdebug_host_list_lock);
6245 		}
6246 		return count;
6247 	}
6248 	return -EINVAL;
6249 }
6250 static DRIVER_ATTR_RW(virtual_gb);
6251 
6252 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6253 {
6254 	/* absolute number of hosts currently active is what is shown */
6255 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6256 }
6257 
6258 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6259 			      size_t count)
6260 {
6261 	bool found;
6262 	unsigned long idx;
6263 	struct sdeb_store_info *sip;
6264 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6265 	int delta_hosts;
6266 
6267 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6268 		return -EINVAL;
6269 	if (delta_hosts > 0) {
6270 		do {
6271 			found = false;
6272 			if (want_phs) {
6273 				xa_for_each_marked(per_store_ap, idx, sip,
6274 						   SDEB_XA_NOT_IN_USE) {
6275 					sdeb_most_recent_idx = (int)idx;
6276 					found = true;
6277 					break;
6278 				}
6279 				if (found)	/* re-use case */
6280 					sdebug_add_host_helper((int)idx);
6281 				else
6282 					sdebug_do_add_host(true);
6283 			} else {
6284 				sdebug_do_add_host(false);
6285 			}
6286 		} while (--delta_hosts);
6287 	} else if (delta_hosts < 0) {
6288 		do {
6289 			sdebug_do_remove_host(false);
6290 		} while (++delta_hosts);
6291 	}
6292 	return count;
6293 }
6294 static DRIVER_ATTR_RW(add_host);
6295 
6296 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6297 {
6298 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6299 }
6300 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6301 				    size_t count)
6302 {
6303 	int n;
6304 
6305 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6306 		sdebug_vpd_use_hostno = n;
6307 		return count;
6308 	}
6309 	return -EINVAL;
6310 }
6311 static DRIVER_ATTR_RW(vpd_use_hostno);
6312 
6313 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6314 {
6315 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6316 }
6317 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6318 				size_t count)
6319 {
6320 	int n;
6321 
6322 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6323 		if (n > 0)
6324 			sdebug_statistics = true;
6325 		else {
6326 			clear_queue_stats();
6327 			sdebug_statistics = false;
6328 		}
6329 		return count;
6330 	}
6331 	return -EINVAL;
6332 }
6333 static DRIVER_ATTR_RW(statistics);
6334 
6335 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6336 {
6337 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6338 }
6339 static DRIVER_ATTR_RO(sector_size);
6340 
6341 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6342 {
6343 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6344 }
6345 static DRIVER_ATTR_RO(submit_queues);
6346 
6347 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6348 {
6349 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6350 }
6351 static DRIVER_ATTR_RO(dix);
6352 
6353 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6354 {
6355 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6356 }
6357 static DRIVER_ATTR_RO(dif);
6358 
6359 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6360 {
6361 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6362 }
6363 static DRIVER_ATTR_RO(guard);
6364 
6365 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6366 {
6367 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6368 }
6369 static DRIVER_ATTR_RO(ato);
6370 
6371 static ssize_t map_show(struct device_driver *ddp, char *buf)
6372 {
6373 	ssize_t count = 0;
6374 
6375 	if (!scsi_debug_lbp())
6376 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6377 				 sdebug_store_sectors);
6378 
6379 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6380 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6381 
6382 		if (sip)
6383 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6384 					  (int)map_size, sip->map_storep);
6385 	}
6386 	buf[count++] = '\n';
6387 	buf[count] = '\0';
6388 
6389 	return count;
6390 }
6391 static DRIVER_ATTR_RO(map);
6392 
6393 static ssize_t random_show(struct device_driver *ddp, char *buf)
6394 {
6395 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6396 }
6397 
6398 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6399 			    size_t count)
6400 {
6401 	bool v;
6402 
6403 	if (kstrtobool(buf, &v))
6404 		return -EINVAL;
6405 
6406 	sdebug_random = v;
6407 	return count;
6408 }
6409 static DRIVER_ATTR_RW(random);
6410 
6411 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6412 {
6413 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6414 }
6415 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6416 			       size_t count)
6417 {
6418 	int n;
6419 
6420 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6421 		sdebug_removable = (n > 0);
6422 		return count;
6423 	}
6424 	return -EINVAL;
6425 }
6426 static DRIVER_ATTR_RW(removable);
6427 
6428 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6429 {
6430 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6431 }
6432 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6433 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6434 			       size_t count)
6435 {
6436 	int n;
6437 
6438 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6439 		sdebug_host_lock = (n > 0);
6440 		return count;
6441 	}
6442 	return -EINVAL;
6443 }
6444 static DRIVER_ATTR_RW(host_lock);
6445 
6446 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6447 {
6448 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6449 }
6450 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6451 			    size_t count)
6452 {
6453 	int n;
6454 
6455 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6456 		sdebug_strict = (n > 0);
6457 		return count;
6458 	}
6459 	return -EINVAL;
6460 }
6461 static DRIVER_ATTR_RW(strict);
6462 
6463 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6464 {
6465 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6466 }
6467 static DRIVER_ATTR_RO(uuid_ctl);
6468 
6469 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6470 {
6471 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6472 }
6473 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6474 			     size_t count)
6475 {
6476 	int ret, n;
6477 
6478 	ret = kstrtoint(buf, 0, &n);
6479 	if (ret)
6480 		return ret;
6481 	sdebug_cdb_len = n;
6482 	all_config_cdb_len();
6483 	return count;
6484 }
6485 static DRIVER_ATTR_RW(cdb_len);
6486 
6487 static const char * const zbc_model_strs_a[] = {
6488 	[BLK_ZONED_NONE] = "none",
6489 	[BLK_ZONED_HA]   = "host-aware",
6490 	[BLK_ZONED_HM]   = "host-managed",
6491 };
6492 
6493 static const char * const zbc_model_strs_b[] = {
6494 	[BLK_ZONED_NONE] = "no",
6495 	[BLK_ZONED_HA]   = "aware",
6496 	[BLK_ZONED_HM]   = "managed",
6497 };
6498 
6499 static const char * const zbc_model_strs_c[] = {
6500 	[BLK_ZONED_NONE] = "0",
6501 	[BLK_ZONED_HA]   = "1",
6502 	[BLK_ZONED_HM]   = "2",
6503 };
6504 
6505 static int sdeb_zbc_model_str(const char *cp)
6506 {
6507 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6508 
6509 	if (res < 0) {
6510 		res = sysfs_match_string(zbc_model_strs_b, cp);
6511 		if (res < 0) {
6512 			res = sysfs_match_string(zbc_model_strs_c, cp);
6513 			if (res < 0)
6514 				return -EINVAL;
6515 		}
6516 	}
6517 	return res;
6518 }
6519 
6520 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6521 {
6522 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6523 			 zbc_model_strs_a[sdeb_zbc_model]);
6524 }
6525 static DRIVER_ATTR_RO(zbc);
6526 
6527 /* Note: The following array creates attribute files in the
6528    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6529    files (over those found in the /sys/module/scsi_debug/parameters
6530    directory) is that auxiliary actions can be triggered when an attribute
6531    is changed. For example see: add_host_store() above.
6532  */
6533 
6534 static struct attribute *sdebug_drv_attrs[] = {
6535 	&driver_attr_delay.attr,
6536 	&driver_attr_opts.attr,
6537 	&driver_attr_ptype.attr,
6538 	&driver_attr_dsense.attr,
6539 	&driver_attr_fake_rw.attr,
6540 	&driver_attr_host_max_queue.attr,
6541 	&driver_attr_no_lun_0.attr,
6542 	&driver_attr_num_tgts.attr,
6543 	&driver_attr_dev_size_mb.attr,
6544 	&driver_attr_num_parts.attr,
6545 	&driver_attr_every_nth.attr,
6546 	&driver_attr_max_luns.attr,
6547 	&driver_attr_max_queue.attr,
6548 	&driver_attr_no_uld.attr,
6549 	&driver_attr_scsi_level.attr,
6550 	&driver_attr_virtual_gb.attr,
6551 	&driver_attr_add_host.attr,
6552 	&driver_attr_per_host_store.attr,
6553 	&driver_attr_vpd_use_hostno.attr,
6554 	&driver_attr_sector_size.attr,
6555 	&driver_attr_statistics.attr,
6556 	&driver_attr_submit_queues.attr,
6557 	&driver_attr_dix.attr,
6558 	&driver_attr_dif.attr,
6559 	&driver_attr_guard.attr,
6560 	&driver_attr_ato.attr,
6561 	&driver_attr_map.attr,
6562 	&driver_attr_random.attr,
6563 	&driver_attr_removable.attr,
6564 	&driver_attr_host_lock.attr,
6565 	&driver_attr_ndelay.attr,
6566 	&driver_attr_strict.attr,
6567 	&driver_attr_uuid_ctl.attr,
6568 	&driver_attr_cdb_len.attr,
6569 	&driver_attr_zbc.attr,
6570 	NULL,
6571 };
6572 ATTRIBUTE_GROUPS(sdebug_drv);
6573 
6574 static struct device *pseudo_primary;
6575 
6576 static int __init scsi_debug_init(void)
6577 {
6578 	bool want_store = (sdebug_fake_rw == 0);
6579 	unsigned long sz;
6580 	int k, ret, hosts_to_add;
6581 	int idx = -1;
6582 
6583 	ramdisk_lck_a[0] = &atomic_rw;
6584 	ramdisk_lck_a[1] = &atomic_rw2;
6585 	atomic_set(&retired_max_queue, 0);
6586 
6587 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6588 		pr_warn("ndelay must be less than 1 second, ignored\n");
6589 		sdebug_ndelay = 0;
6590 	} else if (sdebug_ndelay > 0)
6591 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6592 
6593 	switch (sdebug_sector_size) {
6594 	case  512:
6595 	case 1024:
6596 	case 2048:
6597 	case 4096:
6598 		break;
6599 	default:
6600 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6601 		return -EINVAL;
6602 	}
6603 
6604 	switch (sdebug_dif) {
6605 	case T10_PI_TYPE0_PROTECTION:
6606 		break;
6607 	case T10_PI_TYPE1_PROTECTION:
6608 	case T10_PI_TYPE2_PROTECTION:
6609 	case T10_PI_TYPE3_PROTECTION:
6610 		have_dif_prot = true;
6611 		break;
6612 
6613 	default:
6614 		pr_err("dif must be 0, 1, 2 or 3\n");
6615 		return -EINVAL;
6616 	}
6617 
6618 	if (sdebug_num_tgts < 0) {
6619 		pr_err("num_tgts must be >= 0\n");
6620 		return -EINVAL;
6621 	}
6622 
6623 	if (sdebug_guard > 1) {
6624 		pr_err("guard must be 0 or 1\n");
6625 		return -EINVAL;
6626 	}
6627 
6628 	if (sdebug_ato > 1) {
6629 		pr_err("ato must be 0 or 1\n");
6630 		return -EINVAL;
6631 	}
6632 
6633 	if (sdebug_physblk_exp > 15) {
6634 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6635 		return -EINVAL;
6636 	}
6637 	if (sdebug_max_luns > 256) {
6638 		pr_warn("max_luns can be no more than 256, use default\n");
6639 		sdebug_max_luns = DEF_MAX_LUNS;
6640 	}
6641 
6642 	if (sdebug_lowest_aligned > 0x3fff) {
6643 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6644 		return -EINVAL;
6645 	}
6646 
6647 	if (submit_queues < 1) {
6648 		pr_err("submit_queues must be 1 or more\n");
6649 		return -EINVAL;
6650 	}
6651 
6652 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6653 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6654 		return -EINVAL;
6655 	}
6656 
6657 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6658 	    (sdebug_host_max_queue < 0)) {
6659 		pr_err("host_max_queue must be in range [0 %d]\n",
6660 		       SDEBUG_CANQUEUE);
6661 		return -EINVAL;
6662 	}
6663 
6664 	if (sdebug_host_max_queue &&
6665 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6666 		sdebug_max_queue = sdebug_host_max_queue;
6667 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6668 			sdebug_max_queue);
6669 	}
6670 
6671 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6672 			       GFP_KERNEL);
6673 	if (sdebug_q_arr == NULL)
6674 		return -ENOMEM;
6675 	for (k = 0; k < submit_queues; ++k)
6676 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6677 
6678 	/*
6679 	 * check for host managed zoned block device specified with
6680 	 * ptype=0x14 or zbc=XXX.
6681 	 */
6682 	if (sdebug_ptype == TYPE_ZBC) {
6683 		sdeb_zbc_model = BLK_ZONED_HM;
6684 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6685 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6686 		if (k < 0) {
6687 			ret = k;
6688 			goto free_vm;
6689 		}
6690 		sdeb_zbc_model = k;
6691 		switch (sdeb_zbc_model) {
6692 		case BLK_ZONED_NONE:
6693 		case BLK_ZONED_HA:
6694 			sdebug_ptype = TYPE_DISK;
6695 			break;
6696 		case BLK_ZONED_HM:
6697 			sdebug_ptype = TYPE_ZBC;
6698 			break;
6699 		default:
6700 			pr_err("Invalid ZBC model\n");
6701 			return -EINVAL;
6702 		}
6703 	}
6704 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6705 		sdeb_zbc_in_use = true;
6706 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6707 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6708 	}
6709 
6710 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6711 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6712 	if (sdebug_dev_size_mb < 1)
6713 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6714 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6715 	sdebug_store_sectors = sz / sdebug_sector_size;
6716 	sdebug_capacity = get_sdebug_capacity();
6717 
6718 	/* play around with geometry, don't waste too much on track 0 */
6719 	sdebug_heads = 8;
6720 	sdebug_sectors_per = 32;
6721 	if (sdebug_dev_size_mb >= 256)
6722 		sdebug_heads = 64;
6723 	else if (sdebug_dev_size_mb >= 16)
6724 		sdebug_heads = 32;
6725 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6726 			       (sdebug_sectors_per * sdebug_heads);
6727 	if (sdebug_cylinders_per >= 1024) {
6728 		/* other LLDs do this; implies >= 1GB ram disk ... */
6729 		sdebug_heads = 255;
6730 		sdebug_sectors_per = 63;
6731 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6732 			       (sdebug_sectors_per * sdebug_heads);
6733 	}
6734 	if (scsi_debug_lbp()) {
6735 		sdebug_unmap_max_blocks =
6736 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6737 
6738 		sdebug_unmap_max_desc =
6739 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6740 
6741 		sdebug_unmap_granularity =
6742 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6743 
6744 		if (sdebug_unmap_alignment &&
6745 		    sdebug_unmap_granularity <=
6746 		    sdebug_unmap_alignment) {
6747 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6748 			ret = -EINVAL;
6749 			goto free_q_arr;
6750 		}
6751 	}
6752 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6753 	if (want_store) {
6754 		idx = sdebug_add_store();
6755 		if (idx < 0) {
6756 			ret = idx;
6757 			goto free_q_arr;
6758 		}
6759 	}
6760 
6761 	pseudo_primary = root_device_register("pseudo_0");
6762 	if (IS_ERR(pseudo_primary)) {
6763 		pr_warn("root_device_register() error\n");
6764 		ret = PTR_ERR(pseudo_primary);
6765 		goto free_vm;
6766 	}
6767 	ret = bus_register(&pseudo_lld_bus);
6768 	if (ret < 0) {
6769 		pr_warn("bus_register error: %d\n", ret);
6770 		goto dev_unreg;
6771 	}
6772 	ret = driver_register(&sdebug_driverfs_driver);
6773 	if (ret < 0) {
6774 		pr_warn("driver_register error: %d\n", ret);
6775 		goto bus_unreg;
6776 	}
6777 
6778 	hosts_to_add = sdebug_add_host;
6779 	sdebug_add_host = 0;
6780 
6781 	for (k = 0; k < hosts_to_add; k++) {
6782 		if (want_store && k == 0) {
6783 			ret = sdebug_add_host_helper(idx);
6784 			if (ret < 0) {
6785 				pr_err("add_host_helper k=%d, error=%d\n",
6786 				       k, -ret);
6787 				break;
6788 			}
6789 		} else {
6790 			ret = sdebug_do_add_host(want_store &&
6791 						 sdebug_per_host_store);
6792 			if (ret < 0) {
6793 				pr_err("add_host k=%d error=%d\n", k, -ret);
6794 				break;
6795 			}
6796 		}
6797 	}
6798 	if (sdebug_verbose)
6799 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6800 
6801 	return 0;
6802 
6803 bus_unreg:
6804 	bus_unregister(&pseudo_lld_bus);
6805 dev_unreg:
6806 	root_device_unregister(pseudo_primary);
6807 free_vm:
6808 	sdebug_erase_store(idx, NULL);
6809 free_q_arr:
6810 	kfree(sdebug_q_arr);
6811 	return ret;
6812 }
6813 
6814 static void __exit scsi_debug_exit(void)
6815 {
6816 	int k = sdebug_num_hosts;
6817 
6818 	stop_all_queued();
6819 	for (; k; k--)
6820 		sdebug_do_remove_host(true);
6821 	free_all_queued();
6822 	driver_unregister(&sdebug_driverfs_driver);
6823 	bus_unregister(&pseudo_lld_bus);
6824 	root_device_unregister(pseudo_primary);
6825 
6826 	sdebug_erase_all_stores(false);
6827 	xa_destroy(per_store_ap);
6828 }
6829 
6830 device_initcall(scsi_debug_init);
6831 module_exit(scsi_debug_exit);
6832 
6833 static void sdebug_release_adapter(struct device *dev)
6834 {
6835 	struct sdebug_host_info *sdbg_host;
6836 
6837 	sdbg_host = to_sdebug_host(dev);
6838 	kfree(sdbg_host);
6839 }
6840 
6841 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6842 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6843 {
6844 	if (idx < 0)
6845 		return;
6846 	if (!sip) {
6847 		if (xa_empty(per_store_ap))
6848 			return;
6849 		sip = xa_load(per_store_ap, idx);
6850 		if (!sip)
6851 			return;
6852 	}
6853 	vfree(sip->map_storep);
6854 	vfree(sip->dif_storep);
6855 	vfree(sip->storep);
6856 	xa_erase(per_store_ap, idx);
6857 	kfree(sip);
6858 }
6859 
6860 /* Assume apart_from_first==false only in shutdown case. */
6861 static void sdebug_erase_all_stores(bool apart_from_first)
6862 {
6863 	unsigned long idx;
6864 	struct sdeb_store_info *sip = NULL;
6865 
6866 	xa_for_each(per_store_ap, idx, sip) {
6867 		if (apart_from_first)
6868 			apart_from_first = false;
6869 		else
6870 			sdebug_erase_store(idx, sip);
6871 	}
6872 	if (apart_from_first)
6873 		sdeb_most_recent_idx = sdeb_first_idx;
6874 }
6875 
6876 /*
6877  * Returns store xarray new element index (idx) if >=0 else negated errno.
6878  * Limit the number of stores to 65536.
6879  */
6880 static int sdebug_add_store(void)
6881 {
6882 	int res;
6883 	u32 n_idx;
6884 	unsigned long iflags;
6885 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6886 	struct sdeb_store_info *sip = NULL;
6887 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6888 
6889 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6890 	if (!sip)
6891 		return -ENOMEM;
6892 
6893 	xa_lock_irqsave(per_store_ap, iflags);
6894 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6895 	if (unlikely(res < 0)) {
6896 		xa_unlock_irqrestore(per_store_ap, iflags);
6897 		kfree(sip);
6898 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6899 		return res;
6900 	}
6901 	sdeb_most_recent_idx = n_idx;
6902 	if (sdeb_first_idx < 0)
6903 		sdeb_first_idx = n_idx;
6904 	xa_unlock_irqrestore(per_store_ap, iflags);
6905 
6906 	res = -ENOMEM;
6907 	sip->storep = vzalloc(sz);
6908 	if (!sip->storep) {
6909 		pr_err("user data oom\n");
6910 		goto err;
6911 	}
6912 	if (sdebug_num_parts > 0)
6913 		sdebug_build_parts(sip->storep, sz);
6914 
6915 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6916 	if (sdebug_dix) {
6917 		int dif_size;
6918 
6919 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6920 		sip->dif_storep = vmalloc(dif_size);
6921 
6922 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6923 			sip->dif_storep);
6924 
6925 		if (!sip->dif_storep) {
6926 			pr_err("DIX oom\n");
6927 			goto err;
6928 		}
6929 		memset(sip->dif_storep, 0xff, dif_size);
6930 	}
6931 	/* Logical Block Provisioning */
6932 	if (scsi_debug_lbp()) {
6933 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6934 		sip->map_storep = vmalloc(array_size(sizeof(long),
6935 						     BITS_TO_LONGS(map_size)));
6936 
6937 		pr_info("%lu provisioning blocks\n", map_size);
6938 
6939 		if (!sip->map_storep) {
6940 			pr_err("LBP map oom\n");
6941 			goto err;
6942 		}
6943 
6944 		bitmap_zero(sip->map_storep, map_size);
6945 
6946 		/* Map first 1KB for partition table */
6947 		if (sdebug_num_parts)
6948 			map_region(sip, 0, 2);
6949 	}
6950 
6951 	rwlock_init(&sip->macc_lck);
6952 	return (int)n_idx;
6953 err:
6954 	sdebug_erase_store((int)n_idx, sip);
6955 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
6956 	return res;
6957 }
6958 
6959 static int sdebug_add_host_helper(int per_host_idx)
6960 {
6961 	int k, devs_per_host, idx;
6962 	int error = -ENOMEM;
6963 	struct sdebug_host_info *sdbg_host;
6964 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6965 
6966 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6967 	if (!sdbg_host)
6968 		return -ENOMEM;
6969 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6970 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6971 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6972 	sdbg_host->si_idx = idx;
6973 
6974 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6975 
6976 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6977 	for (k = 0; k < devs_per_host; k++) {
6978 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6979 		if (!sdbg_devinfo)
6980 			goto clean;
6981 	}
6982 
6983 	spin_lock(&sdebug_host_list_lock);
6984 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6985 	spin_unlock(&sdebug_host_list_lock);
6986 
6987 	sdbg_host->dev.bus = &pseudo_lld_bus;
6988 	sdbg_host->dev.parent = pseudo_primary;
6989 	sdbg_host->dev.release = &sdebug_release_adapter;
6990 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6991 
6992 	error = device_register(&sdbg_host->dev);
6993 	if (error)
6994 		goto clean;
6995 
6996 	++sdebug_num_hosts;
6997 	return 0;
6998 
6999 clean:
7000 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7001 				 dev_list) {
7002 		list_del(&sdbg_devinfo->dev_list);
7003 		kfree(sdbg_devinfo->zstate);
7004 		kfree(sdbg_devinfo);
7005 	}
7006 	kfree(sdbg_host);
7007 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7008 	return error;
7009 }
7010 
7011 static int sdebug_do_add_host(bool mk_new_store)
7012 {
7013 	int ph_idx = sdeb_most_recent_idx;
7014 
7015 	if (mk_new_store) {
7016 		ph_idx = sdebug_add_store();
7017 		if (ph_idx < 0)
7018 			return ph_idx;
7019 	}
7020 	return sdebug_add_host_helper(ph_idx);
7021 }
7022 
7023 static void sdebug_do_remove_host(bool the_end)
7024 {
7025 	int idx = -1;
7026 	struct sdebug_host_info *sdbg_host = NULL;
7027 	struct sdebug_host_info *sdbg_host2;
7028 
7029 	spin_lock(&sdebug_host_list_lock);
7030 	if (!list_empty(&sdebug_host_list)) {
7031 		sdbg_host = list_entry(sdebug_host_list.prev,
7032 				       struct sdebug_host_info, host_list);
7033 		idx = sdbg_host->si_idx;
7034 	}
7035 	if (!the_end && idx >= 0) {
7036 		bool unique = true;
7037 
7038 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7039 			if (sdbg_host2 == sdbg_host)
7040 				continue;
7041 			if (idx == sdbg_host2->si_idx) {
7042 				unique = false;
7043 				break;
7044 			}
7045 		}
7046 		if (unique) {
7047 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7048 			if (idx == sdeb_most_recent_idx)
7049 				--sdeb_most_recent_idx;
7050 		}
7051 	}
7052 	if (sdbg_host)
7053 		list_del(&sdbg_host->host_list);
7054 	spin_unlock(&sdebug_host_list_lock);
7055 
7056 	if (!sdbg_host)
7057 		return;
7058 
7059 	device_unregister(&sdbg_host->dev);
7060 	--sdebug_num_hosts;
7061 }
7062 
7063 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7064 {
7065 	int num_in_q = 0;
7066 	struct sdebug_dev_info *devip;
7067 
7068 	block_unblock_all_queues(true);
7069 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7070 	if (NULL == devip) {
7071 		block_unblock_all_queues(false);
7072 		return	-ENODEV;
7073 	}
7074 	num_in_q = atomic_read(&devip->num_in_q);
7075 
7076 	if (qdepth < 1)
7077 		qdepth = 1;
7078 	/* allow to exceed max host qc_arr elements for testing */
7079 	if (qdepth > SDEBUG_CANQUEUE + 10)
7080 		qdepth = SDEBUG_CANQUEUE + 10;
7081 	scsi_change_queue_depth(sdev, qdepth);
7082 
7083 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7084 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7085 			    __func__, qdepth, num_in_q);
7086 	}
7087 	block_unblock_all_queues(false);
7088 	return sdev->queue_depth;
7089 }
7090 
7091 static bool fake_timeout(struct scsi_cmnd *scp)
7092 {
7093 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7094 		if (sdebug_every_nth < -1)
7095 			sdebug_every_nth = -1;
7096 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7097 			return true; /* ignore command causing timeout */
7098 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7099 			 scsi_medium_access_command(scp))
7100 			return true; /* time out reads and writes */
7101 	}
7102 	return false;
7103 }
7104 
7105 static bool fake_host_busy(struct scsi_cmnd *scp)
7106 {
7107 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
7108 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7109 }
7110 
7111 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7112 				   struct scsi_cmnd *scp)
7113 {
7114 	u8 sdeb_i;
7115 	struct scsi_device *sdp = scp->device;
7116 	const struct opcode_info_t *oip;
7117 	const struct opcode_info_t *r_oip;
7118 	struct sdebug_dev_info *devip;
7119 
7120 	u8 *cmd = scp->cmnd;
7121 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7122 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7123 	int k, na;
7124 	int errsts = 0;
7125 	u32 flags;
7126 	u16 sa;
7127 	u8 opcode = cmd[0];
7128 	bool has_wlun_rl;
7129 
7130 	scsi_set_resid(scp, 0);
7131 	if (sdebug_statistics)
7132 		atomic_inc(&sdebug_cmnd_count);
7133 	if (unlikely(sdebug_verbose &&
7134 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7135 		char b[120];
7136 		int n, len, sb;
7137 
7138 		len = scp->cmd_len;
7139 		sb = (int)sizeof(b);
7140 		if (len > 32)
7141 			strcpy(b, "too long, over 32 bytes");
7142 		else {
7143 			for (k = 0, n = 0; k < len && n < sb; ++k)
7144 				n += scnprintf(b + n, sb - n, "%02x ",
7145 					       (u32)cmd[k]);
7146 		}
7147 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7148 			    blk_mq_unique_tag(scp->request), b);
7149 	}
7150 	if (fake_host_busy(scp))
7151 		return SCSI_MLQUEUE_HOST_BUSY;
7152 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7153 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
7154 		goto err_out;
7155 
7156 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7157 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7158 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7159 	if (unlikely(!devip)) {
7160 		devip = find_build_dev_info(sdp);
7161 		if (NULL == devip)
7162 			goto err_out;
7163 	}
7164 	na = oip->num_attached;
7165 	r_pfp = oip->pfp;
7166 	if (na) {	/* multiple commands with this opcode */
7167 		r_oip = oip;
7168 		if (FF_SA & r_oip->flags) {
7169 			if (F_SA_LOW & oip->flags)
7170 				sa = 0x1f & cmd[1];
7171 			else
7172 				sa = get_unaligned_be16(cmd + 8);
7173 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7174 				if (opcode == oip->opcode && sa == oip->sa)
7175 					break;
7176 			}
7177 		} else {   /* since no service action only check opcode */
7178 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7179 				if (opcode == oip->opcode)
7180 					break;
7181 			}
7182 		}
7183 		if (k > na) {
7184 			if (F_SA_LOW & r_oip->flags)
7185 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7186 			else if (F_SA_HIGH & r_oip->flags)
7187 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7188 			else
7189 				mk_sense_invalid_opcode(scp);
7190 			goto check_cond;
7191 		}
7192 	}	/* else (when na==0) we assume the oip is a match */
7193 	flags = oip->flags;
7194 	if (unlikely(F_INV_OP & flags)) {
7195 		mk_sense_invalid_opcode(scp);
7196 		goto check_cond;
7197 	}
7198 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7199 		if (sdebug_verbose)
7200 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7201 				    my_name, opcode, " supported for wlun");
7202 		mk_sense_invalid_opcode(scp);
7203 		goto check_cond;
7204 	}
7205 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7206 		u8 rem;
7207 		int j;
7208 
7209 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7210 			rem = ~oip->len_mask[k] & cmd[k];
7211 			if (rem) {
7212 				for (j = 7; j >= 0; --j, rem <<= 1) {
7213 					if (0x80 & rem)
7214 						break;
7215 				}
7216 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7217 				goto check_cond;
7218 			}
7219 		}
7220 	}
7221 	if (unlikely(!(F_SKIP_UA & flags) &&
7222 		     find_first_bit(devip->uas_bm,
7223 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7224 		errsts = make_ua(scp, devip);
7225 		if (errsts)
7226 			goto check_cond;
7227 	}
7228 	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
7229 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7230 		if (sdebug_verbose)
7231 			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
7232 				    "%s\n", my_name, "initializing command "
7233 				    "required");
7234 		errsts = check_condition_result;
7235 		goto fini;
7236 	}
7237 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7238 		goto fini;
7239 	if (unlikely(sdebug_every_nth)) {
7240 		if (fake_timeout(scp))
7241 			return 0;	/* ignore command: make trouble */
7242 	}
7243 	if (likely(oip->pfp))
7244 		pfp = oip->pfp;	/* calls a resp_* function */
7245 	else
7246 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7247 
7248 fini:
7249 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7250 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7251 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7252 					    sdebug_ndelay > 10000)) {
7253 		/*
7254 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7255 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7256 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7257 		 * For Synchronize Cache want 1/20 of SSU's delay.
7258 		 */
7259 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7260 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7261 
7262 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7263 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7264 	} else
7265 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7266 				     sdebug_ndelay);
7267 check_cond:
7268 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7269 err_out:
7270 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7271 }
7272 
7273 static struct scsi_host_template sdebug_driver_template = {
7274 	.show_info =		scsi_debug_show_info,
7275 	.write_info =		scsi_debug_write_info,
7276 	.proc_name =		sdebug_proc_name,
7277 	.name =			"SCSI DEBUG",
7278 	.info =			scsi_debug_info,
7279 	.slave_alloc =		scsi_debug_slave_alloc,
7280 	.slave_configure =	scsi_debug_slave_configure,
7281 	.slave_destroy =	scsi_debug_slave_destroy,
7282 	.ioctl =		scsi_debug_ioctl,
7283 	.queuecommand =		scsi_debug_queuecommand,
7284 	.change_queue_depth =	sdebug_change_qdepth,
7285 	.eh_abort_handler =	scsi_debug_abort,
7286 	.eh_device_reset_handler = scsi_debug_device_reset,
7287 	.eh_target_reset_handler = scsi_debug_target_reset,
7288 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7289 	.eh_host_reset_handler = scsi_debug_host_reset,
7290 	.can_queue =		SDEBUG_CANQUEUE,
7291 	.this_id =		7,
7292 	.sg_tablesize =		SG_MAX_SEGMENTS,
7293 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7294 	.max_sectors =		-1U,
7295 	.max_segment_size =	-1U,
7296 	.module =		THIS_MODULE,
7297 	.track_queue_depth =	1,
7298 };
7299 
7300 static int sdebug_driver_probe(struct device *dev)
7301 {
7302 	int error = 0;
7303 	struct sdebug_host_info *sdbg_host;
7304 	struct Scsi_Host *hpnt;
7305 	int hprot;
7306 
7307 	sdbg_host = to_sdebug_host(dev);
7308 
7309 	if (sdebug_host_max_queue)
7310 		sdebug_driver_template.can_queue = sdebug_host_max_queue;
7311 	else
7312 		sdebug_driver_template.can_queue = sdebug_max_queue;
7313 	if (!sdebug_clustering)
7314 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7315 
7316 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7317 	if (NULL == hpnt) {
7318 		pr_err("scsi_host_alloc failed\n");
7319 		error = -ENODEV;
7320 		return error;
7321 	}
7322 	if (submit_queues > nr_cpu_ids) {
7323 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7324 			my_name, submit_queues, nr_cpu_ids);
7325 		submit_queues = nr_cpu_ids;
7326 	}
7327 	/*
7328 	 * Decide whether to tell scsi subsystem that we want mq. The
7329 	 * following should give the same answer for each host. If the host
7330 	 * has a limit of hostwide max commands, then do not set.
7331 	 */
7332 	if (!sdebug_host_max_queue)
7333 		hpnt->nr_hw_queues = submit_queues;
7334 
7335 	sdbg_host->shost = hpnt;
7336 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7337 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7338 		hpnt->max_id = sdebug_num_tgts + 1;
7339 	else
7340 		hpnt->max_id = sdebug_num_tgts;
7341 	/* = sdebug_max_luns; */
7342 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7343 
7344 	hprot = 0;
7345 
7346 	switch (sdebug_dif) {
7347 
7348 	case T10_PI_TYPE1_PROTECTION:
7349 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7350 		if (sdebug_dix)
7351 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7352 		break;
7353 
7354 	case T10_PI_TYPE2_PROTECTION:
7355 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7356 		if (sdebug_dix)
7357 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7358 		break;
7359 
7360 	case T10_PI_TYPE3_PROTECTION:
7361 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7362 		if (sdebug_dix)
7363 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7364 		break;
7365 
7366 	default:
7367 		if (sdebug_dix)
7368 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7369 		break;
7370 	}
7371 
7372 	scsi_host_set_prot(hpnt, hprot);
7373 
7374 	if (have_dif_prot || sdebug_dix)
7375 		pr_info("host protection%s%s%s%s%s%s%s\n",
7376 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7377 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7378 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7379 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7380 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7381 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7382 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7383 
7384 	if (sdebug_guard == 1)
7385 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7386 	else
7387 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7388 
7389 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7390 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7391 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7392 		sdebug_statistics = true;
7393 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7394 	if (error) {
7395 		pr_err("scsi_add_host failed\n");
7396 		error = -ENODEV;
7397 		scsi_host_put(hpnt);
7398 	} else {
7399 		scsi_scan_host(hpnt);
7400 	}
7401 
7402 	return error;
7403 }
7404 
7405 static int sdebug_driver_remove(struct device *dev)
7406 {
7407 	struct sdebug_host_info *sdbg_host;
7408 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7409 
7410 	sdbg_host = to_sdebug_host(dev);
7411 
7412 	if (!sdbg_host) {
7413 		pr_err("Unable to locate host info\n");
7414 		return -ENODEV;
7415 	}
7416 
7417 	scsi_remove_host(sdbg_host->shost);
7418 
7419 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7420 				 dev_list) {
7421 		list_del(&sdbg_devinfo->dev_list);
7422 		kfree(sdbg_devinfo->zstate);
7423 		kfree(sdbg_devinfo);
7424 	}
7425 
7426 	scsi_host_put(sdbg_host->shost);
7427 	return 0;
7428 }
7429 
7430 static int pseudo_lld_bus_match(struct device *dev,
7431 				struct device_driver *dev_driver)
7432 {
7433 	return 1;
7434 }
7435 
7436 static struct bus_type pseudo_lld_bus = {
7437 	.name = "pseudo",
7438 	.match = pseudo_lld_bus_match,
7439 	.probe = sdebug_driver_probe,
7440 	.remove = sdebug_driver_remove,
7441 	.drv_groups = sdebug_drv_groups,
7442 };
7443